// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}

/*
 * Check if the referenced field is an array and return true,
 * as arrays are OK to dereference.
 */
static bool test_field(const char *fmt, struct trace_event_call *call)
{
	struct trace_event_fields *field = call->class->fields_array;
	const char *array_descriptor;
	const char *p = fmt;
	int len;

	if (!(len = str_has_prefix(fmt, "REC->")))
		return false;
	fmt += len;
	for (p = fmt; *p; p++) {
		if (!isalnum(*p) && *p != '_')
			break;
	}
	len = p - fmt;

	for (; field->type; field++) {
		if (strncmp(field->name, fmt, len) ||
		    field->name[len])
			continue;
		array_descriptor = strchr(field->type, '[');
		/* This is an array and is OK to dereference. */
		return array_descriptor != NULL;
	}
	return false;
}

/*
 * Examine the print fmt of the event looking for unsafe dereference
 * pointers using %p* that could be recorded in the trace event and
 * much later referenced after the pointer was freed. Dereferencing
 * a pointer is OK if it is dereferenced into the event itself.
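 *
 * An illustrative sketch (hypothetical field names): a print fmt of
 *	"%pI4", REC->saddr
 * is fine when saddr is an array copied into the event itself, but
 * unsafe when it is a pointer to data that may already be freed by
 * the time the event is read.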
 */
static void test_event_printk(struct trace_event_call *call)
{
	u64 dereference_flags = 0;
	bool first = true;
	const char *fmt, *c, *r, *a;
	int parens = 0;
	char in_quote = 0;
	int start_arg = 0;
	int arg = 0;
	int i;

	fmt = call->print_fmt;

	if (!fmt)
		return;

	for (i = 0; fmt[i]; i++) {
		switch (fmt[i]) {
		case '\\':
			i++;
			if (!fmt[i])
				return;
			continue;
		case '"':
		case '\'':
			/*
			 * The print fmt starts with a string that
			 * is processed first to find %p* usage,
			 * then after the first string, the print fmt
			 * contains arguments that are used to check
			 * if the dereferenced %p* usage is safe.
			 */
			if (first) {
				if (fmt[i] == '\'')
					continue;
				if (in_quote) {
					arg = 0;
					first = false;
					/*
					 * If there were no %p* uses,
					 * the fmt is OK.
					 */
					if (!dereference_flags)
						return;
				}
			}
			if (in_quote) {
				if (in_quote == fmt[i])
					in_quote = 0;
			} else {
				in_quote = fmt[i];
			}
			continue;
		case '%':
			if (!first || !in_quote)
				continue;
			i++;
			if (!fmt[i])
				return;
			switch (fmt[i]) {
			case '%':
				continue;
			case 'p':
				/* Find dereferencing fields */
				switch (fmt[i + 1]) {
				case 'B': case 'R': case 'r':
				case 'b': case 'M': case 'm':
				case 'I': case 'i': case 'E':
				case 'U': case 'V': case 'N':
				case 'a': case 'd': case 'D':
				case 'g': case 't': case 'C':
				case 'O': case 'f':
					if (WARN_ONCE(arg == 63,
						      "Too many args for event: %s",
						      trace_event_name(call)))
						return;
					dereference_flags |= 1ULL << arg;
				}
				break;
			default:
			{
				bool star = false;
				int j;

				/* Increment arg if %*s exists. */
				for (j = 0; fmt[i + j]; j++) {
					if (isdigit(fmt[i + j]) ||
					    fmt[i + j] == '.')
						continue;
					if (fmt[i + j] == '*') {
						star = true;
						continue;
					}
					if ((fmt[i + j] == 's') && star)
						arg++;
					break;
				}
				break;
			} /* default */

			} /* switch */
			arg++;
			continue;
		case '(':
			if (in_quote)
				continue;
			parens++;
			continue;
		case ')':
			if (in_quote)
				continue;
			parens--;
			if (WARN_ONCE(parens < 0,
				      "Paren mismatch for event: %s\narg='%s'\n%*s",
				      trace_event_name(call),
				      fmt + start_arg,
				      (i - start_arg) + 5, "^"))
				return;
			continue;
		case ',':
			if (in_quote || parens)
				continue;
			i++;
			while (isspace(fmt[i]))
				i++;
			start_arg = i;
			if (!(dereference_flags & (1ULL << arg)))
				goto next_arg;

			/* Find the REC-> in the argument */
			c = strchr(fmt + i, ',');
			r = strstr(fmt + i, "REC->");
			if (r && (!c || r < c)) {
				/*
				 * Addresses of events on the buffer,
				 * or an array on the buffer is
				 * OK to dereference.
				 * There are ways to fool this, but
				 * this is to catch common mistakes,
				 * not malicious code.
				 */
				a = strchr(fmt + i, '&');
				if ((a && (a < r)) || test_field(r, call))
					dereference_flags &= ~(1ULL << arg);
			}
next_arg:
			i--;
			arg++;
		}
	}

	/*
	 * If you triggered the below warning, the trace event reported
	 * uses an unsafe dereference pointer %p*. As the data stored
	 * at the trace event time may no longer exist when the trace
	 * event is printed, dereferencing to the original source is
	 * unsafe. The source of the dereference must be copied into the
	 * event itself, and the dereference must access the copy instead.
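	 *
	 * The usual fix, sketched in TRACE_EVENT() terms with
	 * hypothetical names: copy the data into the entry with
	 * __array() or __dynamic_array() in TP_STRUCT__entry() and
	 * point the %p* conversion at that copy (REC->field), instead
	 * of at a pointer that was merely recorded in the event.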
	 */
	if (WARN_ON_ONCE(dereference_flags)) {
		arg = 1;
		while (!(dereference_flags & 1)) {
			dereference_flags >>= 1;
			arg++;
		}
		pr_warn("event %s has unsafe dereference of argument %d\n",
			trace_event_name(call), arg);
		pr_warn("print_fmt: %s\n", fmt);
	}
}

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	test_event_printk(call);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	data = this_cpu_ptr(tr->array_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	fbuffer->trace_ctx = tracing_gen_ctx_dec();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->trace_ctx);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	unsigned long file_flags = file->flags;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to SOFT_MODE. */
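			/*
			 * Note: a soft-disabled event stays registered with
			 * its tracepoint but discards its output, so users
			 * such as event triggers can later make it live by
			 * clearing just the SOFT_DISABLED bit.
			 */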
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event "
					"%s\n", trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	/* Enable or disable use of trace_buffered_event */
	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
			trace_buffered_event_enable();
		else
			trace_buffered_event_disable();
	}

	return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
	      trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}

static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
					     lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_free_pid_list(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_free_pid_list(no_pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
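 *
 * Matching (illustrative): @match is compared against both the event
 * name and the system name, while @sub and @event must match the
 * system name and event name respectively; a NULL parameter matches
 * anything.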
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	if (!tr)
		return -ENOENT;
	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
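
/*
 * A minimal usage sketch: a caller that wants the "sched:sched_switch"
 * event recorded in the top-level buffer could do
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *
 * and pass 0 as the last argument to disable it again.
 */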

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE 127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
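		 * (Such events provide no ->reg() callback, which is
		 * what the check below keys on.)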
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *
__next(struct seq_file *m, void *v, loff_t *pos, int type)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list;

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_PIDS);
}

static void *
np_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_NO_PIDS);
}

static void *__start(struct seq_file *m, loff_t *pos, int type)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_PIDS);
}

static void *np_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_NO_PIDS);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
		    !trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
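		 * (Illustrative mapping: set == 1 means every matching
		 * event was disabled ('0'), set == 2 means every one was
		 * enabled ('1'), and set == 3 is a mixture ('X'),
		 * indexing set_to_char[] above.)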
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
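	/*
	 * e.g. a line as it appears in a tracefs "format" file
	 * (hypothetical field, illustrative offsets):
	 *	field:int my_field;	offset:8;	size:4;	signed:1;
	 */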
	array_descriptor = strchr(field->type, '[');

	if (str_has_prefix(field->type, "__data_loc"))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	/* Do we want to hide event format files on tracefs lockdown? */

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	ret = tracing_open_generic_tr(inode, filp);
	if (ret < 0) {
		kfree(dir);
		return ret;
	}
	dir->tr = tr;
	filp->private_data = dir;

	return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct trace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = apply_subsystem_event_filter(dir, buf);
	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     mutex_is_locked(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
					     mutex_is_locked(&event_mutex));

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void register_pid_events(struct trace_array *tr)
{
	/*
	 * Register a probe that is called before all other probes
	 * to set ignore_pid if next or prev do not match.
	 * Register a probe that is called after all other probes
	 * to only keep ignore_pid set if next pid matches.
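	 * (Tracepoint probes run in descending priority order, so the
	 * INT_MAX "pre" probes below run before any default-priority
	 * probe and the priority 0 "post" probes run after them.)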
	 */
	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
					 tr, 0);

	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
					 tr, 0);

	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
					     tr, INT_MAX);
	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
					     tr, 0);

	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
					 tr, 0);
}

static ssize_t
event_pid_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos, int type)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *other_pids = NULL;
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	ssize_t ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	mutex_lock(&event_mutex);

	if (type == TRACE_PIDS) {
		filtered_pids = rcu_dereference_protected(tr->filtered_pids,
							  lockdep_is_held(&event_mutex));
		other_pids = rcu_dereference_protected(tr->filtered_no_pids,
							  lockdep_is_held(&event_mutex));
	} else {
		filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
							  lockdep_is_held(&event_mutex));
		other_pids = rcu_dereference_protected(tr->filtered_pids,
							  lockdep_is_held(&event_mutex));
	}

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	if (type == TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, pid_list);
	else
		rcu_assign_pointer(tr->filtered_no_pids, pid_list);

	list_for_each_entry(file, &tr->events, list) {
		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	if (filtered_pids) {
		tracepoint_synchronize_unregister();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list && !other_pids) {
		register_pid_events(tr);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

 out:
	mutex_unlock(&event_mutex);

	if (ret > 0)
		*ppos += ret;

	return ret;
}

static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
}

static ssize_t
ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_pid_seq_ops = {
	.start = p_start,
	.next = p_next,
	.show = trace_pid_show,
	.stop = p_stop,
};

static const struct seq_operations show_set_no_pid_seq_ops = {
	.start = np_start,
	.next = np_next,
	.show = trace_pid_show,
	.stop = p_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_set_event_pid_fops = {
	.open = ftrace_event_set_pid_open,
	.read = seq_read,
	.write = ftrace_event_pid_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_set_event_notrace_pid_fops = {
	.open = ftrace_event_set_npid_open,
	.read = seq_read,
	.write = ftrace_event_npid_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int ftrace_event_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	/* Checks for tracefs lockdown */
	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static int
ftrace_event_set_pid_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_event_pids(tr, TRACE_PIDS);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static int
ftrace_event_set_npid_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_event_pids(tr, TRACE_NO_PIDS);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;

	/* Only allocate if dynamic (kprobes and modules) */
	system->name = kstrdup_const(name, GFP_KERNEL);
	if (!system->name)
		goto out_free;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree_const(system->name);
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct trace_event_file *file, struct dentry *parent)
{
	struct trace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = tracefs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warn("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	/* the ftrace system is special, do not create enable or filter files */
	if (strcmp(name, "ftrace") != 0) {

		entry = tracefs_create_file("filter", 0644, dir->entry, dir,
					    &ftrace_subsystem_filter_fops);
		if (!entry) {
			kfree(system->filter);
			system->filter = NULL;
			pr_warn("Could not create tracefs '%s/filter' entry\n", name);
		}

		trace_create_file("enable", 0644, dir->entry, dir,
				  &ftrace_system_enable_fops);
	}

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warn("No memory to create event subsystem %s\n", name);
	return NULL;
}

static int
event_define_fields(struct trace_event_call *call)
{
	struct list_head *head;
	int ret = 0;

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
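	 *
	 * Fields are laid out sequentially after the common
	 * struct trace_entry header: the loop below aligns each
	 * field with ALIGN() and then advances the offset by the
	 * field's size.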
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		struct trace_event_fields *field = call->class->fields_array;
		unsigned int offset = sizeof(struct trace_entry);

		for (; field->type; field++) {
			if (field->type == TRACE_FUNCTION_TYPE) {
				field->define_fields(call);
				break;
			}

			offset = ALIGN(offset, field->align);
			ret = trace_define_field(call, field->type, field->name,
						 offset, field->size,
						 field->is_signed, field->filter_type);
			if (WARN_ON_ONCE(ret)) {
				pr_err("error code is %d\n", ret);
				break;
			}

			offset += field->size;
		}
	}

	return ret;
}

static int
event_create_dir(struct dentry *parent, struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct dentry *d_events;
	const char *name;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	name = trace_event_name(call);
	file->dir = tracefs_create_dir(name, d_events);
	if (!file->dir) {
		pr_warn("Could not create tracefs '%s' directory\n", name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  &ftrace_enable_fops);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type,
				  &ftrace_event_id_fops);
#endif

	ret = event_define_fields(call);
	if (ret < 0) {
		pr_warn("Could not initialize trace point events/%s\n", name);
		return ret;
	}

	/*
	 * Only event directories that can be enabled should have
	 * triggers or filters.
	 */
	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
		trace_create_file("filter", 0644, file->dir, file,
				  &ftrace_event_filter_fops);

		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	}

#ifdef CONFIG_HIST_TRIGGERS
	trace_create_file("hist", 0444, file->dir, file,
			  &event_hist_fops);
#endif
#ifdef CONFIG_HIST_TRIGGERS_DEBUG
	trace_create_file("hist_debug", 0444, file->dir, file,
			  &event_hist_debug_fops);
#endif
	trace_create_file("format", 0444, file->dir, call,
			  &ftrace_event_format_fops);

#ifdef CONFIG_TRACE_EVENT_INJECT
	if (call->event.type && call->class->reg)
		trace_create_file("inject", 0200, file->dir, file,
				  &event_inject_fops);
#endif

	return 0;
}

static void remove_event_from_tracers(struct trace_event_call *call)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
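		 * (Each trace_array has at most one trace_event_file
		 * per event call, so nothing else in this tr can match.)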
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;

		if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
			tr->clear_trace = true;

		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_trace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct trace_event_call *call)
{
	int ret = 0;
	const char *name;

	name = trace_event_name(call);
	if (WARN_ON(!name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n", name);
	}

	return ret;
}

static int
__register_event(struct trace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		atomic_set(&call->refcnt, 0);
	else
		call->module = mod;

	return 0;
}

static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
{
	int rlen;
	int elen;

	/* Find the length of the eval value as a string */
	elen = snprintf(ptr, 0, "%ld", map->eval_value);
	/* Make sure there's enough room to replace the string with the value */
	if (len < elen)
		return NULL;

	snprintf(ptr, elen + 1, "%ld", map->eval_value);

	/* Get the rest of the string of ptr */
	rlen = strlen(ptr + len);
	memmove(ptr + elen, ptr + len, rlen);
	/* Make sure we end the new string */
	ptr[elen + rlen] = 0;

	return ptr + elen;
}

static void update_event_printk(struct trace_event_call *call,
				struct trace_eval_map *map)
{
	char *ptr;
	int quote = 0;
	int len = strlen(map->eval_string);

	for (ptr = call->print_fmt; *ptr; ptr++) {
		if (*ptr == '\\') {
			ptr++;
			/* paranoid */
			if (!*ptr)
				break;
			continue;
		}
		if (*ptr == '"') {
			quote ^= 1;
			continue;
		}
		if (quote)
			continue;
		if (isdigit(*ptr)) {
			/* skip numbers */
			do {
				ptr++;
				/* Check for alpha chars like ULL */
			} while (isalnum(*ptr));
			if (!*ptr)
				break;
			/*
			 * A number must have some kind of delimiter after
			 * it, and we can ignore that too.
			 */
			continue;
		}
		if (isalpha(*ptr) || *ptr == '_') {
			if (strncmp(map->eval_string, ptr, len) == 0 &&
			    !isalnum(ptr[len]) && ptr[len] != '_') {
				ptr = eval_replace(ptr, map, len);
				/* enum/sizeof string smaller than value */
				if (WARN_ON_ONCE(!ptr))
					return;
				/*
				 * No need to decrement here, as eval_replace()
				 * returns the pointer to the character past
				 * the eval, and two evals can not be placed
				 * back to back without something in between.
				 * We can skip that something in between.
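				 *
				 * For example (an illustrative sketch, not a
				 * literal format from any particular event),
				 * with an eval map of "TASK_RUNNING" => 0,
				 * a print_fmt fragment such as
				 *	REC->state == TASK_RUNNING
				 * becomes
				 *	REC->state == 0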
				 */
				continue;
			}
		skip_more:
			do {
				ptr++;
			} while (isalnum(*ptr) || *ptr == '_');
			if (!*ptr)
				break;
			/*
			 * If what comes after this variable is a '.' or
			 * '->' then we can continue to ignore that string.
			 */
			if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
				ptr += *ptr == '.' ? 1 : 2;
				if (!*ptr)
					break;
				goto skip_more;
			}
			/*
			 * Once again, we can skip the delimiter that came
			 * after the string.
			 */
			continue;
		}
	}
}

void trace_event_eval_update(struct trace_eval_map **map, int len)
{
	struct trace_event_call *call, *p;
	const char *last_system = NULL;
	bool first = false;
	int last_i;
	int i;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		/* events are usually grouped together with systems */
		if (!last_system || call->class->system != last_system) {
			first = true;
			last_i = 0;
			last_system = call->class->system;
		}

		/*
		 * Since calls are grouped by systems, the likelihood that the
		 * next call in the iteration belongs to the same system as the
		 * previous call is high. As an optimization, we skip searching
		 * for a map[] that matches the call's system if the last call
		 * was from the same system. That's what last_i is for. If the
		 * call has the same system as the previous call, then last_i
		 * will be the index of the first map[] that has a matching
		 * system.
		 */
		for (i = last_i; i < len; i++) {
			if (call->class->system == map[i]->system) {
				/* Save the first system if need be */
				if (first) {
					last_i = i;
					first = false;
				}
				update_event_printk(call, map[i]);
			}
		}
	}
	up_write(&trace_event_sem);
}

static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
		       struct trace_array *tr)
{
	struct trace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	atomic_set(&file->tm_ref, 0);
	INIT_LIST_HEAD(&file->triggers);
	list_add(&file->list, &tr->events);

	return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
{
	struct trace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	if (eventdir_initialized)
		return event_create_dir(tr->event_dir, file);
	else
		return event_define_fields(call);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
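 * The corresponding tracefs directories and files are attached
 * later, once tracefs is up (see __trace_early_add_event_dirs()).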
 */
static int
__trace_early_add_new_event(struct trace_event_call *call,
			    struct trace_array *tr)
{
	struct trace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return event_define_fields(call);
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct trace_event_call *call);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct trace_event_call *call)
{
	int ret;

	lockdep_assert_held(&event_mutex);

	mutex_lock(&trace_types_lock);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call);

	mutex_unlock(&trace_types_lock);
	return ret;
}

/*
 * Must be called under locking of trace_types_lock, event_mutex and
 * trace_event_sem.
 */
static void __trace_remove_event_call(struct trace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	free_event_filter(call->filter);
	call->filter = NULL;
}

static int probe_remove_event_call(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
	if (call->perf_refcount)
		return -EBUSY;
#endif
	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/*
		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
		 * that we are about to do; EVENT_FILE_FL_SOFT_MODE can suppress
		 * TRACE_REG_UNREGISTER.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return -EBUSY;
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	__trace_remove_event_call(call);

	return 0;
}

/* Remove an event_call */
int trace_remove_event_call(struct trace_event_call *call)
{
	int ret;

	lockdep_assert_held(&event_mutex);

	mutex_lock(&trace_types_lock);
	down_write(&trace_event_sem);
	ret = probe_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&trace_types_lock);

	return ret;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static void trace_module_add_events(struct module *mod)
{
	struct trace_event_call **call, **start, **end;

	if (!mod->num_trace_events)
		return;

	/* Don't add infrastructure for mods without tracepoints */
	if (trace_module_has_bad_taint(mod)) {
		pr_err("%s: module has bad taint, not creating trace events\n",
		       mod->name);
		return;
	}

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct trace_event_call *call, *p;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
			continue;
		if (call->module == mod)
			__trace_remove_event_call(call);
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	tracing_reset_all_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 1, /* higher than trace.c module notify */
};
#endif /* CONFIG_MODULES */

/*
 * Create a new event directory structure for a trace directory.
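 * This walks every event call registered in ftrace_events and adds
 * a trace_event_file (and, once tracefs is initialized, a directory)
 * for it to the given trace array.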
 */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		ret = __trace_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(call));
	}
}

/* Returns any file that matches the system and event */
struct trace_event_file *
__find_event_file(struct trace_array *tr, const char *system, const char *event)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class)
			continue;

		if (strcmp(event, name) == 0 &&
		    strcmp(system, call->class->system) == 0)
			return file;
	}
	return NULL;
}

/* Returns valid trace event files that match system and event */
struct trace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
	struct trace_event_file *file;

	file = __find_event_file(tr, system, event);
	if (!file || !file->event_call->class->reg ||
	    file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
		return NULL;

	return file;
}

/**
 * trace_get_event_file - Find and return a trace event file
 * @instance: The name of the trace instance containing the event
 * @system: The name of the system containing the event
 * @event: The name of the event
 *
 * Return a trace event file given the trace instance name, trace
 * system, and trace event name. If the instance name is NULL, it
 * refers to the top-level trace array.
 *
 * This function will look it up and return it if found, after calling
 * trace_array_get() to prevent the instance from going away, and
 * incrementing the event's module refcount to prevent it from being
 * removed.
 *
 * To release the file, call trace_put_event_file(), which will call
 * trace_array_put() and decrement the event's module refcount.
 *
 * Return: The trace event file on success, ERR_PTR otherwise.
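 *
 * A minimal usage sketch (the instance/system/event names here are
 * only examples):
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 *	// ... use the event file ...
 *
 *	trace_put_event_file(file);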
 */
struct trace_event_file *trace_get_event_file(const char *instance,
					      const char *system,
					      const char *event)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_file *file = NULL;
	int ret = -EINVAL;

	if (instance) {
		tr = trace_array_find_get(instance);
		if (!tr)
			return ERR_PTR(-ENOENT);
	} else {
		ret = trace_array_get(tr);
		if (ret)
			return ERR_PTR(ret);
	}

	mutex_lock(&event_mutex);

	file = find_event_file(tr, system, event);
	if (!file) {
		trace_array_put(tr);
		ret = -EINVAL;
		goto out;
	}

	/* Don't let event modules unload while in use */
	ret = trace_event_try_get_ref(file->event_call);
	if (!ret) {
		trace_array_put(tr);
		ret = -EBUSY;
		goto out;
	}

	ret = 0;
 out:
	mutex_unlock(&event_mutex);

	if (ret)
		file = ERR_PTR(ret);

	return file;
}
EXPORT_SYMBOL_GPL(trace_get_event_file);

/**
 * trace_put_event_file - Release a file from trace_get_event_file()
 * @file: The trace event file
 *
 * If a file was retrieved using trace_get_event_file(), this should
 * be called when it's no longer needed. It will cancel the previous
 * trace_array_get() called by that function, and decrement the
 * event's module refcount.
 */
void trace_put_event_file(struct trace_event_file *file)
{
	mutex_lock(&event_mutex);
	trace_event_put_ref(file->event_call);
	mutex_unlock(&event_mutex);

	trace_array_put(file->tr);
}
EXPORT_SYMBOL_GPL(trace_put_event_file);

#ifdef CONFIG_DYNAMIC_FTRACE

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

struct event_probe_data {
	struct trace_event_file	*file;
	unsigned long		count;
	int			ref;
	bool			enable;
};

static void update_event_probe(struct event_probe_data *data)
{
	if (data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
}

static void
event_enable_probe(unsigned long ip, unsigned long parent_ip,
		   struct trace_array *tr, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	struct event_probe_data *edata;
	void **pdata;

	pdata = ftrace_func_mapper_find_ip(mapper, ip);
	if (!pdata || !*pdata)
		return;

	edata = *pdata;
	update_event_probe(edata);
}

static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
			 struct trace_array *tr, struct ftrace_probe_ops *ops,
			 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	struct event_probe_data *edata;
	void **pdata;

	pdata = ftrace_func_mapper_find_ip(mapper, ip);
	if (!pdata || !*pdata)
		return;

	edata = *pdata;

	if (!edata->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (edata->count != -1)
		(edata->count)--;

	update_event_probe(edata);
}

static int
event_enable_print(struct seq_file *m, unsigned long ip,
		   struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	struct event_probe_data *edata;
	void **pdata;

	pdata = ftrace_func_mapper_find_ip(mapper, ip);

	if (WARN_ON_ONCE(!pdata || !*pdata))
		return 0;

	edata = *pdata;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "%s:%s:%s",
		   edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   edata->file->event_call->class->system,
		   trace_event_name(edata->file->event_call));

	if (edata->count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", edata->count);

	return 0;
}

static int
event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;
	struct event_probe_data *edata = init_data;
	int ret;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENODEV;
		*data = mapper;
	}

	ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
	if (ret < 0)
		return ret;

	edata->ref++;

	return 0;
}

static int free_probe_data(void *data)
{
	struct event_probe_data *edata = data;

	edata->ref--;
	if (!edata->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(edata->file, 0, 1);
		trace_event_put_ref(edata->file->event_call);
		kfree(edata);
	}
	return 0;
}

static void
event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	struct event_probe_data *edata;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, free_probe_data);
		return;
	}

	edata = ftrace_func_mapper_remove_ip(mapper, ip);

	if (WARN_ON_ONCE(!edata))
		return;

	if (WARN_ON_ONCE(edata->ref <= 0))
		return;

	free_probe_data(edata);
}

static struct ftrace_probe_ops event_enable_probe_ops = {
	.func = event_enable_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func = event_enable_count_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
	.func = event_enable_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func = event_enable_count_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static int
event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled || !param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

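	/*
	 * At this point the probe parameter has been parsed as
	 * <system>:<event>[:count]. For example, writing
	 * "schedule:enable_event:sched:sched_switch:2" into
	 * set_ftrace_filter lands here with system "sched",
	 * event "sched_switch" and param "2".
	 */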
	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	if (glob[0] == '!') {
		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
		goto out;
	}

	ret = -ENOMEM;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = trace_event_try_get_ref(file->event_call);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;

	ret = register_ftrace_function_probe(glob, tr, ops, data);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	trace_event_put_ref(file->event_call);
 out_free:
	kfree(data);
	goto out;
}

static struct ftrace_func_command event_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.func = event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.func = event_enable_func,
};

static __init int register_event_cmds(void)
{
	int ret;

	ret = register_ftrace_command(&event_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_ftrace_command(&event_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_ftrace_command(&event_enable_cmd);
	return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * The top level array and trace arrays created by boot-time tracing
 * have already had their trace_event_file descriptors created in order
 * to allow for early events to be recorded.
 * This function is called after tracefs has been initialized, and we
 * now have to create the files associated with those events.
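 * (Those early descriptors were set up by __trace_early_add_events().)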
 */
static void __trace_early_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(file->event_call));
	}
}

/*
 * For early boot up, the top trace array and the trace arrays created
 * by boot-time tracing need to have a list of events that can be
 * enabled. This must be done before the filesystem is set up in order
 * to allow events to be traced early.
 */
void __trace_early_add_events(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
		    WARN_ON_ONCE(call->module))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create early event %s\n",
				trace_event_name(call));
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list)
		remove_event_file_dir(file);
}

static void __add_event_to_tracers(struct trace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}

extern struct trace_event_call *__start_ftrace_events[];
extern struct trace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	disable_tracing_selftest("running event tracing");

	return 1;
}
__setup("trace_event=", setup_trace_event);

/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = tracefs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warn("Could not create tracefs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = tracefs_create_dir("events", parent);
	if (!d_events) {
		pr_warn("Could not create tracefs 'events' directory\n");
		return -ENOMEM;
	}

	entry = trace_create_file("enable", 0644, d_events,
				  tr, &ftrace_tr_enable_fops);
	if (!entry) {
		pr_warn("Could not create tracefs 'enable' entry\n");
		return -ENOMEM;
	}

	/* These are not as crucial; just warn if they are not created */

	entry = tracefs_create_file("set_event_pid", 0644, parent,
				    tr, &ftrace_set_event_pid_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'set_event_pid' entry\n");

	entry = tracefs_create_file("set_event_notrace_pid", 0644, parent,
				    tr, &ftrace_set_event_notrace_pid_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");

	/* ring buffer internal formats */
	entry = trace_create_file("header_page", 0444, d_events,
				  ring_buffer_print_page_header,
				  &ftrace_show_header_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'header_page' entry\n");

	entry = trace_create_file("header_event", 0444, d_events,
				  ring_buffer_print_entry_header,
				  &ftrace_show_header_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'header_event' entry\n");

	tr->event_dir = d_events;

	return 0;
}

/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 *
 * Must be called with event_mutex held.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	lockdep_assert_held(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out;

	down_write(&trace_event_sem);
	/* If tr already has the event list, it is initialized in early boot. */
	if (unlikely(!list_empty(&tr->events)))
		__trace_early_add_event_dirs(tr);
	else
		__trace_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out:
	return ret;
}

/*
 * The top trace array already had its file descriptors created.
 * Now the files themselves need to be created.
 */
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_sem);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

/* Must be called with event_mutex held */
int event_trace_del_tracer(struct trace_array *tr)
{
	lockdep_assert_held(&event_mutex);

	/* Disable any event triggers and associated soft-disabled events */
	clear_event_triggers(tr);

	/* Clear the pid list */
	__ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);

	/* Disable any running events */
	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

	/* Make sure no more events are being executed */
	tracepoint_synchronize_unregister();

	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	tracefs_remove(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	return 0;
}

static __init int event_trace_memsetup(void)
{
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
	return 0;
}

static __init void
early_enable_events(struct trace_array *tr, bool disable_first)
{
	char *buf = bootup_event_buf;
	char *token;
	int ret;

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;

		if (*token) {
			/* Restarting syscalls requires that we stop them first */
			if (disable_first)
				ftrace_set_clr_event(tr, token, 0);

			ret = ftrace_set_clr_event(tr, token, 1);
			if (ret)
				pr_warn("Failed to enable trace event: %s\n", token);
		}

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}

static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_call **iter, *call;
	int ret;

	if (!tr)
		return -ENODEV;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);

	early_enable_events(tr, false);

	trace_printk_start_comm();

	register_event_cmds();

	register_trigger_cmds();

	return 0;
}

/*
 * event_trace_enable() is called from trace_event_init() first to
 * initialize events and perhaps start any events that are on the
 * command line. Unfortunately, there are some events that will not
 * start this early, like the system call tracepoints that need
 * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But
 * event_trace_enable() is called before pid 1 starts, and this flag
 * is never set, making the syscall tracepoint never get reached, but
 * the event is enabled regardless (and not doing anything).
 */
static __init int event_trace_enable_again(void)
{
	struct trace_array *tr;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	early_enable_events(tr, true);

	return 0;
}

early_initcall(event_trace_enable_again);

/* Init fields that don't relate to tracefs */
static __init int event_trace_init_fields(void)
{
	if (trace_define_generic_fields())
		pr_warn("tracing: Failed to allocate generic fields");

	if (trace_define_common_fields())
		pr_warn("tracing: Failed to allocate common fields");

	return 0;
}

__init int event_trace_init(void)
{
	struct trace_array *tr;
	struct dentry *entry;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	entry = tracefs_create_file("available_events", 0444, NULL,
				    tr, &ftrace_avail_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'available_events' entry\n");

	ret = early_event_add_tracer(NULL, tr);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warn("Failed to register trace events module notifier\n");
#endif

	eventdir_initialized = true;

	return 0;
}

void __init trace_event_init(void)
{
	event_trace_memsetup();
	init_ftrace_syscalls();
	event_trace_enable();
	event_trace_init_fields();
}

#ifdef CONFIG_EVENT_TRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct trace_subsystem_dir *dir;
	struct trace_event_file *file;
	struct trace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

		/*
		 * Testing syscall events here is pretty useless, but
		 * we still do it if configured. But this is time consuming.
		 * What we really need is a user thread to perform the
		 * syscalls as we test.
		 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", trace_event_name(call));

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
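		 * Warn and skip the event rather than disturb
		 * whatever enabled it.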
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED) {
			pr_warn("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error enabling system %s\n",
				system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error disabling system %s\n",
				system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static struct trace_event_file event_trace_file __initdata;

static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *regs)
{
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned int trace_ctx;
	long disabled;
	int cpu;

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
						TRACE_FN, sizeof(*entry),
						trace_ctx);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	event_trigger_unlock_commit(&event_trace_file, buffer, event,
				    entry, trace_ctx);
 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata = {
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	event_trace_file.tr = top_trace_array();
	if (WARN_ON(!event_trace_file.tr))
		return;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void
event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif