/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
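
/*
 * Illustrative example (not from the original source): a dynamic event
 * defining a signed "int pid" field at offset 8 would call
 *
 *	trace_define_field(call, "int", "pid", 8, 4, 1, FILTER_OTHER);
 *
 * and the field would then appear in the event's "format" file as
 *
 *	field:int pid;	offset:8;	size:4;	signed:1;
 */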
#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	if (!pid_list)
		return false;

	data = this_cpu_ptr(tr->trace_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
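
/*
 * Worked example for trace_event_get_offsets() above (illustrative):
 * if the field added last was "int pid" at offset 8 with size 4, the
 * function returns 8 + 4 = 12, the end of the fixed-size field data.
 */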
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	local_save_flags(fbuffer->flags);
	fbuffer->pc = preempt_count();
	/*
	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		fbuffer->pc--;
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->flags, fbuffer->pc);
	if (!fbuffer->event)
		return NULL;

	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
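
/*
 * Sketch of how a probe uses this (illustrative; the real callers are
 * generated by the TRACE_EVENT() machinery, and "trace_event_raw_foo"
 * is a hypothetical generated entry type):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct trace_event_raw_foo *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->bar = bar;
 *	trace_event_buffer_commit(&fbuffer);
 */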
int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	unsigned long file_flags = file->flags;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	/* Enable or disable use of trace_buffered_event */
	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
			trace_buffered_event_enable();
		else
			trace_buffered_event_disable();
	}

	return ret;
}
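
/*
 * Example of the soft-disable life cycle (illustrative): an event
 * trigger on an otherwise-disabled event calls this with enable=1,
 * soft_disable=1.  The tracepoint gets registered so the trigger can
 * fire, but SOFT_DISABLED stays set, so nothing is recorded and the
 * event's "enable" file reads "0*".  A second trigger only bumps
 * sm_ref; the tracepoint is unregistered again only when the last
 * soft reference is dropped.
 */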
int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, prev) &&
		       trace_ignore_this_task(pid_list, next));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, task));
}
static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

static void __ftrace_clear_event_pids(struct trace_array *tr)
{
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	if (!pid_list)
		return;

	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);

	list_for_each_entry(file, &tr->events, list) {
		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	for_each_possible_cpu(cpu)
		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;

	rcu_assign_pointer(tr->filtered_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_sched();

	trace_free_pid_list(pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}
static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}
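
/*
 * Illustrative shell usage of the buf format parsed above, via the
 * tracefs "set_event" file:
 *
 *	echo sched:sched_switch > set_event	# a single event
 *	echo 'sched:*' > set_event		# every event in "sched"
 *	echo sched_switch > set_event		# match by name alone
 *
 * A leading '!' (stripped by ftrace_event_write() below) disables the
 * matching events instead.
 */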
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
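
/*
 * t_show() produces one "system:event" line per entry, so reading
 * "available_events" (or "set_event") yields, for example:
 *
 *	sched:sched_switch
 *	irq:irq_handler_entry
 */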
static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
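
/*
 * Per the logic above, "cat events/<sys>/<event>/enable" returns one
 * of (illustrative):
 *
 *	"0"  - disabled
 *	"1"  - enabled
 *	"0*" - disabled, but soft-enabled (e.g. held by a trigger)
 *	"1*" - enabled, and also in soft mode
 */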
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
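
/*
 * The "set" bitmask above maps directly to the character read from a
 * subsystem "enable" file: '0' all matching events disabled, '1' all
 * enabled, 'X' a mixture, and '?' when no matching event was found.
 */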
enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}
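
/*
 * Put together, the f_* iterator renders a "format" file along these
 * lines (illustrative; the ID and fields depend on the event):
 *
 *	name: sched_switch
 *	ID: 312
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *		field:char prev_comm[16];	offset:8;	size:16;	signed:1;
 *		...
 *
 *	print fmt: "prev_comm=%s ...", REC->prev_comm, ...
 */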
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (*ppos)
		return 0;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return 0;
}
static int subsystem_release(struct inode *inode, struct file *file)
{
	struct trace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = apply_subsystem_event_filter(dir, buf);
	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     mutex_is_locked(&event_mutex));

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}
1635 */ 1636 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre, 1637 tr, INT_MAX); 1638 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post, 1639 tr, 0); 1640 1641 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, 1642 tr, INT_MAX); 1643 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, 1644 tr, 0); 1645 1646 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, 1647 tr, INT_MAX); 1648 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, 1649 tr, 0); 1650 1651 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre, 1652 tr, INT_MAX); 1653 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post, 1654 tr, 0); 1655 } 1656 1657 /* 1658 * Ignoring of pids is done at task switch. But we have to 1659 * check for those tasks that are currently running. 1660 * Always do this in case a pid was appended or removed. 1661 */ 1662 on_each_cpu(ignore_task_cpu, tr, 1); 1663 1664 out: 1665 mutex_unlock(&event_mutex); 1666 1667 if (ret > 0) 1668 *ppos += ret; 1669 1670 return ret; 1671 } 1672 1673 static int ftrace_event_avail_open(struct inode *inode, struct file *file); 1674 static int ftrace_event_set_open(struct inode *inode, struct file *file); 1675 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file); 1676 static int ftrace_event_release(struct inode *inode, struct file *file); 1677 1678 static const struct seq_operations show_event_seq_ops = { 1679 .start = t_start, 1680 .next = t_next, 1681 .show = t_show, 1682 .stop = t_stop, 1683 }; 1684 1685 static const struct seq_operations show_set_event_seq_ops = { 1686 .start = s_start, 1687 .next = s_next, 1688 .show = t_show, 1689 .stop = t_stop, 1690 }; 1691 1692 static const struct seq_operations show_set_pid_seq_ops = { 1693 .start = p_start, 1694 .next = p_next, 1695 .show = trace_pid_show, 1696 .stop = p_stop, 1697 }; 1698 1699 static const struct file_operations ftrace_avail_fops = { 1700 .open = ftrace_event_avail_open, 1701 .read = seq_read, 1702 .llseek = seq_lseek, 1703 .release = seq_release, 1704 }; 1705 1706 static const struct file_operations ftrace_set_event_fops = { 1707 .open = ftrace_event_set_open, 1708 .read = seq_read, 1709 .write = ftrace_event_write, 1710 .llseek = seq_lseek, 1711 .release = ftrace_event_release, 1712 }; 1713 1714 static const struct file_operations ftrace_set_event_pid_fops = { 1715 .open = ftrace_event_set_pid_open, 1716 .read = seq_read, 1717 .write = ftrace_event_pid_write, 1718 .llseek = seq_lseek, 1719 .release = ftrace_event_release, 1720 }; 1721 1722 static const struct file_operations ftrace_enable_fops = { 1723 .open = tracing_open_generic, 1724 .read = event_enable_read, 1725 .write = event_enable_write, 1726 .llseek = default_llseek, 1727 }; 1728 1729 static const struct file_operations ftrace_event_format_fops = { 1730 .open = trace_format_open, 1731 .read = seq_read, 1732 .llseek = seq_lseek, 1733 .release = seq_release, 1734 }; 1735 1736 static const struct file_operations ftrace_event_id_fops = { 1737 .read = event_id_read, 1738 .llseek = default_llseek, 1739 }; 1740 1741 static const struct file_operations ftrace_event_filter_fops = { 1742 .open = tracing_open_generic, 1743 .read = event_filter_read, 1744 .write = event_filter_write, 1745 .llseek = default_llseek, 1746 }; 1747 1748 static const struct file_operations ftrace_subsystem_filter_fops = { 1749 .open = subsystem_open, 1750 .read = 
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_pid_seq_ops = {
	.start = p_start,
	.next = p_next,
	.show = trace_pid_show,
	.stop = p_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_set_event_pid_fops = {
	.open = ftrace_event_set_pid_open,
	.read = seq_read,
	.write = ftrace_event_pid_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int ftrace_event_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static int
ftrace_event_set_pid_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_event_pids(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;

	/* Only allocate if dynamic (kprobes and modules) */
	system->name = kstrdup_const(name, GFP_KERNEL);
	if (!system->name)
		goto out_free;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree_const(system->name);
	kfree(system);
	return NULL;
}
static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct trace_event_file *file, struct dentry *parent)
{
	struct trace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = tracefs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warn("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = tracefs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warn("Could not create tracefs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warn("No memory to create event subsystem %s\n", name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent, struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	const char *name;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	name = trace_event_name(call);
	file->dir = tracefs_create_dir(name, d_events);
	if (!file->dir) {
		pr_warn("Could not create tracefs '%s' directory\n", name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  &ftrace_enable_fops);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type,
				  &ftrace_event_id_fops);
#endif
	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warn("Could not initialize trace point events/%s\n",
				name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, file,
			  &ftrace_event_filter_fops);

	/*
	 * Only event directories that can be enabled should have
	 * triggers.
	 */
	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);

#ifdef CONFIG_HIST_TRIGGERS
	trace_create_file("hist", 0444, file->dir, file,
			  &event_hist_fops);
#endif
	trace_create_file("format", 0444, file->dir, call,
			  &ftrace_event_format_fops);

	return 0;
}

static void remove_event_from_tracers(struct trace_event_call *call)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}
static void event_remove(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;

		if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
			tr->clear_trace = true;

		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_trace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct trace_event_call *call)
{
	int ret = 0;
	const char *name;

	name = trace_event_name(call);
	if (WARN_ON(!name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n", name);
	}

	return ret;
}

static int
__register_event(struct trace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
{
	int rlen;
	int elen;

	/* Find the length of the eval value as a string */
	elen = snprintf(ptr, 0, "%ld", map->eval_value);
	/* Make sure there's enough room to replace the string with the value */
	if (len < elen)
		return NULL;

	snprintf(ptr, elen + 1, "%ld", map->eval_value);

	/* Get the rest of the string of ptr */
	rlen = strlen(ptr + len);
	memmove(ptr + elen, ptr + len, rlen);
	/* Make sure we end the new string */
	ptr[elen + rlen] = 0;

	return ptr + elen;
}

static void update_event_printk(struct trace_event_call *call,
				struct trace_eval_map *map)
{
	char *ptr;
	int quote = 0;
	int len = strlen(map->eval_string);

	for (ptr = call->print_fmt; *ptr; ptr++) {
		if (*ptr == '\\') {
			ptr++;
			/* paranoid */
			if (!*ptr)
				break;
			continue;
		}
		if (*ptr == '"') {
			quote ^= 1;
			continue;
		}
		if (quote)
			continue;
		if (isdigit(*ptr)) {
			/* skip numbers */
			do {
				ptr++;
				/* Check for alpha chars like ULL */
			} while (isalnum(*ptr));
			if (!*ptr)
				break;
			/*
			 * A number must have some kind of delimiter after
			 * it, and we can ignore that too.
			 */
			continue;
		}
		if (isalpha(*ptr) || *ptr == '_') {
			if (strncmp(map->eval_string, ptr, len) == 0 &&
			    !isalnum(ptr[len]) && ptr[len] != '_') {
				ptr = eval_replace(ptr, map, len);
				/* enum/sizeof string smaller than value */
				if (WARN_ON_ONCE(!ptr))
					return;
				/*
				 * No need to decrement here, as eval_replace()
				 * returns the pointer to the character past
				 * the eval, and two evals can not be placed
				 * back to back without something in between.
				 * We can skip that something in between.
				 */
				continue;
			}
		skip_more:
			do {
				ptr++;
			} while (isalnum(*ptr) || *ptr == '_');
			if (!*ptr)
				break;
			/*
			 * If what comes after this variable is a '.' or
			 * '->' then we can continue to ignore that string.
			 */
			if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
				ptr += *ptr == '.' ? 1 : 2;
				if (!*ptr)
					break;
				goto skip_more;
			}
			/*
			 * Once again, we can skip the delimiter that came
			 * after the string.
			 */
			continue;
		}
	}
}
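
/*
 * Worked example (illustrative): with map->eval_string = "HI_SOFTIRQ"
 * and map->eval_value = 0, a print_fmt of
 *
 *	"vec=%u [action=%s]", REC->vec, REC->vec == HI_SOFTIRQ ? "HI" : "?"
 *
 * is rewritten in place so that "HI_SOFTIRQ" becomes "0".  Text inside
 * the double quotes is skipped (the quote toggle above), so only the
 * bare identifier outside the format string is replaced.
 */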
static void update_event_printk(struct trace_event_call *call,
				struct trace_eval_map *map)
{
	char *ptr;
	int quote = 0;
	int len = strlen(map->eval_string);

	for (ptr = call->print_fmt; *ptr; ptr++) {
		if (*ptr == '\\') {
			ptr++;
			/* paranoid */
			if (!*ptr)
				break;
			continue;
		}
		if (*ptr == '"') {
			quote ^= 1;
			continue;
		}
		if (quote)
			continue;
		if (isdigit(*ptr)) {
			/* skip numbers */
			do {
				ptr++;
				/* Check for alpha chars like ULL */
			} while (isalnum(*ptr));
			if (!*ptr)
				break;
			/*
			 * A number must have some kind of delimiter after
			 * it, and we can ignore that too.
			 */
			continue;
		}
		if (isalpha(*ptr) || *ptr == '_') {
			if (strncmp(map->eval_string, ptr, len) == 0 &&
			    !isalnum(ptr[len]) && ptr[len] != '_') {
				ptr = eval_replace(ptr, map, len);
				/* enum/sizeof string smaller than value */
				if (WARN_ON_ONCE(!ptr))
					return;
				/*
				 * No need to decrement here, as eval_replace()
				 * returns the pointer to the character past
				 * the eval, and two evals cannot be placed
				 * back to back without something in between.
				 * We can skip that something in between.
				 */
				continue;
			}
 skip_more:
			do {
				ptr++;
			} while (isalnum(*ptr) || *ptr == '_');
			if (!*ptr)
				break;
			/*
			 * If what comes after this variable is a '.' or
			 * '->' then we can continue to ignore that string.
			 */
			if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
				ptr += *ptr == '.' ? 1 : 2;
				if (!*ptr)
					break;
				goto skip_more;
			}
			/*
			 * Once again, we can skip the delimiter that comes
			 * after the string.
			 */
			continue;
		}
	}
}

void trace_event_eval_update(struct trace_eval_map **map, int len)
{
	struct trace_event_call *call, *p;
	const char *last_system = NULL;
	bool first = false;
	int last_i;
	int i;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		/* events are usually grouped together with systems */
		if (!last_system || call->class->system != last_system) {
			first = true;
			last_i = 0;
			last_system = call->class->system;
		}

		/*
		 * Since calls are grouped by systems, the likelihood that the
		 * next call in the iteration belongs to the same system as the
		 * previous call is high. As an optimization, we skip searching
		 * for a map[] that matches the call's system if the last call
		 * was from the same system. That's what last_i is for. If the
		 * call has the same system as the previous call, then last_i
		 * will be the index of the first map[] that has a matching
		 * system.
		 */
		for (i = last_i; i < len; i++) {
			if (call->class->system == map[i]->system) {
				/* Save the first system if need be */
				if (first) {
					last_i = i;
					first = false;
				}
				update_event_printk(call, map[i]);
			}
		}
	}
	up_write(&trace_event_sem);
}

static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
		       struct trace_array *tr)
{
	struct trace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	atomic_set(&file->tm_ref, 0);
	INIT_LIST_HEAD(&file->triggers);
	list_add(&file->list, &tr->events);

	return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
{
	struct trace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return event_create_dir(tr->event_dir, file);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct trace_event_call *call,
			    struct trace_array *tr)
{
	struct trace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return 0;
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct trace_event_call *call);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct trace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
	return ret;
}
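
/*
 * A minimal usage sketch (hypothetical caller): dynamic event
 * implementations, e.g. kprobe events, build a trace_event_call at
 * runtime and hand it to this pair of helpers:
 *
 *	ret = trace_add_event_call(my_call);	// register, create files
 *	...
 *	ret = trace_remove_event_call(my_call);	// -EBUSY while in use
 *
 * Both helpers take event_mutex and trace_types_lock themselves, so
 * they must not be called with those locks held.
 */
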
/*
 * Must be called with trace_types_lock, event_mutex and
 * trace_event_sem held.
 */
static void __trace_remove_event_call(struct trace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	free_event_filter(call->filter);
	call->filter = NULL;
}

static int probe_remove_event_call(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
	if (call->perf_refcount)
		return -EBUSY;
#endif
	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/*
		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
		 * we are about to do, as EVENT_FILE_FL_SOFT_MODE can suppress
		 * the TRACE_REG_UNREGISTER. So refuse to remove the call while
		 * any of its files is still enabled.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return -EBUSY;
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	__trace_remove_event_call(call);

	return 0;
}

/* Remove an event_call */
int trace_remove_event_call(struct trace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	down_write(&trace_event_sem);
	ret = probe_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
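
/*
 * Background on what for_each_event() walks (not specific to this
 * file): the TRACE_EVENT() machinery emits a pointer to each event's
 * trace_event_call into the "_ftrace_events" linker section, so
 * built-in events live in the array delimited by
 * __start_ftrace_events/__stop_ftrace_events, while a module's events
 * live in mod->trace_events[0..num_trace_events-1].
 */
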
#ifdef CONFIG_MODULES

static void trace_module_add_events(struct module *mod)
{
	struct trace_event_call **call, **start, **end;

	if (!mod->num_trace_events)
		return;

	/* Don't add infrastructure for mods without tracepoints */
	if (trace_module_has_bad_taint(mod)) {
		pr_err("%s: module has bad taint, not creating trace events\n",
		       mod->name);
		return;
	}

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct trace_event_call *call, *p;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod)
			__trace_remove_event_call(call);
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	tracing_reset_all_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 1, /* higher than trace.c module notify */
};
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		ret = __trace_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(call));
	}
}

struct trace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (strcmp(event, name) == 0 &&
		    strcmp(system, call->class->system) == 0)
			return file;
	}
	return NULL;
}

#ifdef CONFIG_DYNAMIC_FTRACE

/* Avoid typos */
#define ENABLE_EVENT_STR "enable_event"
#define DISABLE_EVENT_STR "disable_event"

struct event_probe_data {
	struct trace_event_file *file;
	unsigned long count;
	int ref;
	bool enable;
};

static void update_event_probe(struct event_probe_data *data)
{
	if (data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
}

static void
event_enable_probe(unsigned long ip, unsigned long parent_ip,
		   struct trace_array *tr, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	struct event_probe_data *edata;
	void **pdata;

	pdata = ftrace_func_mapper_find_ip(mapper, ip);
	if (!pdata || !*pdata)
		return;

	edata = *pdata;
	update_event_probe(edata);
}

static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
			 struct trace_array *tr, struct ftrace_probe_ops *ops,
			 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	struct event_probe_data *edata;
	void **pdata;

	pdata = ftrace_func_mapper_find_ip(mapper, ip);
	if (!pdata || !*pdata)
		return;

	edata = *pdata;

	if (!edata->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (edata->count != -1)
		(edata->count)--;

	update_event_probe(edata);
}
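
/*
 * Count semantics of the probe above: a count of -1 (no ":count"
 * given at registration) lets the probe flip the event an unlimited
 * number of times; a positive count is decremented once per actual
 * flip, and when it reaches zero the probe stops doing anything.
 * Hits that find the event already in the desired state do not
 * consume the count.
 */
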
static int
event_enable_print(struct seq_file *m, unsigned long ip,
		   struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	struct event_probe_data *edata;
	void **pdata;

	pdata = ftrace_func_mapper_find_ip(mapper, ip);

	if (WARN_ON_ONCE(!pdata || !*pdata))
		return 0;

	edata = *pdata;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "%s:%s:%s",
		   edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   edata->file->event_call->class->system,
		   trace_event_name(edata->file->event_call));

	if (edata->count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", edata->count);

	return 0;
}

static int
event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;
	struct event_probe_data *edata = init_data;
	int ret;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENODEV;
		*data = mapper;
	}

	ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
	if (ret < 0)
		return ret;

	edata->ref++;

	return 0;
}

static int free_probe_data(void *data)
{
	struct event_probe_data *edata = data;

	edata->ref--;
	if (!edata->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(edata->file, 0, 1);
		module_put(edata->file->event_call->mod);
		kfree(edata);
	}
	return 0;
}

static void
event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	struct event_probe_data *edata;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, free_probe_data);
		return;
	}

	edata = ftrace_func_mapper_remove_ip(mapper, ip);

	if (WARN_ON_ONCE(!edata))
		return;

	if (WARN_ON_ONCE(edata->ref <= 0))
		return;

	free_probe_data(edata);
}

static struct ftrace_probe_ops event_enable_probe_ops = {
	.func = event_enable_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func = event_enable_count_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
	.func = event_enable_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func = event_enable_count_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};
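
/*
 * The command implemented below is used through set_ftrace_filter.
 * For example (function and event names are only illustrative):
 *
 *	# echo 'schedule:enable_event:sched:sched_switch:2' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *
 * registers a probe on schedule() that soft-enables the
 * sched:sched_switch event when hit, flipping its state at most two
 * times; omitting the trailing ":2" removes the limit. Prefixing the
 * line with '!' unregisters the probe again.
 */
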
static int
event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled || !param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	if (glob[0] == '!') {
		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
		goto out;
	}

	ret = -ENOMEM;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;

	ret = register_ftrace_function_probe(glob, tr, ops, data);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
 out_free:
	kfree(data);
	goto out;
}

static struct ftrace_func_command event_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.func = event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.func = event_enable_func,
};

static __init int register_event_cmds(void)
{
	int ret;

	ret = register_ftrace_command(&event_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_ftrace_command(&event_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_ftrace_command(&event_enable_cmd);
	return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * The top level array has already had its trace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after tracefs has been
 * initialized, and we now have to create the files associated
 * with the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(file->event_call));
	}
}

/*
 * For early boot up, the top trace array needs a list of events
 * that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create early event %s\n",
				trace_event_name(call));
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list)
		remove_event_file_dir(file);
}

static void __add_event_to_tracers(struct trace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}

extern struct trace_event_call *__start_ftrace_events[];
extern struct trace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);
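
/*
 * Example kernel command line usage (the event names are only
 * illustrative):
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * The buffer is parsed later by early_enable_events(): once very
 * early from event_trace_enable(), and once more from
 * event_trace_enable_again() when pid 1 is running.
 */
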
/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = tracefs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warn("Could not create tracefs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = tracefs_create_dir("events", parent);
	if (!d_events) {
		pr_warn("Could not create tracefs 'events' directory\n");
		return -ENOMEM;
	}

	entry = trace_create_file("enable", 0644, d_events,
				  tr, &ftrace_tr_enable_fops);
	if (!entry) {
		pr_warn("Could not create tracefs 'enable' entry\n");
		return -ENOMEM;
	}

	/* These are not as crucial, just warn if they are not created */

	entry = tracefs_create_file("set_event_pid", 0644, parent,
				    tr, &ftrace_set_event_pid_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'set_event_pid' entry\n");

	/* ring buffer internal formats */
	entry = trace_create_file("header_page", 0444, d_events,
				  ring_buffer_print_page_header,
				  &ftrace_show_header_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'header_page' entry\n");

	entry = trace_create_file("header_event", 0444, d_events,
				  ring_buffer_print_entry_header,
				  &ftrace_show_header_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'header_event' entry\n");

	tr->event_dir = d_events;

	return 0;
}
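
/*
 * The resulting top-level layout for an instance (sketch):
 *
 *	<parent>/
 *		set_event	(0644) enable events by name
 *		set_event_pid	(0644) limit events to the given pids
 *		events/
 *			enable		(0644) master enable for all events
 *			header_page	(0444) ring buffer page format
 *			header_event	(0444) ring buffer entry format
 *			<system>/<event>/...	(added separately)
 */
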
/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 *
 * Must be called with event_mutex held.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	lockdep_assert_held(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out;

	down_write(&trace_event_sem);
	__trace_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out:
	return ret;
}

/*
 * The top trace array already had its file descriptors created.
 * Now the files themselves need to be created.
 */
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_sem);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

/* Must be called with event_mutex held */
int event_trace_del_tracer(struct trace_array *tr)
{
	lockdep_assert_held(&event_mutex);

	/* Disable any event triggers and associated soft-disabled events */
	clear_event_triggers(tr);

	/* Clear the pid list */
	__ftrace_clear_event_pids(tr);

	/* Disable any running events */
	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

	/* Accesses to events are within rcu_read_lock_sched() */
	synchronize_sched();

	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	tracefs_remove_recursive(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	return 0;
}

static __init int event_trace_memsetup(void)
{
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
	return 0;
}

static __init void
early_enable_events(struct trace_array *tr, bool disable_first)
{
	char *buf = bootup_event_buf;
	char *token;
	int ret;

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;

		if (*token) {
			/* Restarting syscalls requires that we stop them first */
			if (disable_first)
				ftrace_set_clr_event(tr, token, 0);

			ret = ftrace_set_clr_event(tr, token, 1);
			if (ret)
				pr_warn("Failed to enable trace event: %s\n", token);
		}

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
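
/*
 * Worked example of the comma handling above: with bootup_event_buf
 * containing "sched:sched_switch,irq", the first strsep() replaces
 * the ',' with '\0' and leaves buf pointing at "irq". Restoring the
 * comma via *(buf - 1) after each token keeps the buffer intact, so
 * early_enable_events() can run a second time from
 * event_trace_enable_again().
 */
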
static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_call **iter, *call;
	int ret;

	if (!tr)
		return -ENODEV;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);

	early_enable_events(tr, false);

	trace_printk_start_comm();

	register_event_cmds();

	register_trigger_cmds();

	return 0;
}

/*
 * event_trace_enable() is called from trace_event_init() first to
 * initialize events and perhaps start any events that are on the
 * command line. Unfortunately, there are some events that will not
 * start this early, like the system call tracepoints that need
 * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But
 * event_trace_enable() is called before pid 1 starts, so the flag
 * never gets set and the syscall tracepoint is never reached, even
 * though the event is enabled (and does nothing). To fix that,
 * event_trace_enable_again() re-runs the command line enables from
 * an early_initcall, disabling each event first so that syscall
 * events are properly restarted.
 */
static __init int event_trace_enable_again(void)
{
	struct trace_array *tr;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	early_enable_events(tr, true);

	return 0;
}

early_initcall(event_trace_enable_again);

static __init int event_trace_init(void)
{
	struct trace_array *tr;
	struct dentry *d_tracer;
	struct dentry *entry;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("available_events", 0444, d_tracer,
				    tr, &ftrace_avail_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'available_events' entry\n");

	if (trace_define_generic_fields())
		pr_warn("tracing: Failed to allocate generic fields\n");

	if (trace_define_common_fields())
		pr_warn("tracing: Failed to allocate common fields\n");

	ret = early_event_add_tracer(d_tracer, tr);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warn("Failed to register trace events module notifier\n");
#endif
	return 0;
}

void __init trace_event_init(void)
{
	event_trace_memsetup();
	init_ftrace_syscalls();
	event_trace_enable();
}

fs_initcall(event_trace_init);
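
/*
 * Rough boot-time ordering of the pieces above, as assumed from the
 * initcall levels (trace_event_init() itself is called from the core
 * tracing setup, not by an initcall):
 *
 *	trace_event_init()
 *		event_trace_memsetup()	caches for fields and files
 *		init_ftrace_syscalls()
 *		event_trace_enable()	register built-in events,
 *					first pass of trace_event=
 *	event_trace_enable_again()	early_initcall, second pass
 *	event_trace_init()		fs_initcall, tracefs files
 *	event_trace_self_tests_init()	late_initcall, below, only with
 *					CONFIG_FTRACE_STARTUP_TEST
 */
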
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct trace_subsystem_dir *dir;
	struct trace_event_file *file;
	struct trace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

		/*
		 * Testing syscall events here is pretty useless, but
		 * we still do it if configured. It is time consuming,
		 * though: what we really need is a user thread performing
		 * the syscalls while we test.
		 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", trace_event_name(call));

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED) {
			pr_warn("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error enabling system %s\n",
				system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error disabling system %s\n",
				system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* Disable everything again */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER
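
/*
 * Note on the per-cpu counter below: function_test_events_call() is
 * attached to the function tracer and can be re-entered on the same
 * CPU (e.g. by an interrupt that hits while it runs), so it only
 * records an entry when its per-cpu nesting count shows it is the
 * outermost invocation (disabled == 1).
 */
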
static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static struct trace_event_file event_trace_file __initdata;

static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
						TRACE_FN, sizeof(*entry),
						flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	event_trigger_unlock_commit(&event_trace_file, buffer, event,
				    entry, flags, pc);
 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata = {
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	event_trace_file.tr = top_trace_array();
	if (WARN_ON(!event_trace_file.tr))
		return;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif