// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include <asm/setup.h> /* COMMAND_LINE_SIZE */

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * We need to change this state when a selftest is running.
 * A selftest walks the ring buffer to count the entries inserted
 * during the selftest, but concurrent insertions into the ring
 * buffer (such as from trace_printk) could occur at the same time,
 * giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#else
#define tracing_selftest_running	0
#define tracing_selftest_disabled	0
#endif

/* Pipe tracepoints to printk */
static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;

static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
static int boot_instance_index;

static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
static int boot_snapshot_index;

static int __init set_cmdline_ftrace(char *str)
{
	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str || !strcmp("1", str)) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	char *slot = boot_snapshot_info + boot_snapshot_index;
	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
	int ret;

	if (str[0] == '=') {
		str++;
		if (strlen(str) >= left)
			return -1;

		ret = snprintf(slot, left, "%s\t", str);
		boot_snapshot_index += ret;
	} else {
		allocate_snapshot = true;
		/* We also need the main ring buffer expanded */
		ring_buffer_expanded = true;
	}
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static int __init boot_snapshot(char *str)
{
	snapshot_at_boot = true;
	boot_alloc_snapshot(str);
	return 1;
}
__setup("ftrace_boot_snapshot", boot_snapshot);


static int __init boot_instance(char *str)
{
	char *slot = boot_instance_info + boot_instance_index;
	int left = sizeof(boot_instance_info) - boot_instance_index;
	int ret;

	if (strlen(str) >= left)
		return -1;

	ret = snprintf(slot, left, "%s\t", str);
	boot_instance_index += ret;

	return 1;
}
__setup("trace_instance=", boot_instance);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);
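
/*
 * Example kernel command lines for the boot parameters handled above
 * (a sketch; illustrative values, and the available tracers, options
 * and clocks depend on the kernel configuration):
 *
 *	ftrace=function_graph trace_options=sym-addr trace_clock=global
 *	ftrace_dump_on_oops=orig_cpu traceoff_on_warning
 *	trace_instance=foo alloc_snapshot ftrace_boot_snapshot
 */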
__setup("trace_clock=", set_trace_boot_clock); 302 303 static int __init set_tracepoint_printk(char *str) 304 { 305 /* Ignore the "tp_printk_stop_on_boot" param */ 306 if (*str == '_') 307 return 0; 308 309 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) 310 tracepoint_printk = 1; 311 return 1; 312 } 313 __setup("tp_printk", set_tracepoint_printk); 314 315 static int __init set_tracepoint_printk_stop(char *str) 316 { 317 tracepoint_printk_stop_on_boot = true; 318 return 1; 319 } 320 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop); 321 322 unsigned long long ns2usecs(u64 nsec) 323 { 324 nsec += 500; 325 do_div(nsec, 1000); 326 return nsec; 327 } 328 329 static void 330 trace_process_export(struct trace_export *export, 331 struct ring_buffer_event *event, int flag) 332 { 333 struct trace_entry *entry; 334 unsigned int size = 0; 335 336 if (export->flags & flag) { 337 entry = ring_buffer_event_data(event); 338 size = ring_buffer_event_length(event); 339 export->write(export, entry, size); 340 } 341 } 342 343 static DEFINE_MUTEX(ftrace_export_lock); 344 345 static struct trace_export __rcu *ftrace_exports_list __read_mostly; 346 347 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled); 348 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled); 349 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled); 350 351 static inline void ftrace_exports_enable(struct trace_export *export) 352 { 353 if (export->flags & TRACE_EXPORT_FUNCTION) 354 static_branch_inc(&trace_function_exports_enabled); 355 356 if (export->flags & TRACE_EXPORT_EVENT) 357 static_branch_inc(&trace_event_exports_enabled); 358 359 if (export->flags & TRACE_EXPORT_MARKER) 360 static_branch_inc(&trace_marker_exports_enabled); 361 } 362 363 static inline void ftrace_exports_disable(struct trace_export *export) 364 { 365 if (export->flags & TRACE_EXPORT_FUNCTION) 366 static_branch_dec(&trace_function_exports_enabled); 367 368 if (export->flags & TRACE_EXPORT_EVENT) 369 static_branch_dec(&trace_event_exports_enabled); 370 371 if (export->flags & TRACE_EXPORT_MARKER) 372 static_branch_dec(&trace_marker_exports_enabled); 373 } 374 375 static void ftrace_exports(struct ring_buffer_event *event, int flag) 376 { 377 struct trace_export *export; 378 379 preempt_disable_notrace(); 380 381 export = rcu_dereference_raw_check(ftrace_exports_list); 382 while (export) { 383 trace_process_export(export, event, flag); 384 export = rcu_dereference_raw_check(export->next); 385 } 386 387 preempt_enable_notrace(); 388 } 389 390 static inline void 391 add_trace_export(struct trace_export **list, struct trace_export *export) 392 { 393 rcu_assign_pointer(export->next, *list); 394 /* 395 * We are entering export into the list but another 396 * CPU might be walking that list. We need to make sure 397 * the export->next pointer is valid before another CPU sees 398 * the export pointer included into the list. 
static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
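
/*
 * Example (a sketch, not code in this file): a module can mirror trace
 * data to its own sink by registering a trace_export. The write()
 * callback runs in the trace fast path, so it must not sleep;
 * my_copy_to_fifo() stands in for whatever the sink does with the raw
 * trace_entry bytes.
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		my_copy_to_fifo(entry, size);
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */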
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
	 TRACE_ITER_HASH_PTR)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	return trace_pid_list_is_set(filtered_pids, search_pid);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}
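
/*
 * Example (a sketch): the usual get/put pairing around a named
 * instance. trace_array_get_by_name() is declared in
 * include/linux/trace.h (some kernel versions take an additional
 * "systems" argument); "my_instance" is a made-up name.
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 */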
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* "self" is set for forks, and NULL for exits */
	if (self)
		trace_pid_list_set(pid_list, task->pid);
	else
		trace_pid_list_clear(pid_list, task->pid);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	long pid = (unsigned long)v;
	unsigned int next;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	if (trace_pid_list_next(pid_list, pid, &next) < 0)
		return NULL;

	pid = next;

	/* Return pid + 1 to allow zero to be represented */
	return (void *)(pid + 1);
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	unsigned int first;
	loff_t l = 0;

	if (trace_pid_list_first(pid_list, &first) < 0)
		return NULL;

	pid = first;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
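
/*
 * Example (a sketch): wiring the helpers above into seq_file
 * operations, much like the pid filter files in this file do.
 * p_start(), p_stop() and the use of m->private here are hypothetical
 * glue; the real files resolve the pid list from the trace_array
 * under the proper locks.
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		struct trace_pid_list *pid_list = m->private;
 *
 *		return trace_pid_next(pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations pid_seq_ops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */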
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = trace_pid_list_alloc();
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		ret = trace_pid_list_first(filtered_pids, &pid);
		while (!ret) {
			trace_pid_list_set(pid_list, pid);
			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
			nr_pids++;
		}
	}

	ret = 0;
	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0)
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		if (!trace_parser_loaded(&parser))
			break;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;

		pid = (pid_t)val;

		if (trace_pid_list_set(pid_list, pid) < 0) {
			ret = -1;
			break;
		}
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_pid_list_free(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_pid_list_free(pid_list);
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is only low-level protection.
 * The validity of the events (returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, trace_ctx);

	return event;
}
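
/*
 * Example (a sketch): a consuming reader uses the scheme above as
 *
 *	trace_access_lock(cpu_file);
 *	... consume events from the cpu_file ring buffer ...
 *	trace_access_unlock(cpu_file);
 *
 * where cpu_file is either a CPU number or RING_BUFFER_ALL_CPUS.
 * The pipe read paths later in this file follow this pattern.
 */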
void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		/* ring_buffer_unlock_commit() enables preemption */
		preempt_enable_notrace();
	} else
		ring_buffer_unlock_commit(buffer);
}

int __trace_array_puts(struct trace_array *tr, unsigned long ip,
		       const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running && tr == &global_trace))
		return 0;

	if (unlikely(tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_array_puts);
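
/*
 * Example: the usual entry point for __trace_puts()/__trace_bputs()
 * below is the trace_puts() macro, which selects __trace_bputs() when
 * the string is a compile-time constant (only the pointer is recorded)
 * and __trace_puts() otherwise:
 *
 *	trace_puts("reached the slow path\n");
 */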
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	return __trace_array_puts(&global_trace, ip, str, size);
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);
	int ret = 0;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
		trace_array_puts(tr, "*** stopping trace here!   ***\n");
		tracer_tracing_off(tr);
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
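
/*
 * Example (a sketch): code that has set up the snapshot buffer can
 * capture the moment an error is detected while tracing continues;
 * error_condition stands in for whatever trigger the caller cares
 * about.
 *
 *	if (tracing_alloc_snapshot() == 0) {
 *		...
 *		if (unlikely(error_condition))
 *			tracing_snapshot();
 *	}
 */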
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
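
/*
 * Example (a sketch): a hypothetical user of the conditional snapshot
 * API. The update callback decides, per call, whether the snapshot
 * should be taken; my_threshold_check() and my_data are made up for
 * illustration.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_data *d = cond_data;
 *
 *		return my_threshold_check(d);
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_data);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */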
/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#define free_snapshot(tr)	do { } while (0)
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning) {
		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
			"Disabling tracing due to warning\n");
		tracing_off();
	}
}
/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_on(tr->array_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/*
	 * nr_entries can not be zero and the startup
	 * tests require some buffer space. Therefore
	 * ensure we have at least 4096 bytes of buffer.
	 */
	trace_buf_size = max(4096UL, buf_size);
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	{ ktime_get_tai_fast_ns,	"tai",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}
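
/*
 * Example (a sketch): the typical parser life cycle inside a tracefs
 * write handler, as trace_pid_write() above demonstrates for real.
 * BUF_SIZE here is a caller-chosen token-length limit.
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, BUF_SIZE + 1))
 *		return -ENOMEM;
 *	while (cnt > 0) {
 *		pos = 0;
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		... consume parser.buffer as one token ...
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 */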
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops;

#ifdef LATENCY_FS_NOTIFY

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
	struct trace_array *tr = container_of(work, struct trace_array,
					      fsnotify_work);
	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
	struct trace_array *tr = container_of(iwork, struct trace_array,
					      fsnotify_irqwork);
	queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
				     struct dentry *d_tracer)
{
	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
	tr->d_max_latency = trace_create_file("tracing_max_latency",
					      TRACE_MODE_WRITE,
					      d_tracer, tr,
					      &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!fsnotify_wq) {
		pr_err("Unable to allocate tr_max_lat_wq\n");
		return -ENOMEM;
	}
	return 0;
}

late_initcall_sync(latency_fsnotify_init);
void latency_fsnotify(struct trace_array *tr)
{
	if (!fsnotify_wq)
		return;
	/*
	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
	 * possible that we are called from __schedule() or do_idle(), which
	 * could cause a deadlock.
	 */
	irq_work_queue(&tr->fsnotify_irqwork);
}

#else /* !LATENCY_FS_NOTIFY */

#define trace_create_maxlat_file(tr, d_tracer)				\
	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
			  d_tracer, tr, &tracing_max_lat_fops)

#endif

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct array_buffer *trace_buf = &tr->array_buffer;
	struct array_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
	latency_fsnotify(tr);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from array_buffer */
	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
		arch_spin_unlock(&tr->max_lock);
		return;
	}
#endif
	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

	arch_spin_unlock(&tr->max_lock);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 * Another reason is resize is in progress.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit or resize in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}
2007 */ 2008 tracing_reset_online_cpus(&tr->array_buffer); 2009 2010 tr->current_trace = type; 2011 2012 #ifdef CONFIG_TRACER_MAX_TRACE 2013 if (type->use_max_tr) { 2014 /* If we expanded the buffers, make sure the max is expanded too */ 2015 if (ring_buffer_expanded) 2016 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, 2017 RING_BUFFER_ALL_CPUS); 2018 tr->allocated_snapshot = true; 2019 } 2020 #endif 2021 2022 /* the test is responsible for initializing and enabling */ 2023 pr_info("Testing tracer %s: ", type->name); 2024 ret = type->selftest(type, tr); 2025 /* the test is responsible for resetting too */ 2026 tr->current_trace = saved_tracer; 2027 if (ret) { 2028 printk(KERN_CONT "FAILED!\n"); 2029 /* Add the warning after printing 'FAILED' */ 2030 WARN_ON(1); 2031 return -1; 2032 } 2033 /* Only reset on passing, to avoid touching corrupted buffers */ 2034 tracing_reset_online_cpus(&tr->array_buffer); 2035 2036 #ifdef CONFIG_TRACER_MAX_TRACE 2037 if (type->use_max_tr) { 2038 tr->allocated_snapshot = false; 2039 2040 /* Shrink the max buffer again */ 2041 if (ring_buffer_expanded) 2042 ring_buffer_resize(tr->max_buffer.buffer, 1, 2043 RING_BUFFER_ALL_CPUS); 2044 } 2045 #endif 2046 2047 printk(KERN_CONT "PASSED\n"); 2048 return 0; 2049 } 2050 2051 static int do_run_tracer_selftest(struct tracer *type) 2052 { 2053 int ret; 2054 2055 /* 2056 * Tests can take a long time, especially if they are run one after the 2057 * other, as does happen during bootup when all the tracers are 2058 * registered. This could cause the soft lockup watchdog to trigger. 2059 */ 2060 cond_resched(); 2061 2062 tracing_selftest_running = true; 2063 ret = run_tracer_selftest(type); 2064 tracing_selftest_running = false; 2065 2066 return ret; 2067 } 2068 2069 static __init int init_trace_selftests(void) 2070 { 2071 struct trace_selftests *p, *n; 2072 struct tracer *t, **last; 2073 int ret; 2074 2075 selftests_can_run = true; 2076 2077 mutex_lock(&trace_types_lock); 2078 2079 if (list_empty(&postponed_selftests)) 2080 goto out; 2081 2082 pr_info("Running postponed tracer tests:\n"); 2083 2084 tracing_selftest_running = true; 2085 list_for_each_entry_safe(p, n, &postponed_selftests, list) { 2086 /* This loop can take minutes when sanitizers are enabled, so 2087 * lets make sure we allow RCU processing. 2088 */ 2089 cond_resched(); 2090 ret = run_tracer_selftest(p->type); 2091 /* If the test fails, then warn and remove from available_tracers */ 2092 if (ret < 0) { 2093 WARN(1, "tracer: %s failed selftest, disabling\n", 2094 p->type->name); 2095 last = &trace_types; 2096 for (t = trace_types; t; t = t->next) { 2097 if (t == p->type) { 2098 *last = t->next; 2099 break; 2100 } 2101 last = &t->next; 2102 } 2103 } 2104 list_del(&p->list); 2105 kfree(p); 2106 } 2107 tracing_selftest_running = false; 2108 2109 out: 2110 mutex_unlock(&trace_types_lock); 2111 2112 return 0; 2113 } 2114 core_initcall(init_trace_selftests); 2115 #else 2116 static inline int run_tracer_selftest(struct tracer *type) 2117 { 2118 return 0; 2119 } 2120 static inline int do_run_tracer_selftest(struct tracer *type) 2121 { 2122 return 0; 2123 } 2124 #endif /* CONFIG_FTRACE_STARTUP_TEST */ 2125 2126 static void add_tracer_options(struct trace_array *tr, struct tracer *t); 2127 2128 static void __init apply_trace_boot_options(void); 2129 2130 /** 2131 * register_tracer - register a tracer with the ftrace system. 2132 * @type: the plugin for the tracer 2133 * 2134 * Register a new plugin tracer. 
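*
* A minimal, illustrative sketch (names invented; real tracers set
* more callbacks):
*
*	static struct tracer my_tracer = {
*		.name	= "my_tracer",
*		.init	= my_tracer_init,
*		.reset	= my_tracer_reset,
*	};
*
*	static int __init init_my_tracer(void)
*	{
*		return register_tracer(&my_tracer);
*	}
*	core_initcall(init_my_tracer);
*
* Note that register_tracer() is __init, so registration can only
* happen during boot.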
2135 */ 2136 int __init register_tracer(struct tracer *type) 2137 { 2138 struct tracer *t; 2139 int ret = 0; 2140 2141 if (!type->name) { 2142 pr_info("Tracer must have a name\n"); 2143 return -1; 2144 } 2145 2146 if (strlen(type->name) >= MAX_TRACER_SIZE) { 2147 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); 2148 return -1; 2149 } 2150 2151 if (security_locked_down(LOCKDOWN_TRACEFS)) { 2152 pr_warn("Can not register tracer %s due to lockdown\n", 2153 type->name); 2154 return -EPERM; 2155 } 2156 2157 mutex_lock(&trace_types_lock); 2158 2159 for (t = trace_types; t; t = t->next) { 2160 if (strcmp(type->name, t->name) == 0) { 2161 /* already found */ 2162 pr_info("Tracer %s already registered\n", 2163 type->name); 2164 ret = -1; 2165 goto out; 2166 } 2167 } 2168 2169 if (!type->set_flag) 2170 type->set_flag = &dummy_set_flag; 2171 if (!type->flags) { 2172 /*allocate a dummy tracer_flags*/ 2173 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL); 2174 if (!type->flags) { 2175 ret = -ENOMEM; 2176 goto out; 2177 } 2178 type->flags->val = 0; 2179 type->flags->opts = dummy_tracer_opt; 2180 } else 2181 if (!type->flags->opts) 2182 type->flags->opts = dummy_tracer_opt; 2183 2184 /* store the tracer for __set_tracer_option */ 2185 type->flags->trace = type; 2186 2187 ret = do_run_tracer_selftest(type); 2188 if (ret < 0) 2189 goto out; 2190 2191 type->next = trace_types; 2192 trace_types = type; 2193 add_tracer_options(&global_trace, type); 2194 2195 out: 2196 mutex_unlock(&trace_types_lock); 2197 2198 if (ret || !default_bootup_tracer) 2199 goto out_unlock; 2200 2201 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) 2202 goto out_unlock; 2203 2204 printk(KERN_INFO "Starting tracer '%s'\n", type->name); 2205 /* Do we want this tracer to start on bootup? */ 2206 tracing_set_tracer(&global_trace, type->name); 2207 default_bootup_tracer = NULL; 2208 2209 apply_trace_boot_options(); 2210 2211 /* disable other selftests, since this will break it. 
*/ 2212 disable_tracing_selftest("running a tracer"); 2213 2214 out_unlock: 2215 return ret; 2216 } 2217 2218 static void tracing_reset_cpu(struct array_buffer *buf, int cpu) 2219 { 2220 struct trace_buffer *buffer = buf->buffer; 2221 2222 if (!buffer) 2223 return; 2224 2225 ring_buffer_record_disable(buffer); 2226 2227 /* Make sure all commits have finished */ 2228 synchronize_rcu(); 2229 ring_buffer_reset_cpu(buffer, cpu); 2230 2231 ring_buffer_record_enable(buffer); 2232 } 2233 2234 void tracing_reset_online_cpus(struct array_buffer *buf) 2235 { 2236 struct trace_buffer *buffer = buf->buffer; 2237 2238 if (!buffer) 2239 return; 2240 2241 ring_buffer_record_disable(buffer); 2242 2243 /* Make sure all commits have finished */ 2244 synchronize_rcu(); 2245 2246 buf->time_start = buffer_ftrace_now(buf, buf->cpu); 2247 2248 ring_buffer_reset_online_cpus(buffer); 2249 2250 ring_buffer_record_enable(buffer); 2251 } 2252 2253 /* Must have trace_types_lock held */ 2254 void tracing_reset_all_online_cpus_unlocked(void) 2255 { 2256 struct trace_array *tr; 2257 2258 lockdep_assert_held(&trace_types_lock); 2259 2260 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 2261 if (!tr->clear_trace) 2262 continue; 2263 tr->clear_trace = false; 2264 tracing_reset_online_cpus(&tr->array_buffer); 2265 #ifdef CONFIG_TRACER_MAX_TRACE 2266 tracing_reset_online_cpus(&tr->max_buffer); 2267 #endif 2268 } 2269 } 2270 2271 void tracing_reset_all_online_cpus(void) 2272 { 2273 mutex_lock(&trace_types_lock); 2274 tracing_reset_all_online_cpus_unlocked(); 2275 mutex_unlock(&trace_types_lock); 2276 } 2277 2278 /* 2279 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i 2280 * is the tgid last observed corresponding to pid=i. 2281 */ 2282 static int *tgid_map; 2283 2284 /* The maximum valid index into tgid_map. */ 2285 static size_t tgid_map_max; 2286 2287 #define SAVED_CMDLINES_DEFAULT 128 2288 #define NO_CMDLINE_MAP UINT_MAX 2289 /* 2290 * Preemption must be disabled before acquiring trace_cmdline_lock. 2291 * The various trace_arrays' max_lock must be acquired in a context 2292 * where interrupt is disabled. 
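*
* The required pattern, as trace_find_cmdline() below uses, is:
*
*	preempt_disable();
*	arch_spin_lock(&trace_cmdline_lock);
*	... access savedcmd ...
*	arch_spin_unlock(&trace_cmdline_lock);
*	preempt_enable();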
2293 */ 2294 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; 2295 struct saved_cmdlines_buffer { 2296 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; 2297 unsigned *map_cmdline_to_pid; 2298 unsigned cmdline_num; 2299 int cmdline_idx; 2300 char *saved_cmdlines; 2301 }; 2302 static struct saved_cmdlines_buffer *savedcmd; 2303 2304 static inline char *get_saved_cmdlines(int idx) 2305 { 2306 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; 2307 } 2308 2309 static inline void set_cmdline(int idx, const char *cmdline) 2310 { 2311 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); 2312 } 2313 2314 static int allocate_cmdlines_buffer(unsigned int val, 2315 struct saved_cmdlines_buffer *s) 2316 { 2317 s->map_cmdline_to_pid = kmalloc_array(val, 2318 sizeof(*s->map_cmdline_to_pid), 2319 GFP_KERNEL); 2320 if (!s->map_cmdline_to_pid) 2321 return -ENOMEM; 2322 2323 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL); 2324 if (!s->saved_cmdlines) { 2325 kfree(s->map_cmdline_to_pid); 2326 return -ENOMEM; 2327 } 2328 2329 s->cmdline_idx = 0; 2330 s->cmdline_num = val; 2331 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP, 2332 sizeof(s->map_pid_to_cmdline)); 2333 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP, 2334 val * sizeof(*s->map_cmdline_to_pid)); 2335 2336 return 0; 2337 } 2338 2339 static int trace_create_savedcmd(void) 2340 { 2341 int ret; 2342 2343 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL); 2344 if (!savedcmd) 2345 return -ENOMEM; 2346 2347 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd); 2348 if (ret < 0) { 2349 kfree(savedcmd); 2350 savedcmd = NULL; 2351 return -ENOMEM; 2352 } 2353 2354 return 0; 2355 } 2356 2357 int is_tracing_stopped(void) 2358 { 2359 return global_trace.stop_count; 2360 } 2361 2362 /** 2363 * tracing_start - quick start of the tracer 2364 * 2365 * If tracing is enabled but was stopped by tracing_stop, 2366 * this will start the tracer back up. 
2367 */ 2368 void tracing_start(void) 2369 { 2370 struct trace_buffer *buffer; 2371 unsigned long flags; 2372 2373 if (tracing_disabled) 2374 return; 2375 2376 raw_spin_lock_irqsave(&global_trace.start_lock, flags); 2377 if (--global_trace.stop_count) { 2378 if (global_trace.stop_count < 0) { 2379 /* Someone screwed up their debugging */ 2380 WARN_ON_ONCE(1); 2381 global_trace.stop_count = 0; 2382 } 2383 goto out; 2384 } 2385 2386 /* Prevent the buffers from switching */ 2387 arch_spin_lock(&global_trace.max_lock); 2388 2389 buffer = global_trace.array_buffer.buffer; 2390 if (buffer) 2391 ring_buffer_record_enable(buffer); 2392 2393 #ifdef CONFIG_TRACER_MAX_TRACE 2394 buffer = global_trace.max_buffer.buffer; 2395 if (buffer) 2396 ring_buffer_record_enable(buffer); 2397 #endif 2398 2399 arch_spin_unlock(&global_trace.max_lock); 2400 2401 out: 2402 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); 2403 } 2404 2405 static void tracing_start_tr(struct trace_array *tr) 2406 { 2407 struct trace_buffer *buffer; 2408 unsigned long flags; 2409 2410 if (tracing_disabled) 2411 return; 2412 2413 /* If global, we need to also start the max tracer */ 2414 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 2415 return tracing_start(); 2416 2417 raw_spin_lock_irqsave(&tr->start_lock, flags); 2418 2419 if (--tr->stop_count) { 2420 if (tr->stop_count < 0) { 2421 /* Someone screwed up their debugging */ 2422 WARN_ON_ONCE(1); 2423 tr->stop_count = 0; 2424 } 2425 goto out; 2426 } 2427 2428 buffer = tr->array_buffer.buffer; 2429 if (buffer) 2430 ring_buffer_record_enable(buffer); 2431 2432 out: 2433 raw_spin_unlock_irqrestore(&tr->start_lock, flags); 2434 } 2435 2436 /** 2437 * tracing_stop - quick stop of the tracer 2438 * 2439 * Light weight way to stop tracing. Use in conjunction with 2440 * tracing_start. 2441 */ 2442 void tracing_stop(void) 2443 { 2444 struct trace_buffer *buffer; 2445 unsigned long flags; 2446 2447 raw_spin_lock_irqsave(&global_trace.start_lock, flags); 2448 if (global_trace.stop_count++) 2449 goto out; 2450 2451 /* Prevent the buffers from switching */ 2452 arch_spin_lock(&global_trace.max_lock); 2453 2454 buffer = global_trace.array_buffer.buffer; 2455 if (buffer) 2456 ring_buffer_record_disable(buffer); 2457 2458 #ifdef CONFIG_TRACER_MAX_TRACE 2459 buffer = global_trace.max_buffer.buffer; 2460 if (buffer) 2461 ring_buffer_record_disable(buffer); 2462 #endif 2463 2464 arch_spin_unlock(&global_trace.max_lock); 2465 2466 out: 2467 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); 2468 } 2469 2470 static void tracing_stop_tr(struct trace_array *tr) 2471 { 2472 struct trace_buffer *buffer; 2473 unsigned long flags; 2474 2475 /* If global, we need to also stop the max tracer */ 2476 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 2477 return tracing_stop(); 2478 2479 raw_spin_lock_irqsave(&tr->start_lock, flags); 2480 if (tr->stop_count++) 2481 goto out; 2482 2483 buffer = tr->array_buffer.buffer; 2484 if (buffer) 2485 ring_buffer_record_disable(buffer); 2486 2487 out: 2488 raw_spin_unlock_irqrestore(&tr->start_lock, flags); 2489 } 2490 2491 static int trace_save_cmdline(struct task_struct *tsk) 2492 { 2493 unsigned tpid, idx; 2494 2495 /* treat recording of idle task as a success */ 2496 if (!tsk->pid) 2497 return 1; 2498 2499 tpid = tsk->pid & (PID_MAX_DEFAULT - 1); 2500 2501 /* 2502 * It's not the end of the world if we don't get 2503 * the lock, but we also don't want to spin 2504 * nor do we want to disable interrupts, 2505 * so if we miss here, then better luck next time. 
2506 * 2507 * This is called within the scheduler and wake up, so interrupts 2508 * had better been disabled and run queue lock been held. 2509 */ 2510 lockdep_assert_preemption_disabled(); 2511 if (!arch_spin_trylock(&trace_cmdline_lock)) 2512 return 0; 2513 2514 idx = savedcmd->map_pid_to_cmdline[tpid]; 2515 if (idx == NO_CMDLINE_MAP) { 2516 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num; 2517 2518 savedcmd->map_pid_to_cmdline[tpid] = idx; 2519 savedcmd->cmdline_idx = idx; 2520 } 2521 2522 savedcmd->map_cmdline_to_pid[idx] = tsk->pid; 2523 set_cmdline(idx, tsk->comm); 2524 2525 arch_spin_unlock(&trace_cmdline_lock); 2526 2527 return 1; 2528 } 2529 2530 static void __trace_find_cmdline(int pid, char comm[]) 2531 { 2532 unsigned map; 2533 int tpid; 2534 2535 if (!pid) { 2536 strcpy(comm, "<idle>"); 2537 return; 2538 } 2539 2540 if (WARN_ON_ONCE(pid < 0)) { 2541 strcpy(comm, "<XXX>"); 2542 return; 2543 } 2544 2545 tpid = pid & (PID_MAX_DEFAULT - 1); 2546 map = savedcmd->map_pid_to_cmdline[tpid]; 2547 if (map != NO_CMDLINE_MAP) { 2548 tpid = savedcmd->map_cmdline_to_pid[map]; 2549 if (tpid == pid) { 2550 strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); 2551 return; 2552 } 2553 } 2554 strcpy(comm, "<...>"); 2555 } 2556 2557 void trace_find_cmdline(int pid, char comm[]) 2558 { 2559 preempt_disable(); 2560 arch_spin_lock(&trace_cmdline_lock); 2561 2562 __trace_find_cmdline(pid, comm); 2563 2564 arch_spin_unlock(&trace_cmdline_lock); 2565 preempt_enable(); 2566 } 2567 2568 static int *trace_find_tgid_ptr(int pid) 2569 { 2570 /* 2571 * Pairs with the smp_store_release in set_tracer_flag() to ensure that 2572 * if we observe a non-NULL tgid_map then we also observe the correct 2573 * tgid_map_max. 2574 */ 2575 int *map = smp_load_acquire(&tgid_map); 2576 2577 if (unlikely(!map || pid > tgid_map_max)) 2578 return NULL; 2579 2580 return &map[pid]; 2581 } 2582 2583 int trace_find_tgid(int pid) 2584 { 2585 int *ptr = trace_find_tgid_ptr(pid); 2586 2587 return ptr ? *ptr : 0; 2588 } 2589 2590 static int trace_save_tgid(struct task_struct *tsk) 2591 { 2592 int *ptr; 2593 2594 /* treat recording of idle task as a success */ 2595 if (!tsk->pid) 2596 return 1; 2597 2598 ptr = trace_find_tgid_ptr(tsk->pid); 2599 if (!ptr) 2600 return 0; 2601 2602 *ptr = tsk->tgid; 2603 return 1; 2604 } 2605 2606 static bool tracing_record_taskinfo_skip(int flags) 2607 { 2608 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID)))) 2609 return true; 2610 if (!__this_cpu_read(trace_taskinfo_save)) 2611 return true; 2612 return false; 2613 } 2614 2615 /** 2616 * tracing_record_taskinfo - record the task info of a task 2617 * 2618 * @task: task to record 2619 * @flags: TRACE_RECORD_CMDLINE for recording comm 2620 * TRACE_RECORD_TGID for recording tgid 2621 */ 2622 void tracing_record_taskinfo(struct task_struct *task, int flags) 2623 { 2624 bool done; 2625 2626 if (tracing_record_taskinfo_skip(flags)) 2627 return; 2628 2629 /* 2630 * Record as much task information as possible. If some fail, continue 2631 * to try to record the others. 2632 */ 2633 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task); 2634 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task); 2635 2636 /* If recording any information failed, retry again soon. 
*/ 2637 if (!done) 2638 return; 2639 2640 __this_cpu_write(trace_taskinfo_save, false); 2641 } 2642 2643 /** 2644 * tracing_record_taskinfo_sched_switch - record task info for sched_switch 2645 * 2646 * @prev: previous task during sched_switch 2647 * @next: next task during sched_switch 2648 * @flags: TRACE_RECORD_CMDLINE for recording comm 2649 * TRACE_RECORD_TGID for recording tgid 2650 */ 2651 void tracing_record_taskinfo_sched_switch(struct task_struct *prev, 2652 struct task_struct *next, int flags) 2653 { 2654 bool done; 2655 2656 if (tracing_record_taskinfo_skip(flags)) 2657 return; 2658 2659 /* 2660 * Record as much task information as possible. If some fail, continue 2661 * to try to record the others. 2662 */ 2663 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev); 2664 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next); 2665 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev); 2666 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next); 2667 2668 /* If recording any information failed, retry again soon. */ 2669 if (!done) 2670 return; 2671 2672 __this_cpu_write(trace_taskinfo_save, false); 2673 } 2674 2675 /* Helpers to record a specific task information */ 2676 void tracing_record_cmdline(struct task_struct *task) 2677 { 2678 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE); 2679 } 2680 2681 void tracing_record_tgid(struct task_struct *task) 2682 { 2683 tracing_record_taskinfo(task, TRACE_RECORD_TGID); 2684 } 2685 2686 /* 2687 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq 2688 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function 2689 * simplifies those functions and keeps them in sync. 2690 */ 2691 enum print_line_t trace_handle_return(struct trace_seq *s) 2692 { 2693 return trace_seq_has_overflowed(s) ? 
2694 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; 2695 } 2696 EXPORT_SYMBOL_GPL(trace_handle_return); 2697 2698 static unsigned short migration_disable_value(void) 2699 { 2700 #if defined(CONFIG_SMP) 2701 return current->migration_disabled; 2702 #else 2703 return 0; 2704 #endif 2705 } 2706 2707 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status) 2708 { 2709 unsigned int trace_flags = irqs_status; 2710 unsigned int pc; 2711 2712 pc = preempt_count(); 2713 2714 if (pc & NMI_MASK) 2715 trace_flags |= TRACE_FLAG_NMI; 2716 if (pc & HARDIRQ_MASK) 2717 trace_flags |= TRACE_FLAG_HARDIRQ; 2718 if (in_serving_softirq()) 2719 trace_flags |= TRACE_FLAG_SOFTIRQ; 2720 if (softirq_count() >> (SOFTIRQ_SHIFT + 1)) 2721 trace_flags |= TRACE_FLAG_BH_OFF; 2722 2723 if (tif_need_resched()) 2724 trace_flags |= TRACE_FLAG_NEED_RESCHED; 2725 if (test_preempt_need_resched()) 2726 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; 2727 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) | 2728 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4; 2729 } 2730 2731 struct ring_buffer_event * 2732 trace_buffer_lock_reserve(struct trace_buffer *buffer, 2733 int type, 2734 unsigned long len, 2735 unsigned int trace_ctx) 2736 { 2737 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx); 2738 } 2739 2740 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); 2741 DEFINE_PER_CPU(int, trace_buffered_event_cnt); 2742 static int trace_buffered_event_ref; 2743 2744 /** 2745 * trace_buffered_event_enable - enable buffering events 2746 * 2747 * When events are being filtered, it is quicker to use a temporary 2748 * buffer to write the event data into if there's a likely chance 2749 * that it will not be committed. The discard of the ring buffer 2750 * is not as fast as committing, and is much slower than copying 2751 * a commit. 2752 * 2753 * When an event is to be filtered, allocate per cpu buffers to 2754 * write the event data into, and if the event is filtered and discarded 2755 * it is simply dropped, otherwise, the entire data is to be committed 2756 * in one shot. 2757 */ 2758 void trace_buffered_event_enable(void) 2759 { 2760 struct ring_buffer_event *event; 2761 struct page *page; 2762 int cpu; 2763 2764 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2765 2766 if (trace_buffered_event_ref++) 2767 return; 2768 2769 for_each_tracing_cpu(cpu) { 2770 page = alloc_pages_node(cpu_to_node(cpu), 2771 GFP_KERNEL | __GFP_NORETRY, 0); 2772 if (!page) 2773 goto failed; 2774 2775 event = page_address(page); 2776 memset(event, 0, sizeof(*event)); 2777 2778 per_cpu(trace_buffered_event, cpu) = event; 2779 2780 preempt_disable(); 2781 if (cpu == smp_processor_id() && 2782 __this_cpu_read(trace_buffered_event) != 2783 per_cpu(trace_buffered_event, cpu)) 2784 WARN_ON_ONCE(1); 2785 preempt_enable(); 2786 } 2787 2788 return; 2789 failed: 2790 trace_buffered_event_disable(); 2791 } 2792 2793 static void enable_trace_buffered_event(void *data) 2794 { 2795 /* Probably not needed, but do it anyway */ 2796 smp_rmb(); 2797 this_cpu_dec(trace_buffered_event_cnt); 2798 } 2799 2800 static void disable_trace_buffered_event(void *data) 2801 { 2802 this_cpu_inc(trace_buffered_event_cnt); 2803 } 2804 2805 /** 2806 * trace_buffered_event_disable - disable buffering events 2807 * 2808 * When a filter is removed, it is faster to not use the buffered 2809 * events, and to commit directly into the ring buffer. Free up 2810 * the temp buffers when there are no more users. 
This requires 2811 * special synchronization with current events. 2812 */ 2813 void trace_buffered_event_disable(void) 2814 { 2815 int cpu; 2816 2817 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2818 2819 if (WARN_ON_ONCE(!trace_buffered_event_ref)) 2820 return; 2821 2822 if (--trace_buffered_event_ref) 2823 return; 2824 2825 preempt_disable(); 2826 /* For each CPU, set the buffer as used. */ 2827 smp_call_function_many(tracing_buffer_mask, 2828 disable_trace_buffered_event, NULL, 1); 2829 preempt_enable(); 2830 2831 /* Wait for all current users to finish */ 2832 synchronize_rcu(); 2833 2834 for_each_tracing_cpu(cpu) { 2835 free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); 2836 per_cpu(trace_buffered_event, cpu) = NULL; 2837 } 2838 /* 2839 * Make sure trace_buffered_event is NULL before clearing 2840 * trace_buffered_event_cnt. 2841 */ 2842 smp_wmb(); 2843 2844 preempt_disable(); 2845 /* Do the work on each cpu */ 2846 smp_call_function_many(tracing_buffer_mask, 2847 enable_trace_buffered_event, NULL, 1); 2848 preempt_enable(); 2849 } 2850 2851 static struct trace_buffer *temp_buffer; 2852 2853 struct ring_buffer_event * 2854 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, 2855 struct trace_event_file *trace_file, 2856 int type, unsigned long len, 2857 unsigned int trace_ctx) 2858 { 2859 struct ring_buffer_event *entry; 2860 struct trace_array *tr = trace_file->tr; 2861 int val; 2862 2863 *current_rb = tr->array_buffer.buffer; 2864 2865 if (!tr->no_filter_buffering_ref && 2866 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) { 2867 preempt_disable_notrace(); 2868 /* 2869 * Filtering is on, so try to use the per cpu buffer first. 2870 * This buffer will simulate a ring_buffer_event, 2871 * where the type_len is zero and the array[0] will 2872 * hold the full length. 2873 * (see include/linux/ring-buffer.h for details on 2874 * how the ring_buffer_event is structured). 2875 * 2876 * Using a temp buffer during filtering and copying it 2877 * on a matched filter is quicker than writing directly 2878 * into the ring buffer and then discarding it when 2879 * it doesn't match. That is because the discard 2880 * requires several atomic operations to get right. 2881 * Copying on match and doing nothing on a failed match 2882 * is still quicker than no copy on match, but having 2883 * to discard out of the ring buffer on a failed match. 2884 */ 2885 if ((entry = __this_cpu_read(trace_buffered_event))) { 2886 int max_len = PAGE_SIZE - struct_size(entry, array, 1); 2887 2888 val = this_cpu_inc_return(trace_buffered_event_cnt); 2889 2890 /* 2891 * Preemption is disabled, but interrupts and NMIs 2892 * can still come in now. If that happens after 2893 * the above increment, then it will have to go 2894 * back to the old method of allocating the event 2895 * on the ring buffer, and if the filter fails, it 2896 * will have to call ring_buffer_discard_commit() 2897 * to remove it. 2898 * 2899 * Need to also check the unlikely case that the 2900 * length is bigger than the temp buffer size. 2901 * If that happens, then the reserve is pretty much 2902 * guaranteed to fail, as the ring buffer currently 2903 * only allows events less than a page. But that may 2904 * change in the future, so let the ring buffer reserve 2905 * handle the failure in that case. 
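*
* On success, the buffered event is thus set up as (matching the
* code below):
*
*	entry->array[0] = len;
*
* with type_len zero, so the payload length lives in array[0] and
* the event data follows it.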
2906 */ 2907 if (val == 1 && likely(len <= max_len)) { 2908 trace_event_setup(entry, type, trace_ctx); 2909 entry->array[0] = len; 2910 /* Return with preemption disabled */ 2911 return entry; 2912 } 2913 this_cpu_dec(trace_buffered_event_cnt); 2914 } 2915 /* __trace_buffer_lock_reserve() disables preemption */ 2916 preempt_enable_notrace(); 2917 } 2918 2919 entry = __trace_buffer_lock_reserve(*current_rb, type, len, 2920 trace_ctx); 2921 /* 2922 * If tracing is off, but we have triggers enabled 2923 * we still need to look at the event data. Use the temp_buffer 2924 * to store the trace event for the trigger to use. It's recursive 2925 * safe and will not be recorded anywhere. 2926 */ 2927 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { 2928 *current_rb = temp_buffer; 2929 entry = __trace_buffer_lock_reserve(*current_rb, type, len, 2930 trace_ctx); 2931 } 2932 return entry; 2933 } 2934 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); 2935 2936 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock); 2937 static DEFINE_MUTEX(tracepoint_printk_mutex); 2938 2939 static void output_printk(struct trace_event_buffer *fbuffer) 2940 { 2941 struct trace_event_call *event_call; 2942 struct trace_event_file *file; 2943 struct trace_event *event; 2944 unsigned long flags; 2945 struct trace_iterator *iter = tracepoint_print_iter; 2946 2947 /* We should never get here if iter is NULL */ 2948 if (WARN_ON_ONCE(!iter)) 2949 return; 2950 2951 event_call = fbuffer->trace_file->event_call; 2952 if (!event_call || !event_call->event.funcs || 2953 !event_call->event.funcs->trace) 2954 return; 2955 2956 file = fbuffer->trace_file; 2957 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || 2958 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && 2959 !filter_match_preds(file->filter, fbuffer->entry))) 2960 return; 2961 2962 event = &fbuffer->trace_file->event_call->event; 2963 2964 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags); 2965 trace_seq_init(&iter->seq); 2966 iter->ent = fbuffer->entry; 2967 event_call->event.funcs->trace(iter, 0, event); 2968 trace_seq_putc(&iter->seq, 0); 2969 printk("%s", iter->seq.buffer); 2970 2971 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags); 2972 } 2973 2974 int tracepoint_printk_sysctl(struct ctl_table *table, int write, 2975 void *buffer, size_t *lenp, 2976 loff_t *ppos) 2977 { 2978 int save_tracepoint_printk; 2979 int ret; 2980 2981 mutex_lock(&tracepoint_printk_mutex); 2982 save_tracepoint_printk = tracepoint_printk; 2983 2984 ret = proc_dointvec(table, write, buffer, lenp, ppos); 2985 2986 /* 2987 * This will force exiting early, as tracepoint_printk 2988 * is always zero when tracepoint_printk_iter is not allocated 2989 */ 2990 if (!tracepoint_print_iter) 2991 tracepoint_printk = 0; 2992 2993 if (save_tracepoint_printk == tracepoint_printk) 2994 goto out; 2995 2996 if (tracepoint_printk) 2997 static_key_enable(&tracepoint_printk_key.key); 2998 else 2999 static_key_disable(&tracepoint_printk_key.key); 3000 3001 out: 3002 mutex_unlock(&tracepoint_printk_mutex); 3003 3004 return ret; 3005 } 3006 3007 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) 3008 { 3009 enum event_trigger_type tt = ETT_NONE; 3010 struct trace_event_file *file = fbuffer->trace_file; 3011 3012 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event, 3013 fbuffer->entry, &tt)) 3014 goto discard; 3015 3016 if (static_key_false(&tracepoint_printk_key.key)) 3017 output_printk(fbuffer); 3018 3019 if 
(static_branch_unlikely(&trace_event_exports_enabled)) 3020 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); 3021 3022 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, 3023 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs); 3024 3025 discard: 3026 if (tt) 3027 event_triggers_post_call(file, tt); 3028 3029 } 3030 EXPORT_SYMBOL_GPL(trace_event_buffer_commit); 3031 3032 /* 3033 * Skip 3: 3034 * 3035 * trace_buffer_unlock_commit_regs() 3036 * trace_event_buffer_commit() 3037 * trace_event_raw_event_xxx() 3038 */ 3039 # define STACK_SKIP 3 3040 3041 void trace_buffer_unlock_commit_regs(struct trace_array *tr, 3042 struct trace_buffer *buffer, 3043 struct ring_buffer_event *event, 3044 unsigned int trace_ctx, 3045 struct pt_regs *regs) 3046 { 3047 __buffer_unlock_commit(buffer, event); 3048 3049 /* 3050 * If regs is not set, then skip the necessary functions. 3051 * Note, we can still get here via blktrace, wakeup tracer 3052 * and mmiotrace, but that's ok if they lose a function or 3053 * two. They are not that meaningful. 3054 */ 3055 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs); 3056 ftrace_trace_userstack(tr, buffer, trace_ctx); 3057 } 3058 3059 /* 3060 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. 3061 */ 3062 void 3063 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, 3064 struct ring_buffer_event *event) 3065 { 3066 __buffer_unlock_commit(buffer, event); 3067 } 3068 3069 void 3070 trace_function(struct trace_array *tr, unsigned long ip, unsigned long 3071 parent_ip, unsigned int trace_ctx) 3072 { 3073 struct trace_event_call *call = &event_function; 3074 struct trace_buffer *buffer = tr->array_buffer.buffer; 3075 struct ring_buffer_event *event; 3076 struct ftrace_entry *entry; 3077 3078 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), 3079 trace_ctx); 3080 if (!event) 3081 return; 3082 entry = ring_buffer_event_data(event); 3083 entry->ip = ip; 3084 entry->parent_ip = parent_ip; 3085 3086 if (!call_filter_check_discard(call, entry, buffer, event)) { 3087 if (static_branch_unlikely(&trace_function_exports_enabled)) 3088 ftrace_exports(event, TRACE_EXPORT_FUNCTION); 3089 __buffer_unlock_commit(buffer, event); 3090 } 3091 } 3092 3093 #ifdef CONFIG_STACKTRACE 3094 3095 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */ 3096 #define FTRACE_KSTACK_NESTING 4 3097 3098 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING) 3099 3100 struct ftrace_stack { 3101 unsigned long calls[FTRACE_KSTACK_ENTRIES]; 3102 }; 3103 3104 3105 struct ftrace_stacks { 3106 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING]; 3107 }; 3108 3109 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); 3110 static DEFINE_PER_CPU(int, ftrace_stack_reserve); 3111 3112 static void __ftrace_trace_stack(struct trace_buffer *buffer, 3113 unsigned int trace_ctx, 3114 int skip, struct pt_regs *regs) 3115 { 3116 struct trace_event_call *call = &event_kernel_stack; 3117 struct ring_buffer_event *event; 3118 unsigned int size, nr_entries; 3119 struct ftrace_stack *fstack; 3120 struct stack_entry *entry; 3121 int stackidx; 3122 3123 /* 3124 * Add one, for this function and the call to save_stack_trace() 3125 * If regs is set, then these functions will not be in the way. 3126 */ 3127 #ifndef CONFIG_UNWINDER_ORC 3128 if (!regs) 3129 skip++; 3130 #endif 3131 3132 preempt_disable_notrace(); 3133 3134 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; 3135 3136 /* This should never happen. 
If it does, yell once and skip */
3137 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3138 goto out;
3139
3140 /*
3141 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3142 * interrupt will either see the value pre-increment or
3143 * post-increment. If the interrupt happens pre-increment it will
3144 * have restored the counter when it returns. We just need a barrier
3145 * to keep gcc from moving things around.
3146 */
3147 barrier();
3148
3149 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3150 size = ARRAY_SIZE(fstack->calls);
3151
3152 if (regs) {
3153 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3154 size, skip);
3155 } else {
3156 nr_entries = stack_trace_save(fstack->calls, size, skip);
3157 }
3158
3159 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3160 struct_size(entry, caller, nr_entries),
3161 trace_ctx);
3162 if (!event)
3163 goto out;
3164 entry = ring_buffer_event_data(event);
3165
3166 entry->size = nr_entries;
3167 memcpy(&entry->caller, fstack->calls,
3168 flex_array_size(entry, caller, nr_entries));
3169
3170 if (!call_filter_check_discard(call, entry, buffer, event))
3171 __buffer_unlock_commit(buffer, event);
3172
3173 out:
3174 /* Again, don't let gcc optimize things here */
3175 barrier();
3176 __this_cpu_dec(ftrace_stack_reserve);
3177 preempt_enable_notrace();
3178
3179 }
3180
3181 static inline void ftrace_trace_stack(struct trace_array *tr,
3182 struct trace_buffer *buffer,
3183 unsigned int trace_ctx,
3184 int skip, struct pt_regs *regs)
3185 {
3186 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3187 return;
3188
3189 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3190 }
3191
3192 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3193 int skip)
3194 {
3195 struct trace_buffer *buffer = tr->array_buffer.buffer;
3196
3197 if (rcu_is_watching()) {
3198 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3199 return;
3200 }
3201
3202 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3203 return;
3204
3205 /*
3206 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3207 * but if the above rcu_is_watching() failed, then the NMI
3208 * triggered someplace critical, and ct_irq_enter() should
3209 * not be called from NMI.
3210 */
3211 if (unlikely(in_nmi()))
3212 return;
3213
3214 ct_irq_enter_irqson();
3215 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3216 ct_irq_exit_irqson();
3217 }
3218
3219 /**
3220 * trace_dump_stack - record a stack back trace in the trace buffer
3221 * @skip: Number of functions to skip (helper handlers)
3222 */
3223 void trace_dump_stack(int skip)
3224 {
3225 if (tracing_disabled || tracing_selftest_running)
3226 return;
3227
3228 #ifndef CONFIG_UNWINDER_ORC
3229 /* Skip 1 to skip this function. */
3230 skip++;
3231 #endif
3232 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3233 tracing_gen_ctx(), skip, NULL);
3234 }
3235 EXPORT_SYMBOL_GPL(trace_dump_stack);
3236
3237 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3238 static DEFINE_PER_CPU(int, user_stack_count);
3239
3240 static void
3241 ftrace_trace_userstack(struct trace_array *tr,
3242 struct trace_buffer *buffer, unsigned int trace_ctx)
3243 {
3244 struct trace_event_call *call = &event_user_stack;
3245 struct ring_buffer_event *event;
3246 struct userstack_entry *entry;
3247
3248 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3249 return;
3250
3251 /*
3252 * NMIs can not handle page faults, even with fixups.
3253 * Saving the user stack can (and often does) fault.
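* (stack_trace_save_user() has to read user memory to walk the
* frames, and those pages may not be resident.)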
3254 */ 3255 if (unlikely(in_nmi())) 3256 return; 3257 3258 /* 3259 * prevent recursion, since the user stack tracing may 3260 * trigger other kernel events. 3261 */ 3262 preempt_disable(); 3263 if (__this_cpu_read(user_stack_count)) 3264 goto out; 3265 3266 __this_cpu_inc(user_stack_count); 3267 3268 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 3269 sizeof(*entry), trace_ctx); 3270 if (!event) 3271 goto out_drop_count; 3272 entry = ring_buffer_event_data(event); 3273 3274 entry->tgid = current->tgid; 3275 memset(&entry->caller, 0, sizeof(entry->caller)); 3276 3277 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); 3278 if (!call_filter_check_discard(call, entry, buffer, event)) 3279 __buffer_unlock_commit(buffer, event); 3280 3281 out_drop_count: 3282 __this_cpu_dec(user_stack_count); 3283 out: 3284 preempt_enable(); 3285 } 3286 #else /* CONFIG_USER_STACKTRACE_SUPPORT */ 3287 static void ftrace_trace_userstack(struct trace_array *tr, 3288 struct trace_buffer *buffer, 3289 unsigned int trace_ctx) 3290 { 3291 } 3292 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ 3293 3294 #endif /* CONFIG_STACKTRACE */ 3295 3296 static inline void 3297 func_repeats_set_delta_ts(struct func_repeats_entry *entry, 3298 unsigned long long delta) 3299 { 3300 entry->bottom_delta_ts = delta & U32_MAX; 3301 entry->top_delta_ts = (delta >> 32); 3302 } 3303 3304 void trace_last_func_repeats(struct trace_array *tr, 3305 struct trace_func_repeats *last_info, 3306 unsigned int trace_ctx) 3307 { 3308 struct trace_buffer *buffer = tr->array_buffer.buffer; 3309 struct func_repeats_entry *entry; 3310 struct ring_buffer_event *event; 3311 u64 delta; 3312 3313 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS, 3314 sizeof(*entry), trace_ctx); 3315 if (!event) 3316 return; 3317 3318 delta = ring_buffer_event_time_stamp(buffer, event) - 3319 last_info->ts_last_call; 3320 3321 entry = ring_buffer_event_data(event); 3322 entry->ip = last_info->ip; 3323 entry->parent_ip = last_info->parent_ip; 3324 entry->count = last_info->count; 3325 func_repeats_set_delta_ts(entry, delta); 3326 3327 __buffer_unlock_commit(buffer, event); 3328 } 3329 3330 /* created for use with alloc_percpu */ 3331 struct trace_buffer_struct { 3332 int nesting; 3333 char buffer[4][TRACE_BUF_SIZE]; 3334 }; 3335 3336 static struct trace_buffer_struct __percpu *trace_percpu_buffer; 3337 3338 /* 3339 * This allows for lockless recording. If we're nested too deeply, then 3340 * this returns NULL. 
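*
* Callers pair the helpers like this (see trace_vbprintk() below):
*
*	preempt_disable_notrace();
*	tbuffer = get_trace_buf();
*	if (tbuffer) {
*		... format up to TRACE_BUF_SIZE bytes into tbuffer ...
*		put_trace_buf();
*	}
*	preempt_enable_notrace();
*
* Preemption must stay disabled between the two calls so both
* operate on the same CPU's buffer.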
3341 */ 3342 static char *get_trace_buf(void) 3343 { 3344 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); 3345 3346 if (!trace_percpu_buffer || buffer->nesting >= 4) 3347 return NULL; 3348 3349 buffer->nesting++; 3350 3351 /* Interrupts must see nesting incremented before we use the buffer */ 3352 barrier(); 3353 return &buffer->buffer[buffer->nesting - 1][0]; 3354 } 3355 3356 static void put_trace_buf(void) 3357 { 3358 /* Don't let the decrement of nesting leak before this */ 3359 barrier(); 3360 this_cpu_dec(trace_percpu_buffer->nesting); 3361 } 3362 3363 static int alloc_percpu_trace_buffer(void) 3364 { 3365 struct trace_buffer_struct __percpu *buffers; 3366 3367 if (trace_percpu_buffer) 3368 return 0; 3369 3370 buffers = alloc_percpu(struct trace_buffer_struct); 3371 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) 3372 return -ENOMEM; 3373 3374 trace_percpu_buffer = buffers; 3375 return 0; 3376 } 3377 3378 static int buffers_allocated; 3379 3380 void trace_printk_init_buffers(void) 3381 { 3382 if (buffers_allocated) 3383 return; 3384 3385 if (alloc_percpu_trace_buffer()) 3386 return; 3387 3388 /* trace_printk() is for debug use only. Don't use it in production. */ 3389 3390 pr_warn("\n"); 3391 pr_warn("**********************************************************\n"); 3392 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3393 pr_warn("** **\n"); 3394 pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); 3395 pr_warn("** **\n"); 3396 pr_warn("** This means that this is a DEBUG kernel and it is **\n"); 3397 pr_warn("** unsafe for production use. **\n"); 3398 pr_warn("** **\n"); 3399 pr_warn("** If you see this message and you are not debugging **\n"); 3400 pr_warn("** the kernel, report this immediately to your vendor! **\n"); 3401 pr_warn("** **\n"); 3402 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3403 pr_warn("**********************************************************\n"); 3404 3405 /* Expand the buffers to set size */ 3406 tracing_update_buffers(); 3407 3408 buffers_allocated = 1; 3409 3410 /* 3411 * trace_printk_init_buffers() can be called by modules. 3412 * If that happens, then we need to start cmdline recording 3413 * directly here. If the global_trace.buffer is already 3414 * allocated here, then this was called by module code. 
3415 */ 3416 if (global_trace.array_buffer.buffer) 3417 tracing_start_cmdline_record(); 3418 } 3419 EXPORT_SYMBOL_GPL(trace_printk_init_buffers); 3420 3421 void trace_printk_start_comm(void) 3422 { 3423 /* Start tracing comms if trace printk is set */ 3424 if (!buffers_allocated) 3425 return; 3426 tracing_start_cmdline_record(); 3427 } 3428 3429 static void trace_printk_start_stop_comm(int enabled) 3430 { 3431 if (!buffers_allocated) 3432 return; 3433 3434 if (enabled) 3435 tracing_start_cmdline_record(); 3436 else 3437 tracing_stop_cmdline_record(); 3438 } 3439 3440 /** 3441 * trace_vbprintk - write binary msg to tracing buffer 3442 * @ip: The address of the caller 3443 * @fmt: The string format to write to the buffer 3444 * @args: Arguments for @fmt 3445 */ 3446 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 3447 { 3448 struct trace_event_call *call = &event_bprint; 3449 struct ring_buffer_event *event; 3450 struct trace_buffer *buffer; 3451 struct trace_array *tr = &global_trace; 3452 struct bprint_entry *entry; 3453 unsigned int trace_ctx; 3454 char *tbuffer; 3455 int len = 0, size; 3456 3457 if (unlikely(tracing_selftest_running || tracing_disabled)) 3458 return 0; 3459 3460 /* Don't pollute graph traces with trace_vprintk internals */ 3461 pause_graph_tracing(); 3462 3463 trace_ctx = tracing_gen_ctx(); 3464 preempt_disable_notrace(); 3465 3466 tbuffer = get_trace_buf(); 3467 if (!tbuffer) { 3468 len = 0; 3469 goto out_nobuffer; 3470 } 3471 3472 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); 3473 3474 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) 3475 goto out_put; 3476 3477 size = sizeof(*entry) + sizeof(u32) * len; 3478 buffer = tr->array_buffer.buffer; 3479 ring_buffer_nest_start(buffer); 3480 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, 3481 trace_ctx); 3482 if (!event) 3483 goto out; 3484 entry = ring_buffer_event_data(event); 3485 entry->ip = ip; 3486 entry->fmt = fmt; 3487 3488 memcpy(entry->buf, tbuffer, sizeof(u32) * len); 3489 if (!call_filter_check_discard(call, entry, buffer, event)) { 3490 __buffer_unlock_commit(buffer, event); 3491 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL); 3492 } 3493 3494 out: 3495 ring_buffer_nest_end(buffer); 3496 out_put: 3497 put_trace_buf(); 3498 3499 out_nobuffer: 3500 preempt_enable_notrace(); 3501 unpause_graph_tracing(); 3502 3503 return len; 3504 } 3505 EXPORT_SYMBOL_GPL(trace_vbprintk); 3506 3507 __printf(3, 0) 3508 static int 3509 __trace_array_vprintk(struct trace_buffer *buffer, 3510 unsigned long ip, const char *fmt, va_list args) 3511 { 3512 struct trace_event_call *call = &event_print; 3513 struct ring_buffer_event *event; 3514 int len = 0, size; 3515 struct print_entry *entry; 3516 unsigned int trace_ctx; 3517 char *tbuffer; 3518 3519 if (tracing_disabled) 3520 return 0; 3521 3522 /* Don't pollute graph traces with trace_vprintk internals */ 3523 pause_graph_tracing(); 3524 3525 trace_ctx = tracing_gen_ctx(); 3526 preempt_disable_notrace(); 3527 3528 3529 tbuffer = get_trace_buf(); 3530 if (!tbuffer) { 3531 len = 0; 3532 goto out_nobuffer; 3533 } 3534 3535 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); 3536 3537 size = sizeof(*entry) + len + 1; 3538 ring_buffer_nest_start(buffer); 3539 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 3540 trace_ctx); 3541 if (!event) 3542 goto out; 3543 entry = ring_buffer_event_data(event); 3544 entry->ip = ip; 3545 3546 memcpy(&entry->buf, tbuffer, len + 1); 3547 if (!call_filter_check_discard(call, 
entry, buffer, event)) { 3548 __buffer_unlock_commit(buffer, event); 3549 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL); 3550 } 3551 3552 out: 3553 ring_buffer_nest_end(buffer); 3554 put_trace_buf(); 3555 3556 out_nobuffer: 3557 preempt_enable_notrace(); 3558 unpause_graph_tracing(); 3559 3560 return len; 3561 } 3562 3563 __printf(3, 0) 3564 int trace_array_vprintk(struct trace_array *tr, 3565 unsigned long ip, const char *fmt, va_list args) 3566 { 3567 if (tracing_selftest_running && tr == &global_trace) 3568 return 0; 3569 3570 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); 3571 } 3572 3573 /** 3574 * trace_array_printk - Print a message to a specific instance 3575 * @tr: The instance trace_array descriptor 3576 * @ip: The instruction pointer that this is called from. 3577 * @fmt: The format to print (printf format) 3578 * 3579 * If a subsystem sets up its own instance, they have the right to 3580 * printk strings into their tracing instance buffer using this 3581 * function. Note, this function will not write into the top level 3582 * buffer (use trace_printk() for that), as writing into the top level 3583 * buffer should only have events that can be individually disabled. 3584 * trace_printk() is only used for debugging a kernel, and should not 3585 * be ever incorporated in normal use. 3586 * 3587 * trace_array_printk() can be used, as it will not add noise to the 3588 * top level tracing buffer. 3589 * 3590 * Note, trace_array_init_printk() must be called on @tr before this 3591 * can be used. 3592 */ 3593 __printf(3, 0) 3594 int trace_array_printk(struct trace_array *tr, 3595 unsigned long ip, const char *fmt, ...) 3596 { 3597 int ret; 3598 va_list ap; 3599 3600 if (!tr) 3601 return -ENOENT; 3602 3603 /* This is only allowed for created instances */ 3604 if (tr == &global_trace) 3605 return 0; 3606 3607 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) 3608 return 0; 3609 3610 va_start(ap, fmt); 3611 ret = trace_array_vprintk(tr, ip, fmt, ap); 3612 va_end(ap); 3613 return ret; 3614 } 3615 EXPORT_SYMBOL_GPL(trace_array_printk); 3616 3617 /** 3618 * trace_array_init_printk - Initialize buffers for trace_array_printk() 3619 * @tr: The trace array to initialize the buffers for 3620 * 3621 * As trace_array_printk() only writes into instances, they are OK to 3622 * have in the kernel (unlike trace_printk()). This needs to be called 3623 * before trace_array_printk() can be used on a trace_array. 3624 */ 3625 int trace_array_init_printk(struct trace_array *tr) 3626 { 3627 if (!tr) 3628 return -ENOENT; 3629 3630 /* This is only allowed for created instances */ 3631 if (tr == &global_trace) 3632 return -EINVAL; 3633 3634 return alloc_percpu_trace_buffer(); 3635 } 3636 EXPORT_SYMBOL_GPL(trace_array_init_printk); 3637 3638 __printf(3, 4) 3639 int trace_array_printk_buf(struct trace_buffer *buffer, 3640 unsigned long ip, const char *fmt, ...) 
3641 { 3642 int ret; 3643 va_list ap; 3644 3645 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 3646 return 0; 3647 3648 va_start(ap, fmt); 3649 ret = __trace_array_vprintk(buffer, ip, fmt, ap); 3650 va_end(ap); 3651 return ret; 3652 } 3653 3654 __printf(2, 0) 3655 int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 3656 { 3657 return trace_array_vprintk(&global_trace, ip, fmt, args); 3658 } 3659 EXPORT_SYMBOL_GPL(trace_vprintk); 3660 3661 static void trace_iterator_increment(struct trace_iterator *iter) 3662 { 3663 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); 3664 3665 iter->idx++; 3666 if (buf_iter) 3667 ring_buffer_iter_advance(buf_iter); 3668 } 3669 3670 static struct trace_entry * 3671 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, 3672 unsigned long *lost_events) 3673 { 3674 struct ring_buffer_event *event; 3675 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); 3676 3677 if (buf_iter) { 3678 event = ring_buffer_iter_peek(buf_iter, ts); 3679 if (lost_events) 3680 *lost_events = ring_buffer_iter_dropped(buf_iter) ? 3681 (unsigned long)-1 : 0; 3682 } else { 3683 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, 3684 lost_events); 3685 } 3686 3687 if (event) { 3688 iter->ent_size = ring_buffer_event_length(event); 3689 return ring_buffer_event_data(event); 3690 } 3691 iter->ent_size = 0; 3692 return NULL; 3693 } 3694 3695 static struct trace_entry * 3696 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, 3697 unsigned long *missing_events, u64 *ent_ts) 3698 { 3699 struct trace_buffer *buffer = iter->array_buffer->buffer; 3700 struct trace_entry *ent, *next = NULL; 3701 unsigned long lost_events = 0, next_lost = 0; 3702 int cpu_file = iter->cpu_file; 3703 u64 next_ts = 0, ts; 3704 int next_cpu = -1; 3705 int next_size = 0; 3706 int cpu; 3707 3708 /* 3709 * If we are in a per_cpu trace file, don't bother by iterating over 3710 * all cpu and peek directly. 3711 */ 3712 if (cpu_file > RING_BUFFER_ALL_CPUS) { 3713 if (ring_buffer_empty_cpu(buffer, cpu_file)) 3714 return NULL; 3715 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); 3716 if (ent_cpu) 3717 *ent_cpu = cpu_file; 3718 3719 return ent; 3720 } 3721 3722 for_each_tracing_cpu(cpu) { 3723 3724 if (ring_buffer_empty_cpu(buffer, cpu)) 3725 continue; 3726 3727 ent = peek_next_entry(iter, cpu, &ts, &lost_events); 3728 3729 /* 3730 * Pick the entry with the smallest timestamp: 3731 */ 3732 if (ent && (!next || ts < next_ts)) { 3733 next = ent; 3734 next_cpu = cpu; 3735 next_ts = ts; 3736 next_lost = lost_events; 3737 next_size = iter->ent_size; 3738 } 3739 } 3740 3741 iter->ent_size = next_size; 3742 3743 if (ent_cpu) 3744 *ent_cpu = next_cpu; 3745 3746 if (ent_ts) 3747 *ent_ts = next_ts; 3748 3749 if (missing_events) 3750 *missing_events = next_lost; 3751 3752 return next; 3753 } 3754 3755 #define STATIC_FMT_BUF_SIZE 128 3756 static char static_fmt_buf[STATIC_FMT_BUF_SIZE]; 3757 3758 char *trace_iter_expand_format(struct trace_iterator *iter) 3759 { 3760 char *tmp; 3761 3762 /* 3763 * iter->tr is NULL when used with tp_printk, which makes 3764 * this get called where it is not safe to call krealloc(). 
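* Callers treat a NULL return as "give up and print using the
* format as-is".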
3765 */
3766 if (!iter->tr || iter->fmt == static_fmt_buf)
3767 return NULL;
3768
3769 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3770 GFP_KERNEL);
3771 if (tmp) {
3772 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3773 iter->fmt = tmp;
3774 }
3775
3776 return tmp;
3777 }
3778
3779 /* Returns true if the string is safe to dereference from an event */
3780 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3781 bool star, int len)
3782 {
3783 unsigned long addr = (unsigned long)str;
3784 struct trace_event *trace_event;
3785 struct trace_event_call *event;
3786
3787 /* Ignore strings with no length */
3788 if (star && !len)
3789 return true;
3790
3791 /* OK if part of the event data */
3792 if ((addr >= (unsigned long)iter->ent) &&
3793 (addr < (unsigned long)iter->ent + iter->ent_size))
3794 return true;
3795
3796 /* OK if part of the temp seq buffer */
3797 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3798 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3799 return true;
3800
3801 /* Core rodata can not be freed */
3802 if (is_kernel_rodata(addr))
3803 return true;
3804
3805 if (trace_is_tracepoint_string(str))
3806 return true;
3807
3808 /*
3809 * Now this could be a module event, referencing core module
3810 * data, which is OK.
3811 */
3812 if (!iter->ent)
3813 return false;
3814
3815 trace_event = ftrace_find_event(iter->ent->type);
3816 if (!trace_event)
3817 return false;
3818
3819 event = container_of(trace_event, struct trace_event_call, event);
3820 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3821 return false;
3822
3823 /* Would rather have rodata, but this will suffice */
3824 if (within_module_core(addr, event->module))
3825 return true;
3826
3827 return false;
3828 }
3829
3830 static const char *show_buffer(struct trace_seq *s)
3831 {
3832 struct seq_buf *seq = &s->seq;
3833
3834 seq_buf_terminate(seq);
3835
3836 return seq->buffer;
3837 }
3838
3839 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3840
3841 static int test_can_verify_check(const char *fmt, ...)
3842 {
3843 char buf[16];
3844 va_list ap;
3845 int ret;
3846
3847 /*
3848 * The verifier depends on vsnprintf() modifying the va_list that is
3849 * passed to it, where it is sent by reference. Some architectures
3850 * (like x86_32) pass it by value, which means that vsnprintf()
3851 * does not modify the va_list passed to it, and the verifier
3852 * would then need to be able to understand all the values that
3853 * vsnprintf can use. If it is passed by value, then the verifier
3854 * is disabled.
3855 */
3856 va_start(ap, fmt);
3857 vsnprintf(buf, 16, "%d", ap);
3858 ret = va_arg(ap, int);
3859 va_end(ap);
3860
3861 return ret;
3862 }
3863
3864 static void test_can_verify(void)
3865 {
3866 if (!test_can_verify_check("%d %d", 0, 1)) {
3867 pr_info("trace event string verifier disabled\n");
3868 static_branch_inc(&trace_no_verify);
3869 }
3870 }
3871
3872 /**
3873 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3874 * @iter: The iterator that holds the seq buffer and the event being printed
3875 * @fmt: The format used to print the event
3876 * @ap: The va_list holding the data to print from @fmt.
3877 *
3878 * This writes the data into the @iter->seq buffer using the data from
3879 * @fmt and @ap. If the format has a %s, then the source of the string
3880 * is examined to make sure it is safe to print, otherwise it will
3881 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3882 * pointer.
3883 */ 3884 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt, 3885 va_list ap) 3886 { 3887 const char *p = fmt; 3888 const char *str; 3889 int i, j; 3890 3891 if (WARN_ON_ONCE(!fmt)) 3892 return; 3893 3894 if (static_branch_unlikely(&trace_no_verify)) 3895 goto print; 3896 3897 /* Don't bother checking when doing a ftrace_dump() */ 3898 if (iter->fmt == static_fmt_buf) 3899 goto print; 3900 3901 while (*p) { 3902 bool star = false; 3903 int len = 0; 3904 3905 j = 0; 3906 3907 /* We only care about %s and variants */ 3908 for (i = 0; p[i]; i++) { 3909 if (i + 1 >= iter->fmt_size) { 3910 /* 3911 * If we can't expand the copy buffer, 3912 * just print it. 3913 */ 3914 if (!trace_iter_expand_format(iter)) 3915 goto print; 3916 } 3917 3918 if (p[i] == '\\' && p[i+1]) { 3919 i++; 3920 continue; 3921 } 3922 if (p[i] == '%') { 3923 /* Need to test cases like %08.*s */ 3924 for (j = 1; p[i+j]; j++) { 3925 if (isdigit(p[i+j]) || 3926 p[i+j] == '.') 3927 continue; 3928 if (p[i+j] == '*') { 3929 star = true; 3930 continue; 3931 } 3932 break; 3933 } 3934 if (p[i+j] == 's') 3935 break; 3936 star = false; 3937 } 3938 j = 0; 3939 } 3940 /* If no %s found then just print normally */ 3941 if (!p[i]) 3942 break; 3943 3944 /* Copy up to the %s, and print that */ 3945 strncpy(iter->fmt, p, i); 3946 iter->fmt[i] = '\0'; 3947 trace_seq_vprintf(&iter->seq, iter->fmt, ap); 3948 3949 /* 3950 * If iter->seq is full, the above call no longer guarantees 3951 * that ap is in sync with fmt processing, and further calls 3952 * to va_arg() can return wrong positional arguments. 3953 * 3954 * Ensure that ap is no longer used in this case. 3955 */ 3956 if (iter->seq.full) { 3957 p = ""; 3958 break; 3959 } 3960 3961 if (star) 3962 len = va_arg(ap, int); 3963 3964 /* The ap now points to the string data of the %s */ 3965 str = va_arg(ap, const char *); 3966 3967 /* 3968 * If you hit this warning, it is likely that the 3969 * trace event in question used %s on a string that 3970 * was saved at the time of the event, but may not be 3971 * around when the trace is read. Use __string(), 3972 * __assign_str() and __get_str() helpers in the TRACE_EVENT() 3973 * instead. See samples/trace_events/trace-events-sample.h 3974 * for reference. 
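*
* An illustrative TRACE_EVENT() fragment using those helpers (the
* "name"/"dev" identifiers are invented):
*
*	TP_STRUCT__entry(
*		__string(name, dev->name)
*	),
*	TP_fast_assign(
*		__assign_str(name, dev->name);
*	),
*	TP_printk("%s", __get_str(name))
*
* This copies the string contents into the event itself at record
* time, so the string is still valid when the event is read.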
3975 */ 3976 if (WARN_ONCE(!trace_safe_str(iter, str, star, len), 3977 "fmt: '%s' current_buffer: '%s'", 3978 fmt, show_buffer(&iter->seq))) { 3979 int ret; 3980 3981 /* Try to safely read the string */ 3982 if (star) { 3983 if (len + 1 > iter->fmt_size) 3984 len = iter->fmt_size - 1; 3985 if (len < 0) 3986 len = 0; 3987 ret = copy_from_kernel_nofault(iter->fmt, str, len); 3988 iter->fmt[len] = 0; 3989 star = false; 3990 } else { 3991 ret = strncpy_from_kernel_nofault(iter->fmt, str, 3992 iter->fmt_size); 3993 } 3994 if (ret < 0) 3995 trace_seq_printf(&iter->seq, "(0x%px)", str); 3996 else 3997 trace_seq_printf(&iter->seq, "(0x%px:%s)", 3998 str, iter->fmt); 3999 str = "[UNSAFE-MEMORY]"; 4000 strcpy(iter->fmt, "%s"); 4001 } else { 4002 strncpy(iter->fmt, p + i, j + 1); 4003 iter->fmt[j+1] = '\0'; 4004 } 4005 if (star) 4006 trace_seq_printf(&iter->seq, iter->fmt, len, str); 4007 else 4008 trace_seq_printf(&iter->seq, iter->fmt, str); 4009 4010 p += i + j + 1; 4011 } 4012 print: 4013 if (*p) 4014 trace_seq_vprintf(&iter->seq, p, ap); 4015 } 4016 4017 const char *trace_event_format(struct trace_iterator *iter, const char *fmt) 4018 { 4019 const char *p, *new_fmt; 4020 char *q; 4021 4022 if (WARN_ON_ONCE(!fmt)) 4023 return fmt; 4024 4025 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR) 4026 return fmt; 4027 4028 p = fmt; 4029 new_fmt = q = iter->fmt; 4030 while (*p) { 4031 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) { 4032 if (!trace_iter_expand_format(iter)) 4033 return fmt; 4034 4035 q += iter->fmt - new_fmt; 4036 new_fmt = iter->fmt; 4037 } 4038 4039 *q++ = *p++; 4040 4041 /* Replace %p with %px */ 4042 if (p[-1] == '%') { 4043 if (p[0] == '%') { 4044 *q++ = *p++; 4045 } else if (p[0] == 'p' && !isalnum(p[1])) { 4046 *q++ = *p++; 4047 *q++ = 'x'; 4048 } 4049 } 4050 } 4051 *q = '\0'; 4052 4053 return new_fmt; 4054 } 4055 4056 #define STATIC_TEMP_BUF_SIZE 128 4057 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4); 4058 4059 /* Find the next real entry, without updating the iterator itself */ 4060 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 4061 int *ent_cpu, u64 *ent_ts) 4062 { 4063 /* __find_next_entry will reset ent_size */ 4064 int ent_size = iter->ent_size; 4065 struct trace_entry *entry; 4066 4067 /* 4068 * If called from ftrace_dump(), then the iter->temp buffer 4069 * will be the static_temp_buf and not created from kmalloc. 4070 * If the entry size is greater than the buffer, we can 4071 * not save it. Just return NULL in that case. This is only 4072 * used to add markers when two consecutive events' time 4073 * stamps have a large delta. See trace_print_lat_context() 4074 */ 4075 if (iter->temp == static_temp_buf && 4076 STATIC_TEMP_BUF_SIZE < ent_size) 4077 return NULL; 4078 4079 /* 4080 * The __find_next_entry() may call peek_next_entry(), which may 4081 * call ring_buffer_peek() that may make the contents of iter->ent 4082 * undefined. Need to copy iter->ent now. 
4083 */ 4084 if (iter->ent && iter->ent != iter->temp) { 4085 if ((!iter->temp || iter->temp_size < iter->ent_size) && 4086 !WARN_ON_ONCE(iter->temp == static_temp_buf)) { 4087 void *temp; 4088 temp = kmalloc(iter->ent_size, GFP_KERNEL); 4089 if (!temp) 4090 return NULL; 4091 kfree(iter->temp); 4092 iter->temp = temp; 4093 iter->temp_size = iter->ent_size; 4094 } 4095 memcpy(iter->temp, iter->ent, iter->ent_size); 4096 iter->ent = iter->temp; 4097 } 4098 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); 4099 /* Put back the original ent_size */ 4100 iter->ent_size = ent_size; 4101 4102 return entry; 4103 } 4104 4105 /* Find the next real entry, and increment the iterator to the next entry */ 4106 void *trace_find_next_entry_inc(struct trace_iterator *iter) 4107 { 4108 iter->ent = __find_next_entry(iter, &iter->cpu, 4109 &iter->lost_events, &iter->ts); 4110 4111 if (iter->ent) 4112 trace_iterator_increment(iter); 4113 4114 return iter->ent ? iter : NULL; 4115 } 4116 4117 static void trace_consume(struct trace_iterator *iter) 4118 { 4119 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, 4120 &iter->lost_events); 4121 } 4122 4123 static void *s_next(struct seq_file *m, void *v, loff_t *pos) 4124 { 4125 struct trace_iterator *iter = m->private; 4126 int i = (int)*pos; 4127 void *ent; 4128 4129 WARN_ON_ONCE(iter->leftover); 4130 4131 (*pos)++; 4132 4133 /* can't go backwards */ 4134 if (iter->idx > i) 4135 return NULL; 4136 4137 if (iter->idx < 0) 4138 ent = trace_find_next_entry_inc(iter); 4139 else 4140 ent = iter; 4141 4142 while (ent && iter->idx < i) 4143 ent = trace_find_next_entry_inc(iter); 4144 4145 iter->pos = *pos; 4146 4147 return ent; 4148 } 4149 4150 void tracing_iter_reset(struct trace_iterator *iter, int cpu) 4151 { 4152 struct ring_buffer_iter *buf_iter; 4153 unsigned long entries = 0; 4154 u64 ts; 4155 4156 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; 4157 4158 buf_iter = trace_buffer_iter(iter, cpu); 4159 if (!buf_iter) 4160 return; 4161 4162 ring_buffer_iter_reset(buf_iter); 4163 4164 /* 4165 * With the max latency tracers, a cpu's buffer may never have 4166 * been reset. This is evident from timestamps that come before 4167 * the start of the buffer. 4168 */ 4169 while (ring_buffer_iter_peek(buf_iter, &ts)) { 4170 if (ts >= iter->array_buffer->time_start) 4171 break; 4172 entries++; 4173 ring_buffer_iter_advance(buf_iter); 4174 } 4175 4176 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; 4177 } 4178 4179 /* 4180 * The current tracer is copied to avoid holding a global lock 4181 * around the whole read.
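 *
 * For context, the seq_file core drives the four callbacks below roughly
 * like this (a simplified sketch, not the actual seq_read() code):
 *
 *	p = s_start(m, &pos);
 *	while (p) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);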
4182 */ 4183 static void *s_start(struct seq_file *m, loff_t *pos) 4184 { 4185 struct trace_iterator *iter = m->private; 4186 struct trace_array *tr = iter->tr; 4187 int cpu_file = iter->cpu_file; 4188 void *p = NULL; 4189 loff_t l = 0; 4190 int cpu; 4191 4192 mutex_lock(&trace_types_lock); 4193 if (unlikely(tr->current_trace != iter->trace)) { 4194 /* Close iter->trace before switching to the new current tracer */ 4195 if (iter->trace->close) 4196 iter->trace->close(iter); 4197 iter->trace = tr->current_trace; 4198 /* Reopen the new current tracer */ 4199 if (iter->trace->open) 4200 iter->trace->open(iter); 4201 } 4202 mutex_unlock(&trace_types_lock); 4203 4204 #ifdef CONFIG_TRACER_MAX_TRACE 4205 if (iter->snapshot && iter->trace->use_max_tr) 4206 return ERR_PTR(-EBUSY); 4207 #endif 4208 4209 if (*pos != iter->pos) { 4210 iter->ent = NULL; 4211 iter->cpu = 0; 4212 iter->idx = -1; 4213 4214 if (cpu_file == RING_BUFFER_ALL_CPUS) { 4215 for_each_tracing_cpu(cpu) 4216 tracing_iter_reset(iter, cpu); 4217 } else 4218 tracing_iter_reset(iter, cpu_file); 4219 4220 iter->leftover = 0; 4221 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 4222 ; 4223 4224 } else { 4225 /* 4226 * If we overflowed the seq_file before, then we want 4227 * to just reuse the trace_seq buffer again. 4228 */ 4229 if (iter->leftover) 4230 p = iter; 4231 else { 4232 l = *pos - 1; 4233 p = s_next(m, p, &l); 4234 } 4235 } 4236 4237 trace_event_read_lock(); 4238 trace_access_lock(cpu_file); 4239 return p; 4240 } 4241 4242 static void s_stop(struct seq_file *m, void *p) 4243 { 4244 struct trace_iterator *iter = m->private; 4245 4246 #ifdef CONFIG_TRACER_MAX_TRACE 4247 if (iter->snapshot && iter->trace->use_max_tr) 4248 return; 4249 #endif 4250 4251 trace_access_unlock(iter->cpu_file); 4252 trace_event_read_unlock(); 4253 } 4254 4255 static void 4256 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total, 4257 unsigned long *entries, int cpu) 4258 { 4259 unsigned long count; 4260 4261 count = ring_buffer_entries_cpu(buf->buffer, cpu); 4262 /* 4263 * If this buffer has skipped entries, then we hold all 4264 * entries for the trace and we need to ignore the 4265 * ones before the time stamp. 
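 * In other words, for the code below: "entries" is what is still readable
 * in the ring buffer, and "total" additionally counts events lost to
 * overwrite (ring_buffer_overrun_cpu()). When entries were skipped at
 * reset time, the buffer held everything, so total == entries.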
4266 */ 4267 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 4268 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 4269 /* total is the same as the entries */ 4270 *total = count; 4271 } else 4272 *total = count + 4273 ring_buffer_overrun_cpu(buf->buffer, cpu); 4274 *entries = count; 4275 } 4276 4277 static void 4278 get_total_entries(struct array_buffer *buf, 4279 unsigned long *total, unsigned long *entries) 4280 { 4281 unsigned long t, e; 4282 int cpu; 4283 4284 *total = 0; 4285 *entries = 0; 4286 4287 for_each_tracing_cpu(cpu) { 4288 get_total_entries_cpu(buf, &t, &e, cpu); 4289 *total += t; 4290 *entries += e; 4291 } 4292 } 4293 4294 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) 4295 { 4296 unsigned long total, entries; 4297 4298 if (!tr) 4299 tr = &global_trace; 4300 4301 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); 4302 4303 return entries; 4304 } 4305 4306 unsigned long trace_total_entries(struct trace_array *tr) 4307 { 4308 unsigned long total, entries; 4309 4310 if (!tr) 4311 tr = &global_trace; 4312 4313 get_total_entries(&tr->array_buffer, &total, &entries); 4314 4315 return entries; 4316 } 4317 4318 static void print_lat_help_header(struct seq_file *m) 4319 { 4320 seq_puts(m, "# _------=> CPU# \n" 4321 "# / _-----=> irqs-off/BH-disabled\n" 4322 "# | / _----=> need-resched \n" 4323 "# || / _---=> hardirq/softirq \n" 4324 "# ||| / _--=> preempt-depth \n" 4325 "# |||| / _-=> migrate-disable \n" 4326 "# ||||| / delay \n" 4327 "# cmd pid |||||| time | caller \n" 4328 "# \\ / |||||| \\ | / \n"); 4329 } 4330 4331 static void print_event_info(struct array_buffer *buf, struct seq_file *m) 4332 { 4333 unsigned long total; 4334 unsigned long entries; 4335 4336 get_total_entries(buf, &total, &entries); 4337 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", 4338 entries, total, num_online_cpus()); 4339 seq_puts(m, "#\n"); 4340 } 4341 4342 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, 4343 unsigned int flags) 4344 { 4345 bool tgid = flags & TRACE_ITER_RECORD_TGID; 4346 4347 print_event_info(buf, m); 4348 4349 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); 4350 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); 4351 } 4352 4353 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, 4354 unsigned int flags) 4355 { 4356 bool tgid = flags & TRACE_ITER_RECORD_TGID; 4357 static const char space[] = " "; 4358 int prec = tgid ? 
12 : 2; 4359 4360 print_event_info(buf, m); 4361 4362 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space); 4363 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); 4364 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); 4365 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); 4366 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); 4367 seq_printf(m, "# %.*s|||| / delay\n", prec, space); 4368 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID "); 4369 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | "); 4370 } 4371 4372 void 4373 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 4374 { 4375 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); 4376 struct array_buffer *buf = iter->array_buffer; 4377 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); 4378 struct tracer *type = iter->trace; 4379 unsigned long entries; 4380 unsigned long total; 4381 const char *name = type->name; 4382 4383 get_total_entries(buf, &total, &entries); 4384 4385 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 4386 name, UTS_RELEASE); 4387 seq_puts(m, "# -----------------------------------" 4388 "---------------------------------\n"); 4389 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" 4390 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 4391 nsecs_to_usecs(data->saved_latency), 4392 entries, 4393 total, 4394 buf->cpu, 4395 preempt_model_none() ? "server" : 4396 preempt_model_voluntary() ? "desktop" : 4397 preempt_model_full() ? "preempt" : 4398 preempt_model_rt() ? "preempt_rt" : 4399 "unknown", 4400 /* These are reserved for later use */ 4401 0, 0, 0, 0); 4402 #ifdef CONFIG_SMP 4403 seq_printf(m, " #P:%d)\n", num_online_cpus()); 4404 #else 4405 seq_puts(m, ")\n"); 4406 #endif 4407 seq_puts(m, "# -----------------\n"); 4408 seq_printf(m, "# | task: %.16s-%d " 4409 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 4410 data->comm, data->pid, 4411 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 4412 data->policy, data->rt_priority); 4413 seq_puts(m, "# -----------------\n"); 4414 4415 if (data->critical_start) { 4416 seq_puts(m, "# => started at: "); 4417 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 4418 trace_print_seq(m, &iter->seq); 4419 seq_puts(m, "\n# => ended at: "); 4420 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 4421 trace_print_seq(m, &iter->seq); 4422 seq_puts(m, "\n#\n"); 4423 } 4424 4425 seq_puts(m, "#\n"); 4426 } 4427 4428 static void test_cpu_buff_start(struct trace_iterator *iter) 4429 { 4430 struct trace_seq *s = &iter->seq; 4431 struct trace_array *tr = iter->tr; 4432 4433 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) 4434 return; 4435 4436 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 4437 return; 4438 4439 if (cpumask_available(iter->started) && 4440 cpumask_test_cpu(iter->cpu, iter->started)) 4441 return; 4442 4443 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) 4444 return; 4445 4446 if (cpumask_available(iter->started)) 4447 cpumask_set_cpu(iter->cpu, iter->started); 4448 4449 /* Don't print started cpu buffer for the first entry of the trace */ 4450 if (iter->idx > 1) 4451 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 4452 iter->cpu); 4453 } 4454 4455 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 4456 { 4457 struct trace_array *tr = iter->tr; 4458 struct trace_seq *s = &iter->seq; 4459 unsigned long sym_flags = (tr->trace_flags & 
TRACE_ITER_SYM_MASK); 4460 struct trace_entry *entry; 4461 struct trace_event *event; 4462 4463 entry = iter->ent; 4464 4465 test_cpu_buff_start(iter); 4466 4467 event = ftrace_find_event(entry->type); 4468 4469 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4470 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 4471 trace_print_lat_context(iter); 4472 else 4473 trace_print_context(iter); 4474 } 4475 4476 if (trace_seq_has_overflowed(s)) 4477 return TRACE_TYPE_PARTIAL_LINE; 4478 4479 if (event) { 4480 if (tr->trace_flags & TRACE_ITER_FIELDS) 4481 return print_event_fields(iter, event); 4482 return event->funcs->trace(iter, sym_flags, event); 4483 } 4484 4485 trace_seq_printf(s, "Unknown type %d\n", entry->type); 4486 4487 return trace_handle_return(s); 4488 } 4489 4490 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 4491 { 4492 struct trace_array *tr = iter->tr; 4493 struct trace_seq *s = &iter->seq; 4494 struct trace_entry *entry; 4495 struct trace_event *event; 4496 4497 entry = iter->ent; 4498 4499 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) 4500 trace_seq_printf(s, "%d %d %llu ", 4501 entry->pid, iter->cpu, iter->ts); 4502 4503 if (trace_seq_has_overflowed(s)) 4504 return TRACE_TYPE_PARTIAL_LINE; 4505 4506 event = ftrace_find_event(entry->type); 4507 if (event) 4508 return event->funcs->raw(iter, 0, event); 4509 4510 trace_seq_printf(s, "%d ?\n", entry->type); 4511 4512 return trace_handle_return(s); 4513 } 4514 4515 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 4516 { 4517 struct trace_array *tr = iter->tr; 4518 struct trace_seq *s = &iter->seq; 4519 unsigned char newline = '\n'; 4520 struct trace_entry *entry; 4521 struct trace_event *event; 4522 4523 entry = iter->ent; 4524 4525 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4526 SEQ_PUT_HEX_FIELD(s, entry->pid); 4527 SEQ_PUT_HEX_FIELD(s, iter->cpu); 4528 SEQ_PUT_HEX_FIELD(s, iter->ts); 4529 if (trace_seq_has_overflowed(s)) 4530 return TRACE_TYPE_PARTIAL_LINE; 4531 } 4532 4533 event = ftrace_find_event(entry->type); 4534 if (event) { 4535 enum print_line_t ret = event->funcs->hex(iter, 0, event); 4536 if (ret != TRACE_TYPE_HANDLED) 4537 return ret; 4538 } 4539 4540 SEQ_PUT_FIELD(s, newline); 4541 4542 return trace_handle_return(s); 4543 } 4544 4545 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 4546 { 4547 struct trace_array *tr = iter->tr; 4548 struct trace_seq *s = &iter->seq; 4549 struct trace_entry *entry; 4550 struct trace_event *event; 4551 4552 entry = iter->ent; 4553 4554 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4555 SEQ_PUT_FIELD(s, entry->pid); 4556 SEQ_PUT_FIELD(s, iter->cpu); 4557 SEQ_PUT_FIELD(s, iter->ts); 4558 if (trace_seq_has_overflowed(s)) 4559 return TRACE_TYPE_PARTIAL_LINE; 4560 } 4561 4562 event = ftrace_find_event(entry->type); 4563 return event ? 
event->funcs->binary(iter, 0, event) : 4564 TRACE_TYPE_HANDLED; 4565 } 4566 4567 int trace_empty(struct trace_iterator *iter) 4568 { 4569 struct ring_buffer_iter *buf_iter; 4570 int cpu; 4571 4572 /* If we are looking at one CPU buffer, only check that one */ 4573 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4574 cpu = iter->cpu_file; 4575 buf_iter = trace_buffer_iter(iter, cpu); 4576 if (buf_iter) { 4577 if (!ring_buffer_iter_empty(buf_iter)) 4578 return 0; 4579 } else { 4580 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) 4581 return 0; 4582 } 4583 return 1; 4584 } 4585 4586 for_each_tracing_cpu(cpu) { 4587 buf_iter = trace_buffer_iter(iter, cpu); 4588 if (buf_iter) { 4589 if (!ring_buffer_iter_empty(buf_iter)) 4590 return 0; 4591 } else { 4592 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) 4593 return 0; 4594 } 4595 } 4596 4597 return 1; 4598 } 4599 4600 /* Called with trace_event_read_lock() held. */ 4601 enum print_line_t print_trace_line(struct trace_iterator *iter) 4602 { 4603 struct trace_array *tr = iter->tr; 4604 unsigned long trace_flags = tr->trace_flags; 4605 enum print_line_t ret; 4606 4607 if (iter->lost_events) { 4608 if (iter->lost_events == (unsigned long)-1) 4609 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", 4610 iter->cpu); 4611 else 4612 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", 4613 iter->cpu, iter->lost_events); 4614 if (trace_seq_has_overflowed(&iter->seq)) 4615 return TRACE_TYPE_PARTIAL_LINE; 4616 } 4617 4618 if (iter->trace && iter->trace->print_line) { 4619 ret = iter->trace->print_line(iter); 4620 if (ret != TRACE_TYPE_UNHANDLED) 4621 return ret; 4622 } 4623 4624 if (iter->ent->type == TRACE_BPUTS && 4625 trace_flags & TRACE_ITER_PRINTK && 4626 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4627 return trace_print_bputs_msg_only(iter); 4628 4629 if (iter->ent->type == TRACE_BPRINT && 4630 trace_flags & TRACE_ITER_PRINTK && 4631 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4632 return trace_print_bprintk_msg_only(iter); 4633 4634 if (iter->ent->type == TRACE_PRINT && 4635 trace_flags & TRACE_ITER_PRINTK && 4636 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4637 return trace_print_printk_msg_only(iter); 4638 4639 if (trace_flags & TRACE_ITER_BIN) 4640 return print_bin_fmt(iter); 4641 4642 if (trace_flags & TRACE_ITER_HEX) 4643 return print_hex_fmt(iter); 4644 4645 if (trace_flags & TRACE_ITER_RAW) 4646 return print_raw_fmt(iter); 4647 4648 return print_trace_fmt(iter); 4649 } 4650 4651 void trace_latency_header(struct seq_file *m) 4652 { 4653 struct trace_iterator *iter = m->private; 4654 struct trace_array *tr = iter->tr; 4655 4656 /* print nothing if the buffers are empty */ 4657 if (trace_empty(iter)) 4658 return; 4659 4660 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 4661 print_trace_header(m, iter); 4662 4663 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) 4664 print_lat_help_header(m); 4665 } 4666 4667 void trace_default_header(struct seq_file *m) 4668 { 4669 struct trace_iterator *iter = m->private; 4670 struct trace_array *tr = iter->tr; 4671 unsigned long trace_flags = tr->trace_flags; 4672 4673 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 4674 return; 4675 4676 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 4677 /* print nothing if the buffers are empty */ 4678 if (trace_empty(iter)) 4679 return; 4680 print_trace_header(m, iter); 4681 if (!(trace_flags & TRACE_ITER_VERBOSE)) 4682 print_lat_help_header(m); 4683 } else { 4684 if (!(trace_flags & TRACE_ITER_VERBOSE)) { 4685 if (trace_flags & TRACE_ITER_IRQ_INFO) 4686 
print_func_help_header_irq(iter->array_buffer, 4687 m, trace_flags); 4688 else 4689 print_func_help_header(iter->array_buffer, m, 4690 trace_flags); 4691 } 4692 } 4693 } 4694 4695 static void test_ftrace_alive(struct seq_file *m) 4696 { 4697 if (!ftrace_is_dead()) 4698 return; 4699 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" 4700 "# MAY BE MISSING FUNCTION EVENTS\n"); 4701 } 4702 4703 #ifdef CONFIG_TRACER_MAX_TRACE 4704 static void show_snapshot_main_help(struct seq_file *m) 4705 { 4706 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" 4707 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 4708 "# Takes a snapshot of the main buffer.\n" 4709 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" 4710 "# (Doesn't have to be '2'; works with any number that\n" 4711 "# is not a '0' or '1')\n"); 4712 } 4713 4714 static void show_snapshot_percpu_help(struct seq_file *m) 4715 { 4716 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); 4717 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 4718 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 4719 "# Takes a snapshot of the main buffer for this cpu.\n"); 4720 #else 4721 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" 4722 "# Must use main snapshot file to allocate.\n"); 4723 #endif 4724 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" 4725 "# (Doesn't have to be '2'; works with any number that\n" 4726 "# is not a '0' or '1')\n"); 4727 } 4728 4729 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) 4730 { 4731 if (iter->tr->allocated_snapshot) 4732 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); 4733 else 4734 seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); 4735 4736 seq_puts(m, "# Snapshot commands:\n"); 4737 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4738 show_snapshot_main_help(m); 4739 else 4740 show_snapshot_percpu_help(m); 4741 } 4742 #else 4743 /* Should never be called */ 4744 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } 4745 #endif 4746 4747 static int s_show(struct seq_file *m, void *v) 4748 { 4749 struct trace_iterator *iter = v; 4750 int ret; 4751 4752 if (iter->ent == NULL) { 4753 if (iter->tr) { 4754 seq_printf(m, "# tracer: %s\n", iter->trace->name); 4755 seq_puts(m, "#\n"); 4756 test_ftrace_alive(m); 4757 } 4758 if (iter->snapshot && trace_empty(iter)) 4759 print_snapshot_help(m, iter); 4760 else if (iter->trace && iter->trace->print_header) 4761 iter->trace->print_header(m); 4762 else 4763 trace_default_header(m); 4764 4765 } else if (iter->leftover) { 4766 /* 4767 * If we filled the seq_file buffer earlier, we 4768 * want to just show it now. 4769 */ 4770 ret = trace_print_seq(m, &iter->seq); 4771 4772 /* ret should this time be zero, but you never know */ 4773 iter->leftover = ret; 4774 4775 } else { 4776 print_trace_line(iter); 4777 ret = trace_print_seq(m, &iter->seq); 4778 /* 4779 * If we overflow the seq_file buffer, then it will 4780 * ask us for this data again at start up. 4781 * Use that instead. 4782 * ret is 0 if seq_file write succeeded. 4783 * -1 otherwise. 4784 */ 4785 iter->leftover = ret; 4786 } 4787 4788 return 0; 4789 } 4790 4791 /* 4792 * Should be used after trace_array_get(); the trace_types_lock 4793 * ensures that i_cdev was already initialized.
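 *
 * trace_create_cpu_file() stores "cpu + 1" in i_cdev, so the decode
 * below works out as (sketch):
 *
 *	i_cdev == NULL	->  RING_BUFFER_ALL_CPUS (not a per-cpu file)
 *	i_cdev == N + 1	->  CPU N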
4794 */ 4795 static inline int tracing_get_cpu(struct inode *inode) 4796 { 4797 if (inode->i_cdev) /* See trace_create_cpu_file() */ 4798 return (long)inode->i_cdev - 1; 4799 return RING_BUFFER_ALL_CPUS; 4800 } 4801 4802 static const struct seq_operations tracer_seq_ops = { 4803 .start = s_start, 4804 .next = s_next, 4805 .stop = s_stop, 4806 .show = s_show, 4807 }; 4808 4809 /* 4810 * Note, as iter itself can be allocated and freed in different 4811 * ways, this function is only used to free its content, and not 4812 * the iterator itself. The only requirement on all the allocations 4813 * is that they zero all fields (kzalloc), as freeing works with 4814 * either allocated content or NULL. 4815 */ 4816 static void free_trace_iter_content(struct trace_iterator *iter) 4817 { 4818 /* The fmt is either NULL, allocated or points to static_fmt_buf */ 4819 if (iter->fmt != static_fmt_buf) 4820 kfree(iter->fmt); 4821 4822 kfree(iter->temp); 4823 kfree(iter->buffer_iter); 4824 mutex_destroy(&iter->mutex); 4825 free_cpumask_var(iter->started); 4826 } 4827 4828 static struct trace_iterator * 4829 __tracing_open(struct inode *inode, struct file *file, bool snapshot) 4830 { 4831 struct trace_array *tr = inode->i_private; 4832 struct trace_iterator *iter; 4833 int cpu; 4834 4835 if (tracing_disabled) 4836 return ERR_PTR(-ENODEV); 4837 4838 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); 4839 if (!iter) 4840 return ERR_PTR(-ENOMEM); 4841 4842 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), 4843 GFP_KERNEL); 4844 if (!iter->buffer_iter) 4845 goto release; 4846 4847 /* 4848 * trace_find_next_entry() may need to save off iter->ent. 4849 * It will place it into the iter->temp buffer. As most 4850 * events are less than 128, allocate a buffer of that size. 4851 * If one is greater, then trace_find_next_entry() will 4852 * allocate a new buffer to adjust for the bigger iter->ent. 4853 * It's not critical if it fails to get allocated here. 4854 */ 4855 iter->temp = kmalloc(128, GFP_KERNEL); 4856 if (iter->temp) 4857 iter->temp_size = 128; 4858 4859 /* 4860 * trace_event_printf() may need to modify the given format 4861 * string to replace %p with %px so that it shows the real address 4862 * instead of a hash value. However, only the event tracing needs 4863 * that; other tracers may not. Defer the allocation 4864 * until it is needed. 4865 */ 4866 iter->fmt = NULL; 4867 iter->fmt_size = 0; 4868 4869 mutex_lock(&trace_types_lock); 4870 iter->trace = tr->current_trace; 4871 4872 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 4873 goto fail; 4874 4875 iter->tr = tr; 4876 4877 #ifdef CONFIG_TRACER_MAX_TRACE 4878 /* Currently only the top directory has a snapshot */ 4879 if (tr->current_trace->print_max || snapshot) 4880 iter->array_buffer = &tr->max_buffer; 4881 else 4882 #endif 4883 iter->array_buffer = &tr->array_buffer; 4884 iter->snapshot = snapshot; 4885 iter->pos = -1; 4886 iter->cpu_file = tracing_get_cpu(inode); 4887 mutex_init(&iter->mutex); 4888 4889 /* Notify the tracer early; before we stop tracing. */ 4890 if (iter->trace->open) 4891 iter->trace->open(iter); 4892 4893 /* Annotate start of buffers if we had overruns */ 4894 if (ring_buffer_overruns(iter->array_buffer->buffer)) 4895 iter->iter_flags |= TRACE_FILE_ANNOTATE; 4896 4897 /* Output in nanoseconds only if we are using a clock in nanoseconds.
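 * (For example, the "local" and "global" clocks count in nanoseconds,
 * while "counter" is just an increment and is not; see the trace_clocks[]
 * table and the trace_clock file.)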
*/ 4898 if (trace_clocks[tr->clock_id].in_ns) 4899 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 4900 4901 /* 4902 * If pause-on-trace is enabled, then stop the trace while 4903 * dumping, unless this is the "snapshot" file 4904 */ 4905 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) 4906 tracing_stop_tr(tr); 4907 4908 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 4909 for_each_tracing_cpu(cpu) { 4910 iter->buffer_iter[cpu] = 4911 ring_buffer_read_prepare(iter->array_buffer->buffer, 4912 cpu, GFP_KERNEL); 4913 } 4914 ring_buffer_read_prepare_sync(); 4915 for_each_tracing_cpu(cpu) { 4916 ring_buffer_read_start(iter->buffer_iter[cpu]); 4917 tracing_iter_reset(iter, cpu); 4918 } 4919 } else { 4920 cpu = iter->cpu_file; 4921 iter->buffer_iter[cpu] = 4922 ring_buffer_read_prepare(iter->array_buffer->buffer, 4923 cpu, GFP_KERNEL); 4924 ring_buffer_read_prepare_sync(); 4925 ring_buffer_read_start(iter->buffer_iter[cpu]); 4926 tracing_iter_reset(iter, cpu); 4927 } 4928 4929 mutex_unlock(&trace_types_lock); 4930 4931 return iter; 4932 4933 fail: 4934 mutex_unlock(&trace_types_lock); 4935 free_trace_iter_content(iter); 4936 release: 4937 seq_release_private(inode, file); 4938 return ERR_PTR(-ENOMEM); 4939 } 4940 4941 int tracing_open_generic(struct inode *inode, struct file *filp) 4942 { 4943 int ret; 4944 4945 ret = tracing_check_open_get_tr(NULL); 4946 if (ret) 4947 return ret; 4948 4949 filp->private_data = inode->i_private; 4950 return 0; 4951 } 4952 4953 bool tracing_is_disabled(void) 4954 { 4955 return (tracing_disabled) ? true: false; 4956 } 4957 4958 /* 4959 * Open and update trace_array ref count. 4960 * Must have the current trace_array passed to it. 4961 */ 4962 int tracing_open_generic_tr(struct inode *inode, struct file *filp) 4963 { 4964 struct trace_array *tr = inode->i_private; 4965 int ret; 4966 4967 ret = tracing_check_open_get_tr(tr); 4968 if (ret) 4969 return ret; 4970 4971 filp->private_data = inode->i_private; 4972 4973 return 0; 4974 } 4975 4976 /* 4977 * The private pointer of the inode is the trace_event_file. 4978 * Update the tr ref count associated to it. 
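 * (The reference taken here is dropped by tracing_release_file_tr(),
 * defined just below.)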
4979 */ 4980 int tracing_open_file_tr(struct inode *inode, struct file *filp) 4981 { 4982 struct trace_event_file *file = inode->i_private; 4983 int ret; 4984 4985 ret = tracing_check_open_get_tr(file->tr); 4986 if (ret) 4987 return ret; 4988 4989 filp->private_data = inode->i_private; 4990 4991 return 0; 4992 } 4993 4994 int tracing_release_file_tr(struct inode *inode, struct file *filp) 4995 { 4996 struct trace_event_file *file = inode->i_private; 4997 4998 trace_array_put(file->tr); 4999 5000 return 0; 5001 } 5002 5003 static int tracing_mark_open(struct inode *inode, struct file *filp) 5004 { 5005 stream_open(inode, filp); 5006 return tracing_open_generic_tr(inode, filp); 5007 } 5008 5009 static int tracing_release(struct inode *inode, struct file *file) 5010 { 5011 struct trace_array *tr = inode->i_private; 5012 struct seq_file *m = file->private_data; 5013 struct trace_iterator *iter; 5014 int cpu; 5015 5016 if (!(file->f_mode & FMODE_READ)) { 5017 trace_array_put(tr); 5018 return 0; 5019 } 5020 5021 /* Writes do not use seq_file */ 5022 iter = m->private; 5023 mutex_lock(&trace_types_lock); 5024 5025 for_each_tracing_cpu(cpu) { 5026 if (iter->buffer_iter[cpu]) 5027 ring_buffer_read_finish(iter->buffer_iter[cpu]); 5028 } 5029 5030 if (iter->trace && iter->trace->close) 5031 iter->trace->close(iter); 5032 5033 if (!iter->snapshot && tr->stop_count) 5034 /* reenable tracing if it was previously enabled */ 5035 tracing_start_tr(tr); 5036 5037 __trace_array_put(tr); 5038 5039 mutex_unlock(&trace_types_lock); 5040 5041 free_trace_iter_content(iter); 5042 seq_release_private(inode, file); 5043 5044 return 0; 5045 } 5046 5047 static int tracing_release_generic_tr(struct inode *inode, struct file *file) 5048 { 5049 struct trace_array *tr = inode->i_private; 5050 5051 trace_array_put(tr); 5052 return 0; 5053 } 5054 5055 static int tracing_single_release_tr(struct inode *inode, struct file *file) 5056 { 5057 struct trace_array *tr = inode->i_private; 5058 5059 trace_array_put(tr); 5060 5061 return single_release(inode, file); 5062 } 5063 5064 static int tracing_open(struct inode *inode, struct file *file) 5065 { 5066 struct trace_array *tr = inode->i_private; 5067 struct trace_iterator *iter; 5068 int ret; 5069 5070 ret = tracing_check_open_get_tr(tr); 5071 if (ret) 5072 return ret; 5073 5074 /* If this file was open for write, then erase contents */ 5075 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 5076 int cpu = tracing_get_cpu(inode); 5077 struct array_buffer *trace_buf = &tr->array_buffer; 5078 5079 #ifdef CONFIG_TRACER_MAX_TRACE 5080 if (tr->current_trace->print_max) 5081 trace_buf = &tr->max_buffer; 5082 #endif 5083 5084 if (cpu == RING_BUFFER_ALL_CPUS) 5085 tracing_reset_online_cpus(trace_buf); 5086 else 5087 tracing_reset_cpu(trace_buf, cpu); 5088 } 5089 5090 if (file->f_mode & FMODE_READ) { 5091 iter = __tracing_open(inode, file, false); 5092 if (IS_ERR(iter)) 5093 ret = PTR_ERR(iter); 5094 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 5095 iter->iter_flags |= TRACE_FILE_LAT_FMT; 5096 } 5097 5098 if (ret < 0) 5099 trace_array_put(tr); 5100 5101 return ret; 5102 } 5103 5104 /* 5105 * Some tracers are not suitable for instance buffers. 5106 * A tracer is always available for the global array (toplevel) 5107 * or if it explicitly states that it is. 
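 *
 * A tracer opts in by setting .allow_instances in its struct tracer
 * definition, e.g. (an illustrative sketch, not a tracer defined in
 * this file):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name		 = "my_tracer",
 *		.allow_instances = true,
 *	};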
5108 */ 5109 static bool 5110 trace_ok_for_array(struct tracer *t, struct trace_array *tr) 5111 { 5112 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; 5113 } 5114 5115 /* Find the next tracer that this trace array may use */ 5116 static struct tracer * 5117 get_tracer_for_array(struct trace_array *tr, struct tracer *t) 5118 { 5119 while (t && !trace_ok_for_array(t, tr)) 5120 t = t->next; 5121 5122 return t; 5123 } 5124 5125 static void * 5126 t_next(struct seq_file *m, void *v, loff_t *pos) 5127 { 5128 struct trace_array *tr = m->private; 5129 struct tracer *t = v; 5130 5131 (*pos)++; 5132 5133 if (t) 5134 t = get_tracer_for_array(tr, t->next); 5135 5136 return t; 5137 } 5138 5139 static void *t_start(struct seq_file *m, loff_t *pos) 5140 { 5141 struct trace_array *tr = m->private; 5142 struct tracer *t; 5143 loff_t l = 0; 5144 5145 mutex_lock(&trace_types_lock); 5146 5147 t = get_tracer_for_array(tr, trace_types); 5148 for (; t && l < *pos; t = t_next(m, t, &l)) 5149 ; 5150 5151 return t; 5152 } 5153 5154 static void t_stop(struct seq_file *m, void *p) 5155 { 5156 mutex_unlock(&trace_types_lock); 5157 } 5158 5159 static int t_show(struct seq_file *m, void *v) 5160 { 5161 struct tracer *t = v; 5162 5163 if (!t) 5164 return 0; 5165 5166 seq_puts(m, t->name); 5167 if (t->next) 5168 seq_putc(m, ' '); 5169 else 5170 seq_putc(m, '\n'); 5171 5172 return 0; 5173 } 5174 5175 static const struct seq_operations show_traces_seq_ops = { 5176 .start = t_start, 5177 .next = t_next, 5178 .stop = t_stop, 5179 .show = t_show, 5180 }; 5181 5182 static int show_traces_open(struct inode *inode, struct file *file) 5183 { 5184 struct trace_array *tr = inode->i_private; 5185 struct seq_file *m; 5186 int ret; 5187 5188 ret = tracing_check_open_get_tr(tr); 5189 if (ret) 5190 return ret; 5191 5192 ret = seq_open(file, &show_traces_seq_ops); 5193 if (ret) { 5194 trace_array_put(tr); 5195 return ret; 5196 } 5197 5198 m = file->private_data; 5199 m->private = tr; 5200 5201 return 0; 5202 } 5203 5204 static int show_traces_release(struct inode *inode, struct file *file) 5205 { 5206 struct trace_array *tr = inode->i_private; 5207 5208 trace_array_put(tr); 5209 return seq_release(inode, file); 5210 } 5211 5212 static ssize_t 5213 tracing_write_stub(struct file *filp, const char __user *ubuf, 5214 size_t count, loff_t *ppos) 5215 { 5216 return count; 5217 } 5218 5219 loff_t tracing_lseek(struct file *file, loff_t offset, int whence) 5220 { 5221 int ret; 5222 5223 if (file->f_mode & FMODE_READ) 5224 ret = seq_lseek(file, offset, whence); 5225 else 5226 file->f_pos = ret = 0; 5227 5228 return ret; 5229 } 5230 5231 static const struct file_operations tracing_fops = { 5232 .open = tracing_open, 5233 .read = seq_read, 5234 .read_iter = seq_read_iter, 5235 .splice_read = copy_splice_read, 5236 .write = tracing_write_stub, 5237 .llseek = tracing_lseek, 5238 .release = tracing_release, 5239 }; 5240 5241 static const struct file_operations show_traces_fops = { 5242 .open = show_traces_open, 5243 .read = seq_read, 5244 .llseek = seq_lseek, 5245 .release = show_traces_release, 5246 }; 5247 5248 static ssize_t 5249 tracing_cpumask_read(struct file *filp, char __user *ubuf, 5250 size_t count, loff_t *ppos) 5251 { 5252 struct trace_array *tr = file_inode(filp)->i_private; 5253 char *mask_str; 5254 int len; 5255 5256 len = snprintf(NULL, 0, "%*pb\n", 5257 cpumask_pr_args(tr->tracing_cpumask)) + 1; 5258 mask_str = kmalloc(len, GFP_KERNEL); 5259 if (!mask_str) 5260 return -ENOMEM; 5261 5262 len = snprintf(mask_str, len, 
"%*pb\n", 5263 cpumask_pr_args(tr->tracing_cpumask)); 5264 if (len >= count) { 5265 count = -EINVAL; 5266 goto out_err; 5267 } 5268 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); 5269 5270 out_err: 5271 kfree(mask_str); 5272 5273 return count; 5274 } 5275 5276 int tracing_set_cpumask(struct trace_array *tr, 5277 cpumask_var_t tracing_cpumask_new) 5278 { 5279 int cpu; 5280 5281 if (!tr) 5282 return -EINVAL; 5283 5284 local_irq_disable(); 5285 arch_spin_lock(&tr->max_lock); 5286 for_each_tracing_cpu(cpu) { 5287 /* 5288 * Increase/decrease the disabled counter if we are 5289 * about to flip a bit in the cpumask: 5290 */ 5291 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && 5292 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 5293 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); 5294 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); 5295 #ifdef CONFIG_TRACER_MAX_TRACE 5296 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); 5297 #endif 5298 } 5299 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && 5300 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 5301 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); 5302 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); 5303 #ifdef CONFIG_TRACER_MAX_TRACE 5304 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); 5305 #endif 5306 } 5307 } 5308 arch_spin_unlock(&tr->max_lock); 5309 local_irq_enable(); 5310 5311 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 5312 5313 return 0; 5314 } 5315 5316 static ssize_t 5317 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 5318 size_t count, loff_t *ppos) 5319 { 5320 struct trace_array *tr = file_inode(filp)->i_private; 5321 cpumask_var_t tracing_cpumask_new; 5322 int err; 5323 5324 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 5325 return -ENOMEM; 5326 5327 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 5328 if (err) 5329 goto err_free; 5330 5331 err = tracing_set_cpumask(tr, tracing_cpumask_new); 5332 if (err) 5333 goto err_free; 5334 5335 free_cpumask_var(tracing_cpumask_new); 5336 5337 return count; 5338 5339 err_free: 5340 free_cpumask_var(tracing_cpumask_new); 5341 5342 return err; 5343 } 5344 5345 static const struct file_operations tracing_cpumask_fops = { 5346 .open = tracing_open_generic_tr, 5347 .read = tracing_cpumask_read, 5348 .write = tracing_cpumask_write, 5349 .release = tracing_release_generic_tr, 5350 .llseek = generic_file_llseek, 5351 }; 5352 5353 static int tracing_trace_options_show(struct seq_file *m, void *v) 5354 { 5355 struct tracer_opt *trace_opts; 5356 struct trace_array *tr = m->private; 5357 u32 tracer_flags; 5358 int i; 5359 5360 mutex_lock(&trace_types_lock); 5361 tracer_flags = tr->current_trace->flags->val; 5362 trace_opts = tr->current_trace->flags->opts; 5363 5364 for (i = 0; trace_options[i]; i++) { 5365 if (tr->trace_flags & (1 << i)) 5366 seq_printf(m, "%s\n", trace_options[i]); 5367 else 5368 seq_printf(m, "no%s\n", trace_options[i]); 5369 } 5370 5371 for (i = 0; trace_opts[i].name; i++) { 5372 if (tracer_flags & trace_opts[i].bit) 5373 seq_printf(m, "%s\n", trace_opts[i].name); 5374 else 5375 seq_printf(m, "no%s\n", trace_opts[i].name); 5376 } 5377 mutex_unlock(&trace_types_lock); 5378 5379 return 0; 5380 } 5381 5382 static int __set_tracer_option(struct trace_array *tr, 5383 struct tracer_flags *tracer_flags, 5384 struct tracer_opt *opts, int neg) 5385 { 5386 struct tracer *trace = tracer_flags->trace; 5387 int ret; 5388 5389 ret = 
trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); 5390 if (ret) 5391 return ret; 5392 5393 if (neg) 5394 tracer_flags->val &= ~opts->bit; 5395 else 5396 tracer_flags->val |= opts->bit; 5397 return 0; 5398 } 5399 5400 /* Try to assign a tracer specific option */ 5401 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) 5402 { 5403 struct tracer *trace = tr->current_trace; 5404 struct tracer_flags *tracer_flags = trace->flags; 5405 struct tracer_opt *opts = NULL; 5406 int i; 5407 5408 for (i = 0; tracer_flags->opts[i].name; i++) { 5409 opts = &tracer_flags->opts[i]; 5410 5411 if (strcmp(cmp, opts->name) == 0) 5412 return __set_tracer_option(tr, trace->flags, opts, neg); 5413 } 5414 5415 return -EINVAL; 5416 } 5417 5418 /* Some tracers require overwrite to stay enabled */ 5419 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) 5420 { 5421 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) 5422 return -1; 5423 5424 return 0; 5425 } 5426 5427 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) 5428 { 5429 int *map; 5430 5431 if ((mask == TRACE_ITER_RECORD_TGID) || 5432 (mask == TRACE_ITER_RECORD_CMD)) 5433 lockdep_assert_held(&event_mutex); 5434 5435 /* do nothing if flag is already set */ 5436 if (!!(tr->trace_flags & mask) == !!enabled) 5437 return 0; 5438 5439 /* Give the tracer a chance to approve the change */ 5440 if (tr->current_trace->flag_changed) 5441 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) 5442 return -EINVAL; 5443 5444 if (enabled) 5445 tr->trace_flags |= mask; 5446 else 5447 tr->trace_flags &= ~mask; 5448 5449 if (mask == TRACE_ITER_RECORD_CMD) 5450 trace_event_enable_cmd_record(enabled); 5451 5452 if (mask == TRACE_ITER_RECORD_TGID) { 5453 if (!tgid_map) { 5454 tgid_map_max = pid_max; 5455 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map), 5456 GFP_KERNEL); 5457 5458 /* 5459 * Pairs with smp_load_acquire() in 5460 * trace_find_tgid_ptr() to ensure that if it observes 5461 * the tgid_map we just allocated then it also observes 5462 * the corresponding tgid_map_max value. 
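 *
 * Roughly, that reader side looks like (see trace_find_tgid_ptr()):
 *
 *	int *map = smp_load_acquire(&tgid_map);
 *
 *	if (!map || pid > tgid_map_max)
 *		return NULL;
 *	return &map[pid];
 *
 * The release/acquire pairing guarantees that a reader seeing the new
 * pointer also sees the matching tgid_map_max.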
5463 */ 5464 smp_store_release(&tgid_map, map); 5465 } 5466 if (!tgid_map) { 5467 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; 5468 return -ENOMEM; 5469 } 5470 5471 trace_event_enable_tgid_record(enabled); 5472 } 5473 5474 if (mask == TRACE_ITER_EVENT_FORK) 5475 trace_event_follow_fork(tr, enabled); 5476 5477 if (mask == TRACE_ITER_FUNC_FORK) 5478 ftrace_pid_follow_fork(tr, enabled); 5479 5480 if (mask == TRACE_ITER_OVERWRITE) { 5481 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); 5482 #ifdef CONFIG_TRACER_MAX_TRACE 5483 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); 5484 #endif 5485 } 5486 5487 if (mask == TRACE_ITER_PRINTK) { 5488 trace_printk_start_stop_comm(enabled); 5489 trace_printk_control(enabled); 5490 } 5491 5492 return 0; 5493 } 5494 5495 int trace_set_options(struct trace_array *tr, char *option) 5496 { 5497 char *cmp; 5498 int neg = 0; 5499 int ret; 5500 size_t orig_len = strlen(option); 5501 int len; 5502 5503 cmp = strstrip(option); 5504 5505 len = str_has_prefix(cmp, "no"); 5506 if (len) 5507 neg = 1; 5508 5509 cmp += len; 5510 5511 mutex_lock(&event_mutex); 5512 mutex_lock(&trace_types_lock); 5513 5514 ret = match_string(trace_options, -1, cmp); 5515 /* If no option could be set, test the specific tracer options */ 5516 if (ret < 0) 5517 ret = set_tracer_option(tr, cmp, neg); 5518 else 5519 ret = set_tracer_flag(tr, 1 << ret, !neg); 5520 5521 mutex_unlock(&trace_types_lock); 5522 mutex_unlock(&event_mutex); 5523 5524 /* 5525 * If the first trailing whitespace is replaced with '\0' by strstrip, 5526 * turn it back into a space. 5527 */ 5528 if (orig_len > strlen(option)) 5529 option[strlen(option)] = ' '; 5530 5531 return ret; 5532 } 5533 5534 static void __init apply_trace_boot_options(void) 5535 { 5536 char *buf = trace_boot_options_buf; 5537 char *option; 5538 5539 while (true) { 5540 option = strsep(&buf, ","); 5541 5542 if (!option) 5543 break; 5544 5545 if (*option) 5546 trace_set_options(&global_trace, option); 5547 5548 /* Put back the comma to allow this to be called again */ 5549 if (buf) 5550 *(buf - 1) = ','; 5551 } 5552 } 5553 5554 static ssize_t 5555 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 5556 size_t cnt, loff_t *ppos) 5557 { 5558 struct seq_file *m = filp->private_data; 5559 struct trace_array *tr = m->private; 5560 char buf[64]; 5561 int ret; 5562 5563 if (cnt >= sizeof(buf)) 5564 return -EINVAL; 5565 5566 if (copy_from_user(buf, ubuf, cnt)) 5567 return -EFAULT; 5568 5569 buf[cnt] = 0; 5570 5571 ret = trace_set_options(tr, buf); 5572 if (ret < 0) 5573 return ret; 5574 5575 *ppos += cnt; 5576 5577 return cnt; 5578 } 5579 5580 static int tracing_trace_options_open(struct inode *inode, struct file *file) 5581 { 5582 struct trace_array *tr = inode->i_private; 5583 int ret; 5584 5585 ret = tracing_check_open_get_tr(tr); 5586 if (ret) 5587 return ret; 5588 5589 ret = single_open(file, tracing_trace_options_show, inode->i_private); 5590 if (ret < 0) 5591 trace_array_put(tr); 5592 5593 return ret; 5594 } 5595 5596 static const struct file_operations tracing_iter_fops = { 5597 .open = tracing_trace_options_open, 5598 .read = seq_read, 5599 .llseek = seq_lseek, 5600 .release = tracing_single_release_tr, 5601 .write = tracing_trace_options_write, 5602 }; 5603 5604 static const char readme_msg[] = 5605 "tracing mini-HOWTO:\n\n" 5606 "# echo 0 > tracing_on : quick way to disable tracing\n" 5607 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" 5608 " Important files:\n" 5609 " trace\t\t\t- The 
static contents of the buffer\n" 5610 "\t\t\t To clear the buffer write into this file: echo > trace\n" 5611 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" 5612 " current_tracer\t- function and latency tracers\n" 5613 " available_tracers\t- list of configured tracers for current_tracer\n" 5614 " error_log\t- error log for failed commands (that support it)\n" 5615 " buffer_size_kb\t- view and modify size of per cpu buffer\n" 5616 " buffer_total_size_kb - view total size of all cpu buffers\n\n" 5617 " trace_clock\t\t- change the clock used to order events\n" 5618 " local: Per cpu clock but may not be synced across CPUs\n" 5619 " global: Synced across CPUs but slows tracing down.\n" 5620 " counter: Not a clock, but just an increment\n" 5621 " uptime: Jiffy counter from time of boot\n" 5622 " perf: Same clock that perf events use\n" 5623 #ifdef CONFIG_X86_64 5624 " x86-tsc: TSC cycle counter\n" 5625 #endif 5626 "\n timestamp_mode\t- view the mode used to timestamp events\n" 5627 " delta: Delta difference against a buffer-wide timestamp\n" 5628 " absolute: Absolute (standalone) timestamp\n" 5629 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" 5630 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" 5631 " tracing_cpumask\t- Limit which CPUs to trace\n" 5632 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" 5633 "\t\t\t Remove sub-buffer with rmdir\n" 5634 " trace_options\t\t- Set format or modify how tracing happens\n" 5635 "\t\t\t Disable an option by prefixing 'no' to the\n" 5636 "\t\t\t option name\n" 5637 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" 5638 #ifdef CONFIG_DYNAMIC_FTRACE 5639 "\n available_filter_functions - list of functions that can be filtered on\n" 5640 " set_ftrace_filter\t- echo function name in here to only trace these\n" 5641 "\t\t\t functions\n" 5642 "\t accepts: func_full_name or glob-matching-pattern\n" 5643 "\t modules: Can select a group via module\n" 5644 "\t Format: :mod:<module-name>\n" 5645 "\t example: echo :mod:ext3 > set_ftrace_filter\n" 5646 "\t triggers: a command to perform when function is hit\n" 5647 "\t Format: <function>:<trigger>[:count]\n" 5648 "\t trigger: traceon, traceoff\n" 5649 "\t\t enable_event:<system>:<event>\n" 5650 "\t\t disable_event:<system>:<event>\n" 5651 #ifdef CONFIG_STACKTRACE 5652 "\t\t stacktrace\n" 5653 #endif 5654 #ifdef CONFIG_TRACER_SNAPSHOT 5655 "\t\t snapshot\n" 5656 #endif 5657 "\t\t dump\n" 5658 "\t\t cpudump\n" 5659 "\t example: echo do_fault:traceoff > set_ftrace_filter\n" 5660 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" 5661 "\t The first one will disable tracing every time do_fault is hit\n" 5662 "\t The second will disable tracing at most 3 times when do_trap is hit\n" 5663 "\t The first time do_trap is hit and it disables tracing, the\n" 5664 "\t counter will decrement to 2. If tracing is already disabled,\n" 5665 "\t the counter will not decrement.
 It only decrements when the\n" 5666 "\t trigger did work.\n" 5667 "\t To remove trigger without count:\n" 5668 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n" 5669 "\t To remove trigger with a count:\n" 5670 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n" 5671 " set_ftrace_notrace\t- echo function name in here to never trace.\n" 5672 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" 5673 "\t modules: Can select a group via module command :mod:\n" 5674 "\t Does not accept triggers\n" 5675 #endif /* CONFIG_DYNAMIC_FTRACE */ 5676 #ifdef CONFIG_FUNCTION_TRACER 5677 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" 5678 "\t\t (function)\n" 5679 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n" 5680 "\t\t (function)\n" 5681 #endif 5682 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5683 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" 5684 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" 5685 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" 5686 #endif 5687 #ifdef CONFIG_TRACER_SNAPSHOT 5688 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" 5689 "\t\t\t snapshot buffer. Read the contents for more\n" 5690 "\t\t\t information\n" 5691 #endif 5692 #ifdef CONFIG_STACK_TRACER 5693 " stack_trace\t\t- Shows the max stack trace when active\n" 5694 " stack_max_size\t- Shows current max stack size that was traced\n" 5695 "\t\t\t Write into this file to reset the max size (trigger a\n" 5696 "\t\t\t new trace)\n" 5697 #ifdef CONFIG_DYNAMIC_FTRACE 5698 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" 5699 "\t\t\t traces\n" 5700 #endif 5701 #endif /* CONFIG_STACK_TRACER */ 5702 #ifdef CONFIG_DYNAMIC_EVENTS 5703 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n" 5704 "\t\t\t Write into this file to define/undefine new trace events.\n" 5705 #endif 5706 #ifdef CONFIG_KPROBE_EVENTS 5707 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n" 5708 "\t\t\t Write into this file to define/undefine new trace events.\n" 5709 #endif 5710 #ifdef CONFIG_UPROBE_EVENTS 5711 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n" 5712 "\t\t\t Write into this file to define/undefine new trace events.\n" 5713 #endif 5714 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \ 5715 defined(CONFIG_FPROBE_EVENTS) 5716 "\t accepts: event-definitions (one definition per line)\n" 5717 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) 5718 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n" 5719 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n" 5720 #endif 5721 #ifdef CONFIG_FPROBE_EVENTS 5722 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n" 5723 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n" 5724 #endif 5725 #ifdef CONFIG_HIST_TRIGGERS 5726 "\t s:[synthetic/]<event> <field> [<field>]\n" 5727 #endif 5728 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n" 5729 "\t -:[<group>/][<event>]\n" 5730 #ifdef CONFIG_KPROBE_EVENTS 5731 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" 5732 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n" 5733 #endif 5734 #ifdef CONFIG_UPROBE_EVENTS 5735 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n" 5736 #endif 5737 "\t args: <name>=fetcharg[:type]\n" 5738 "\t fetcharg:
(%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n" 5739 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API 5740 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS 5741 "\t $stack<index>, $stack, $retval, $comm, $arg<N>, <argname>\n" 5742 #else 5743 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n" 5744 #endif 5745 #else 5746 "\t $stack<index>, $stack, $retval, $comm,\n" 5747 #endif 5748 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n" 5749 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n" 5750 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n" 5751 "\t symstr, <type>\\[<array-size>\\]\n" 5752 #ifdef CONFIG_HIST_TRIGGERS 5753 "\t field: <stype> <name>;\n" 5754 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" 5755 "\t [unsigned] char/int/long\n" 5756 #endif 5757 "\t efield: For event probes ('e' types), the field is one of the fields\n" 5758 "\t of the <attached-group>/<attached-event>.\n" 5759 #endif 5760 " events/\t\t- Directory containing all trace event subsystems:\n" 5761 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" 5762 " events/<system>/\t- Directory containing all trace events for <system>:\n" 5763 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" 5764 "\t\t\t events\n" 5765 " filter\t\t- If set, only events passing filter are traced\n" 5766 " events/<system>/<event>/\t- Directory containing control files for\n" 5767 "\t\t\t <event>:\n" 5768 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" 5769 " filter\t\t- If set, only events passing filter are traced\n" 5770 " trigger\t\t- If set, a command to perform when event is hit\n" 5771 "\t Format: <trigger>[:count][if <filter>]\n" 5772 "\t trigger: traceon, traceoff\n" 5773 "\t enable_event:<system>:<event>\n" 5774 "\t disable_event:<system>:<event>\n" 5775 #ifdef CONFIG_HIST_TRIGGERS 5776 "\t enable_hist:<system>:<event>\n" 5777 "\t disable_hist:<system>:<event>\n" 5778 #endif 5779 #ifdef CONFIG_STACKTRACE 5780 "\t\t stacktrace\n" 5781 #endif 5782 #ifdef CONFIG_TRACER_SNAPSHOT 5783 "\t\t snapshot\n" 5784 #endif 5785 #ifdef CONFIG_HIST_TRIGGERS 5786 "\t\t hist (see below)\n" 5787 #endif 5788 "\t example: echo traceoff > events/block/block_unplug/trigger\n" 5789 "\t echo traceoff:3 > events/block/block_unplug/trigger\n" 5790 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" 5791 "\t events/block/block_unplug/trigger\n" 5792 "\t The first disables tracing every time block_unplug is hit.\n" 5793 "\t The second disables tracing the first 3 times block_unplug is hit.\n" 5794 "\t The third enables the kmalloc event the first 3 times block_unplug\n" 5795 "\t is hit and has a value greater than 1 for the 'nr_rq' event field.\n" 5796 "\t Like function triggers, the counter is only decremented if it\n" 5797 "\t enabled or disabled tracing.\n" 5798 "\t To remove a trigger without a count:\n" 5799 "\t echo '!<trigger>' > <system>/<event>/trigger\n" 5800 "\t To remove a trigger with a count:\n" 5801 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n" 5802 "\t Filters can be ignored when removing a trigger.\n" 5803 #ifdef CONFIG_HIST_TRIGGERS 5804 " hist trigger\t- If set, event hits are aggregated into a hash table\n" 5805 "\t Format: hist:keys=<field1[,field2,...]>\n" 5806 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n" 5807 "\t [:values=<field1[,field2,...]>]\n" 5808 "\t [:sort=<field1[,field2,...]>]\n" 5809 "\t [:size=#entries]\n" 5810 "\t [:pause][:continue][:clear]\n" 5811 "\t [:name=histname1]\n" 5812 "\t
[:nohitcount]\n" 5813 "\t [:<handler>.<action>]\n" 5814 "\t [if <filter>]\n\n" 5815 "\t Note, special fields can be used as well:\n" 5816 "\t common_timestamp - to record current timestamp\n" 5817 "\t common_cpu - to record the CPU the event happened on\n" 5818 "\n" 5819 "\t A hist trigger variable can be:\n" 5820 "\t - a reference to a field e.g. x=common_timestamp,\n" 5821 "\t - a reference to another variable e.g. y=$x,\n" 5822 "\t - a numeric literal: e.g. ms_per_sec=1000,\n" 5823 "\t - an arithmetic expression: e.g. time_secs=common_timestamp/1000\n" 5824 "\n" 5825 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n" 5826 "\t multiplication(*) and division(/) operators. An operand can be either a\n" 5827 "\t variable reference, field or numeric literal.\n" 5828 "\n" 5829 "\t When a matching event is hit, an entry is added to a hash\n" 5830 "\t table using the key(s) and value(s) named, and the value of a\n" 5831 "\t sum called 'hitcount' is incremented. Keys and values\n" 5832 "\t correspond to fields in the event's format description. Keys\n" 5833 "\t can be any field, or the special string 'common_stacktrace'.\n" 5834 "\t Compound keys consisting of up to two fields can be specified\n" 5835 "\t by the 'keys' keyword. Values must correspond to numeric\n" 5836 "\t fields. Sort keys consisting of up to two fields can be\n" 5837 "\t specified using the 'sort' keyword. The sort direction can\n" 5838 "\t be modified by appending '.descending' or '.ascending' to a\n" 5839 "\t sort field. The 'size' parameter can be used to specify more\n" 5840 "\t or fewer than the default 2048 entries for the hashtable size.\n" 5841 "\t If a hist trigger is given a name using the 'name' parameter,\n" 5842 "\t its histogram data will be shared with other triggers of the\n" 5843 "\t same name, and trigger hits will update this common data.\n\n" 5844 "\t Reading the 'hist' file for the event will dump the hash\n" 5845 "\t table in its entirety to stdout. If there are multiple hist\n" 5846 "\t triggers attached to an event, there will be a table for each\n" 5847 "\t trigger in the output. The table displayed for a named\n" 5848 "\t trigger will be the same as any other instance having the\n" 5849 "\t same name. The default format used to display a given field\n" 5850 "\t can be modified by appending any of the following modifiers\n" 5851 "\t to the field name, as applicable:\n\n" 5852 "\t .hex display a number as a hex value\n" 5853 "\t .sym display an address as a symbol\n" 5854 "\t .sym-offset display an address as a symbol and offset\n" 5855 "\t .execname display a common_pid as a program name\n" 5856 "\t .syscall display a syscall id as a syscall name\n" 5857 "\t .log2 display log2 value rather than raw number\n" 5858 "\t .buckets=size display values in groups of size rather than raw number\n" 5859 "\t .usecs display a common_timestamp in microseconds\n" 5860 "\t .percent display a number as a percentage value\n" 5861 "\t .graph display a bar-graph of a value\n\n" 5862 "\t The 'pause' parameter can be used to pause an existing hist\n" 5863 "\t trigger or to start a hist trigger but not log any events\n" 5864 "\t until told to do so.
'continue' can be used to start or\n" 5865 "\t restart a paused hist trigger.\n\n" 5866 "\t The 'clear' parameter will clear the contents of a running\n" 5867 "\t hist trigger and leave its current paused/active state\n" 5868 "\t unchanged.\n\n" 5869 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n" 5870 "\t raw hitcount in the histogram.\n\n" 5871 "\t The enable_hist and disable_hist triggers can be used to\n" 5872 "\t have one event conditionally start and stop another event's\n" 5873 "\t already-attached hist trigger. The syntax is analogous to\n" 5874 "\t the enable_event and disable_event triggers.\n\n" 5875 "\t Hist trigger handlers and actions are executed whenever\n" 5876 "\t a histogram entry is added or updated. They take the form:\n\n" 5877 "\t <handler>.<action>\n\n" 5878 "\t The available handlers are:\n\n" 5879 "\t onmatch(matching.event) - invoke on addition or update\n" 5880 "\t onmax(var) - invoke if var exceeds current max\n" 5881 "\t onchange(var) - invoke action if var changes\n\n" 5882 "\t The available actions are:\n\n" 5883 "\t trace(<synthetic_event>,param list) - generate synthetic event\n" 5884 "\t save(field,...) - save current event fields\n" 5885 #ifdef CONFIG_TRACER_SNAPSHOT 5886 "\t snapshot() - snapshot the trace buffer\n\n" 5887 #endif 5888 #ifdef CONFIG_SYNTH_EVENTS 5889 " events/synthetic_events\t- Create/append/remove/show synthetic events\n" 5890 "\t Write into this file to define/undefine new synthetic events.\n" 5891 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n" 5892 #endif 5893 #endif 5894 ; 5895 5896 static ssize_t 5897 tracing_readme_read(struct file *filp, char __user *ubuf, 5898 size_t cnt, loff_t *ppos) 5899 { 5900 return simple_read_from_buffer(ubuf, cnt, ppos, 5901 readme_msg, strlen(readme_msg)); 5902 } 5903 5904 static const struct file_operations tracing_readme_fops = { 5905 .open = tracing_open_generic, 5906 .read = tracing_readme_read, 5907 .llseek = generic_file_llseek, 5908 }; 5909 5910 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos) 5911 { 5912 int pid = ++(*pos); 5913 5914 return trace_find_tgid_ptr(pid); 5915 } 5916 5917 static void *saved_tgids_start(struct seq_file *m, loff_t *pos) 5918 { 5919 int pid = *pos; 5920 5921 return trace_find_tgid_ptr(pid); 5922 } 5923 5924 static void saved_tgids_stop(struct seq_file *m, void *v) 5925 { 5926 } 5927 5928 static int saved_tgids_show(struct seq_file *m, void *v) 5929 { 5930 int *entry = (int *)v; 5931 int pid = entry - tgid_map; 5932 int tgid = *entry; 5933 5934 if (tgid == 0) 5935 return SEQ_SKIP; 5936 5937 seq_printf(m, "%d %d\n", pid, tgid); 5938 return 0; 5939 } 5940 5941 static const struct seq_operations tracing_saved_tgids_seq_ops = { 5942 .start = saved_tgids_start, 5943 .stop = saved_tgids_stop, 5944 .next = saved_tgids_next, 5945 .show = saved_tgids_show, 5946 }; 5947 5948 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) 5949 { 5950 int ret; 5951 5952 ret = tracing_check_open_get_tr(NULL); 5953 if (ret) 5954 return ret; 5955 5956 return seq_open(filp, &tracing_saved_tgids_seq_ops); 5957 } 5958 5959 5960 static const struct file_operations tracing_saved_tgids_fops = { 5961 .open = tracing_saved_tgids_open, 5962 .read = seq_read, 5963 .llseek = seq_lseek, 5964 .release = seq_release, 5965 }; 5966 5967 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos) 5968 { 5969 unsigned int *ptr = v; 5970 5971 if (*pos || m->count) 5972 ptr++; 5973 5974 (*pos)++;
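	/* Advance to the next slot that actually maps a saved PID */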

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read
= tracing_saved_cmdlines_size_read, 6120 .write = tracing_saved_cmdlines_size_write, 6121 }; 6122 6123 #ifdef CONFIG_TRACE_EVAL_MAP_FILE 6124 static union trace_eval_map_item * 6125 update_eval_map(union trace_eval_map_item *ptr) 6126 { 6127 if (!ptr->map.eval_string) { 6128 if (ptr->tail.next) { 6129 ptr = ptr->tail.next; 6130 /* Set ptr to the next real item (skip head) */ 6131 ptr++; 6132 } else 6133 return NULL; 6134 } 6135 return ptr; 6136 } 6137 6138 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) 6139 { 6140 union trace_eval_map_item *ptr = v; 6141 6142 /* 6143 * Paranoid! If ptr points to end, we don't want to increment past it. 6144 * This really should never happen. 6145 */ 6146 (*pos)++; 6147 ptr = update_eval_map(ptr); 6148 if (WARN_ON_ONCE(!ptr)) 6149 return NULL; 6150 6151 ptr++; 6152 ptr = update_eval_map(ptr); 6153 6154 return ptr; 6155 } 6156 6157 static void *eval_map_start(struct seq_file *m, loff_t *pos) 6158 { 6159 union trace_eval_map_item *v; 6160 loff_t l = 0; 6161 6162 mutex_lock(&trace_eval_mutex); 6163 6164 v = trace_eval_maps; 6165 if (v) 6166 v++; 6167 6168 while (v && l < *pos) { 6169 v = eval_map_next(m, v, &l); 6170 } 6171 6172 return v; 6173 } 6174 6175 static void eval_map_stop(struct seq_file *m, void *v) 6176 { 6177 mutex_unlock(&trace_eval_mutex); 6178 } 6179 6180 static int eval_map_show(struct seq_file *m, void *v) 6181 { 6182 union trace_eval_map_item *ptr = v; 6183 6184 seq_printf(m, "%s %ld (%s)\n", 6185 ptr->map.eval_string, ptr->map.eval_value, 6186 ptr->map.system); 6187 6188 return 0; 6189 } 6190 6191 static const struct seq_operations tracing_eval_map_seq_ops = { 6192 .start = eval_map_start, 6193 .next = eval_map_next, 6194 .stop = eval_map_stop, 6195 .show = eval_map_show, 6196 }; 6197 6198 static int tracing_eval_map_open(struct inode *inode, struct file *filp) 6199 { 6200 int ret; 6201 6202 ret = tracing_check_open_get_tr(NULL); 6203 if (ret) 6204 return ret; 6205 6206 return seq_open(filp, &tracing_eval_map_seq_ops); 6207 } 6208 6209 static const struct file_operations tracing_eval_map_fops = { 6210 .open = tracing_eval_map_open, 6211 .read = seq_read, 6212 .llseek = seq_lseek, 6213 .release = seq_release, 6214 }; 6215 6216 static inline union trace_eval_map_item * 6217 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) 6218 { 6219 /* Return tail of array given the head */ 6220 return ptr + ptr->head.length + 1; 6221 } 6222 6223 static void 6224 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, 6225 int len) 6226 { 6227 struct trace_eval_map **stop; 6228 struct trace_eval_map **map; 6229 union trace_eval_map_item *map_array; 6230 union trace_eval_map_item *ptr; 6231 6232 stop = start + len; 6233 6234 /* 6235 * The trace_eval_maps contains the map plus a head and tail item, 6236 * where the head holds the module and length of array, and the 6237 * tail holds a pointer to the next list. 
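	 *
	 * A rough sketch of the resulting layout (illustrative only):
	 *
	 *	map_array[0]          head { mod, length = len }
	 *	map_array[1 .. len]   one trace_eval_map per saved entry
	 *	map_array[len + 1]    tail { next, NULL until another array is chained on }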
6238 */ 6239 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); 6240 if (!map_array) { 6241 pr_warn("Unable to allocate trace eval mapping\n"); 6242 return; 6243 } 6244 6245 mutex_lock(&trace_eval_mutex); 6246 6247 if (!trace_eval_maps) 6248 trace_eval_maps = map_array; 6249 else { 6250 ptr = trace_eval_maps; 6251 for (;;) { 6252 ptr = trace_eval_jmp_to_tail(ptr); 6253 if (!ptr->tail.next) 6254 break; 6255 ptr = ptr->tail.next; 6256 6257 } 6258 ptr->tail.next = map_array; 6259 } 6260 map_array->head.mod = mod; 6261 map_array->head.length = len; 6262 map_array++; 6263 6264 for (map = start; (unsigned long)map < (unsigned long)stop; map++) { 6265 map_array->map = **map; 6266 map_array++; 6267 } 6268 memset(map_array, 0, sizeof(*map_array)); 6269 6270 mutex_unlock(&trace_eval_mutex); 6271 } 6272 6273 static void trace_create_eval_file(struct dentry *d_tracer) 6274 { 6275 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer, 6276 NULL, &tracing_eval_map_fops); 6277 } 6278 6279 #else /* CONFIG_TRACE_EVAL_MAP_FILE */ 6280 static inline void trace_create_eval_file(struct dentry *d_tracer) { } 6281 static inline void trace_insert_eval_map_file(struct module *mod, 6282 struct trace_eval_map **start, int len) { } 6283 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ 6284 6285 static void trace_insert_eval_map(struct module *mod, 6286 struct trace_eval_map **start, int len) 6287 { 6288 struct trace_eval_map **map; 6289 6290 if (len <= 0) 6291 return; 6292 6293 map = start; 6294 6295 trace_event_eval_update(map, len); 6296 6297 trace_insert_eval_map_file(mod, start, len); 6298 } 6299 6300 static ssize_t 6301 tracing_set_trace_read(struct file *filp, char __user *ubuf, 6302 size_t cnt, loff_t *ppos) 6303 { 6304 struct trace_array *tr = filp->private_data; 6305 char buf[MAX_TRACER_SIZE+2]; 6306 int r; 6307 6308 mutex_lock(&trace_types_lock); 6309 r = sprintf(buf, "%s\n", tr->current_trace->name); 6310 mutex_unlock(&trace_types_lock); 6311 6312 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6313 } 6314 6315 int tracer_init(struct tracer *t, struct trace_array *tr) 6316 { 6317 tracing_reset_online_cpus(&tr->array_buffer); 6318 return t->init(tr); 6319 } 6320 6321 static void set_buffer_entries(struct array_buffer *buf, unsigned long val) 6322 { 6323 int cpu; 6324 6325 for_each_tracing_cpu(cpu) 6326 per_cpu_ptr(buf->data, cpu)->entries = val; 6327 } 6328 6329 static void update_buffer_entries(struct array_buffer *buf, int cpu) 6330 { 6331 if (cpu == RING_BUFFER_ALL_CPUS) { 6332 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0)); 6333 } else { 6334 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu); 6335 } 6336 } 6337 6338 #ifdef CONFIG_TRACER_MAX_TRACE 6339 /* resize @tr's buffer to the size of @size_tr's entries */ 6340 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, 6341 struct array_buffer *size_buf, int cpu_id) 6342 { 6343 int cpu, ret = 0; 6344 6345 if (cpu_id == RING_BUFFER_ALL_CPUS) { 6346 for_each_tracing_cpu(cpu) { 6347 ret = ring_buffer_resize(trace_buf->buffer, 6348 per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 6349 if (ret < 0) 6350 break; 6351 per_cpu_ptr(trace_buf->data, cpu)->entries = 6352 per_cpu_ptr(size_buf->data, cpu)->entries; 6353 } 6354 } else { 6355 ret = ring_buffer_resize(trace_buf->buffer, 6356 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 6357 if (ret == 0) 6358 per_cpu_ptr(trace_buf->data, cpu_id)->entries = 6359 per_cpu_ptr(size_buf->data, cpu_id)->entries; 6360 } 6361 6362 return ret; 6363 
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->array_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->array_buffer,
						     &tr->array_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed
			 * to update the size of the max buffer. But when we
			 * tried to reset the main buffer to the original
			 * size, we failed there too. This is very unlikely
			 * to happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	update_buffer_entries(&tr->max_buffer, cpu);

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	update_buffer_entries(&tr->array_buffer, cpu);

	return ret;
}

ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
				   unsigned long size, int cpu_id)
{
	int ret;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system with it
 * configured in, the ring buffers are set to a minimum size. Once
 * a user starts to use the tracing facility, they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						   RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
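 *
 * A minimal caller sketch (illustrative; the instance-teardown path):
 *
 *	mutex_lock(&trace_types_lock);
 *	tracing_set_nop(tr);
 *	mutex_unlock(&trace_types_lock);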
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static bool tracer_options_updated;

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	/* Only create trace option files after update_tracer_options finish */
	if (!tracer_options_updated)
		return;

	create_trace_option_files(tr, t);
}

int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						   RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

#ifdef CONFIG_TRACER_SNAPSHOT
	if (t->use_max_tr) {
		local_irq_disable();
		arch_spin_lock(&tr->max_lock);
		if (tr->cond_snapshot)
			ret = -EBUSY;
		arch_spin_unlock(&tr->max_lock);
		local_irq_enable();
		if (ret)
			goto out;
	}
#endif
	/* Some tracers won't work on the kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->trace_ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->current_trace->use_max_tr;

	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a synchronize_rcu() is sufficient.
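		 *
		 * The ordering relied upon, roughly:
		 *
		 *	tr->current_trace = &nop_trace;
		 *	synchronize_rcu();    <-- in-flight update_max_tr() calls finish
		 *	free_snapshot(tr);    <-- only now is the max buffer freed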
6592 */ 6593 synchronize_rcu(); 6594 free_snapshot(tr); 6595 } 6596 6597 if (t->use_max_tr && !tr->allocated_snapshot) { 6598 ret = tracing_alloc_snapshot_instance(tr); 6599 if (ret < 0) 6600 goto out; 6601 } 6602 #else 6603 tr->current_trace = &nop_trace; 6604 #endif 6605 6606 if (t->init) { 6607 ret = tracer_init(t, tr); 6608 if (ret) 6609 goto out; 6610 } 6611 6612 tr->current_trace = t; 6613 tr->current_trace->enabled++; 6614 trace_branch_enable(tr); 6615 out: 6616 mutex_unlock(&trace_types_lock); 6617 6618 return ret; 6619 } 6620 6621 static ssize_t 6622 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 6623 size_t cnt, loff_t *ppos) 6624 { 6625 struct trace_array *tr = filp->private_data; 6626 char buf[MAX_TRACER_SIZE+1]; 6627 char *name; 6628 size_t ret; 6629 int err; 6630 6631 ret = cnt; 6632 6633 if (cnt > MAX_TRACER_SIZE) 6634 cnt = MAX_TRACER_SIZE; 6635 6636 if (copy_from_user(buf, ubuf, cnt)) 6637 return -EFAULT; 6638 6639 buf[cnt] = 0; 6640 6641 name = strim(buf); 6642 6643 err = tracing_set_tracer(tr, name); 6644 if (err) 6645 return err; 6646 6647 *ppos += ret; 6648 6649 return ret; 6650 } 6651 6652 static ssize_t 6653 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, 6654 size_t cnt, loff_t *ppos) 6655 { 6656 char buf[64]; 6657 int r; 6658 6659 r = snprintf(buf, sizeof(buf), "%ld\n", 6660 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 6661 if (r > sizeof(buf)) 6662 r = sizeof(buf); 6663 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6664 } 6665 6666 static ssize_t 6667 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, 6668 size_t cnt, loff_t *ppos) 6669 { 6670 unsigned long val; 6671 int ret; 6672 6673 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6674 if (ret) 6675 return ret; 6676 6677 *ptr = val * 1000; 6678 6679 return cnt; 6680 } 6681 6682 static ssize_t 6683 tracing_thresh_read(struct file *filp, char __user *ubuf, 6684 size_t cnt, loff_t *ppos) 6685 { 6686 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); 6687 } 6688 6689 static ssize_t 6690 tracing_thresh_write(struct file *filp, const char __user *ubuf, 6691 size_t cnt, loff_t *ppos) 6692 { 6693 struct trace_array *tr = filp->private_data; 6694 int ret; 6695 6696 mutex_lock(&trace_types_lock); 6697 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); 6698 if (ret < 0) 6699 goto out; 6700 6701 if (tr->current_trace->update_thresh) { 6702 ret = tr->current_trace->update_thresh(tr); 6703 if (ret < 0) 6704 goto out; 6705 } 6706 6707 ret = cnt; 6708 out: 6709 mutex_unlock(&trace_types_lock); 6710 6711 return ret; 6712 } 6713 6714 #ifdef CONFIG_TRACER_MAX_TRACE 6715 6716 static ssize_t 6717 tracing_max_lat_read(struct file *filp, char __user *ubuf, 6718 size_t cnt, loff_t *ppos) 6719 { 6720 struct trace_array *tr = filp->private_data; 6721 6722 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos); 6723 } 6724 6725 static ssize_t 6726 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 6727 size_t cnt, loff_t *ppos) 6728 { 6729 struct trace_array *tr = filp->private_data; 6730 6731 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos); 6732 } 6733 6734 #endif 6735 6736 static int open_pipe_on_cpu(struct trace_array *tr, int cpu) 6737 { 6738 if (cpu == RING_BUFFER_ALL_CPUS) { 6739 if (cpumask_empty(tr->pipe_cpumask)) { 6740 cpumask_setall(tr->pipe_cpumask); 6741 return 0; 6742 } 6743 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) { 6744 cpumask_set_cpu(cpu, tr->pipe_cpumask); 6745 return 0; 6746 } 6747 return 
-EBUSY; 6748 } 6749 6750 static void close_pipe_on_cpu(struct trace_array *tr, int cpu) 6751 { 6752 if (cpu == RING_BUFFER_ALL_CPUS) { 6753 WARN_ON(!cpumask_full(tr->pipe_cpumask)); 6754 cpumask_clear(tr->pipe_cpumask); 6755 } else { 6756 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask)); 6757 cpumask_clear_cpu(cpu, tr->pipe_cpumask); 6758 } 6759 } 6760 6761 static int tracing_open_pipe(struct inode *inode, struct file *filp) 6762 { 6763 struct trace_array *tr = inode->i_private; 6764 struct trace_iterator *iter; 6765 int cpu; 6766 int ret; 6767 6768 ret = tracing_check_open_get_tr(tr); 6769 if (ret) 6770 return ret; 6771 6772 mutex_lock(&trace_types_lock); 6773 cpu = tracing_get_cpu(inode); 6774 ret = open_pipe_on_cpu(tr, cpu); 6775 if (ret) 6776 goto fail_pipe_on_cpu; 6777 6778 /* create a buffer to store the information to pass to userspace */ 6779 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 6780 if (!iter) { 6781 ret = -ENOMEM; 6782 goto fail_alloc_iter; 6783 } 6784 6785 trace_seq_init(&iter->seq); 6786 iter->trace = tr->current_trace; 6787 6788 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 6789 ret = -ENOMEM; 6790 goto fail; 6791 } 6792 6793 /* trace pipe does not show start of buffer */ 6794 cpumask_setall(iter->started); 6795 6796 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 6797 iter->iter_flags |= TRACE_FILE_LAT_FMT; 6798 6799 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 6800 if (trace_clocks[tr->clock_id].in_ns) 6801 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 6802 6803 iter->tr = tr; 6804 iter->array_buffer = &tr->array_buffer; 6805 iter->cpu_file = cpu; 6806 mutex_init(&iter->mutex); 6807 filp->private_data = iter; 6808 6809 if (iter->trace->pipe_open) 6810 iter->trace->pipe_open(iter); 6811 6812 nonseekable_open(inode, filp); 6813 6814 tr->trace_ref++; 6815 6816 mutex_unlock(&trace_types_lock); 6817 return ret; 6818 6819 fail: 6820 kfree(iter); 6821 fail_alloc_iter: 6822 close_pipe_on_cpu(tr, cpu); 6823 fail_pipe_on_cpu: 6824 __trace_array_put(tr); 6825 mutex_unlock(&trace_types_lock); 6826 return ret; 6827 } 6828 6829 static int tracing_release_pipe(struct inode *inode, struct file *file) 6830 { 6831 struct trace_iterator *iter = file->private_data; 6832 struct trace_array *tr = inode->i_private; 6833 6834 mutex_lock(&trace_types_lock); 6835 6836 tr->trace_ref--; 6837 6838 if (iter->trace->pipe_close) 6839 iter->trace->pipe_close(iter); 6840 close_pipe_on_cpu(tr, iter->cpu_file); 6841 mutex_unlock(&trace_types_lock); 6842 6843 free_trace_iter_content(iter); 6844 kfree(iter); 6845 6846 trace_array_put(tr); 6847 6848 return 0; 6849 } 6850 6851 static __poll_t 6852 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) 6853 { 6854 struct trace_array *tr = iter->tr; 6855 6856 /* Iterators are static, they should be filled or empty */ 6857 if (trace_buffer_iter(iter, iter->cpu_file)) 6858 return EPOLLIN | EPOLLRDNORM; 6859 6860 if (tr->trace_flags & TRACE_ITER_BLOCK) 6861 /* 6862 * Always select as readable when in blocking mode 6863 */ 6864 return EPOLLIN | EPOLLRDNORM; 6865 else 6866 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, 6867 filp, poll_table, iter->tr->buffer_percent); 6868 } 6869 6870 static __poll_t 6871 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 6872 { 6873 struct trace_iterator *iter = filp->private_data; 6874 6875 return trace_poll(iter, filp, poll_table); 6876 } 6877 6878 /* Must be called with iter->mutex held. 
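 *
 * A sketch of the expected calling pattern (cf. tracing_read_pipe()):
 *
 *	mutex_lock(&iter->mutex);
 *	ret = tracing_wait_pipe(filp);	(may drop and retake iter->mutex)
 *	...
 *	mutex_unlock(&iter->mutex);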
 */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, 0);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency: the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	trace_iterator_reset(iter);
	cpumask_clear(iter->started);
	trace_seq_init(&iter->seq);

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/*
			 * If one print_trace_line() fills the entire trace_seq
			 * in one shot, trace_seq_to_user() will return -EBUSY
			 * because save_len == 0. In this case we need to
			 * consume the event, otherwise the loop will peek
			 * this event again next time, resulting in an
			 * infinite loop.
			 */
			if (save_len == 0) {
				iter->seq.full = 0;
				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
				trace_consume(iter);
				break;
			}

			/* In other cases, don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
6999 */ 7000 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 7001 iter->ent->type); 7002 } 7003 trace_access_unlock(iter->cpu_file); 7004 trace_event_read_unlock(); 7005 7006 /* Now copy what we have to the user */ 7007 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 7008 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) 7009 trace_seq_init(&iter->seq); 7010 7011 /* 7012 * If there was nothing to send to user, in spite of consuming trace 7013 * entries, go back to wait for more entries. 7014 */ 7015 if (sret == -EBUSY) 7016 goto waitagain; 7017 7018 out: 7019 mutex_unlock(&iter->mutex); 7020 7021 return sret; 7022 } 7023 7024 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 7025 unsigned int idx) 7026 { 7027 __free_page(spd->pages[idx]); 7028 } 7029 7030 static size_t 7031 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 7032 { 7033 size_t count; 7034 int save_len; 7035 int ret; 7036 7037 /* Seq buffer is page-sized, exactly what we need. */ 7038 for (;;) { 7039 save_len = iter->seq.seq.len; 7040 ret = print_trace_line(iter); 7041 7042 if (trace_seq_has_overflowed(&iter->seq)) { 7043 iter->seq.seq.len = save_len; 7044 break; 7045 } 7046 7047 /* 7048 * This should not be hit, because it should only 7049 * be set if the iter->seq overflowed. But check it 7050 * anyway to be safe. 7051 */ 7052 if (ret == TRACE_TYPE_PARTIAL_LINE) { 7053 iter->seq.seq.len = save_len; 7054 break; 7055 } 7056 7057 count = trace_seq_used(&iter->seq) - save_len; 7058 if (rem < count) { 7059 rem = 0; 7060 iter->seq.seq.len = save_len; 7061 break; 7062 } 7063 7064 if (ret != TRACE_TYPE_NO_CONSUME) 7065 trace_consume(iter); 7066 rem -= count; 7067 if (!trace_find_next_entry_inc(iter)) { 7068 rem = 0; 7069 iter->ent = NULL; 7070 break; 7071 } 7072 } 7073 7074 return rem; 7075 } 7076 7077 static ssize_t tracing_splice_read_pipe(struct file *filp, 7078 loff_t *ppos, 7079 struct pipe_inode_info *pipe, 7080 size_t len, 7081 unsigned int flags) 7082 { 7083 struct page *pages_def[PIPE_DEF_BUFFERS]; 7084 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 7085 struct trace_iterator *iter = filp->private_data; 7086 struct splice_pipe_desc spd = { 7087 .pages = pages_def, 7088 .partial = partial_def, 7089 .nr_pages = 0, /* This gets updated below. */ 7090 .nr_pages_max = PIPE_DEF_BUFFERS, 7091 .ops = &default_pipe_buf_ops, 7092 .spd_release = tracing_spd_release_pipe, 7093 }; 7094 ssize_t ret; 7095 size_t rem; 7096 unsigned int i; 7097 7098 if (splice_grow_spd(pipe, &spd)) 7099 return -ENOMEM; 7100 7101 mutex_lock(&iter->mutex); 7102 7103 if (iter->trace->splice_read) { 7104 ret = iter->trace->splice_read(iter, filp, 7105 ppos, pipe, len, flags); 7106 if (ret) 7107 goto out_err; 7108 } 7109 7110 ret = tracing_wait_pipe(filp); 7111 if (ret <= 0) 7112 goto out_err; 7113 7114 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 7115 ret = -EFAULT; 7116 goto out_err; 7117 } 7118 7119 trace_event_read_lock(); 7120 trace_access_lock(iter->cpu_file); 7121 7122 /* Fill as many pages as possible. */ 7123 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { 7124 spd.pages[i] = alloc_page(GFP_KERNEL); 7125 if (!spd.pages[i]) 7126 break; 7127 7128 rem = tracing_fill_pipe_page(rem, iter); 7129 7130 /* Copy the data into the page, so we can start over. 
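		 * (The seq buffer is drained into the splice page and then
		 * re-initialized below, so the next iteration starts from an
		 * empty trace_seq.)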
*/ 7131 ret = trace_seq_to_buffer(&iter->seq, 7132 page_address(spd.pages[i]), 7133 trace_seq_used(&iter->seq)); 7134 if (ret < 0) { 7135 __free_page(spd.pages[i]); 7136 break; 7137 } 7138 spd.partial[i].offset = 0; 7139 spd.partial[i].len = trace_seq_used(&iter->seq); 7140 7141 trace_seq_init(&iter->seq); 7142 } 7143 7144 trace_access_unlock(iter->cpu_file); 7145 trace_event_read_unlock(); 7146 mutex_unlock(&iter->mutex); 7147 7148 spd.nr_pages = i; 7149 7150 if (i) 7151 ret = splice_to_pipe(pipe, &spd); 7152 else 7153 ret = 0; 7154 out: 7155 splice_shrink_spd(&spd); 7156 return ret; 7157 7158 out_err: 7159 mutex_unlock(&iter->mutex); 7160 goto out; 7161 } 7162 7163 static ssize_t 7164 tracing_entries_read(struct file *filp, char __user *ubuf, 7165 size_t cnt, loff_t *ppos) 7166 { 7167 struct inode *inode = file_inode(filp); 7168 struct trace_array *tr = inode->i_private; 7169 int cpu = tracing_get_cpu(inode); 7170 char buf[64]; 7171 int r = 0; 7172 ssize_t ret; 7173 7174 mutex_lock(&trace_types_lock); 7175 7176 if (cpu == RING_BUFFER_ALL_CPUS) { 7177 int cpu, buf_size_same; 7178 unsigned long size; 7179 7180 size = 0; 7181 buf_size_same = 1; 7182 /* check if all cpu sizes are same */ 7183 for_each_tracing_cpu(cpu) { 7184 /* fill in the size from first enabled cpu */ 7185 if (size == 0) 7186 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; 7187 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { 7188 buf_size_same = 0; 7189 break; 7190 } 7191 } 7192 7193 if (buf_size_same) { 7194 if (!ring_buffer_expanded) 7195 r = sprintf(buf, "%lu (expanded: %lu)\n", 7196 size >> 10, 7197 trace_buf_size >> 10); 7198 else 7199 r = sprintf(buf, "%lu\n", size >> 10); 7200 } else 7201 r = sprintf(buf, "X\n"); 7202 } else 7203 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); 7204 7205 mutex_unlock(&trace_types_lock); 7206 7207 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 7208 return ret; 7209 } 7210 7211 static ssize_t 7212 tracing_entries_write(struct file *filp, const char __user *ubuf, 7213 size_t cnt, loff_t *ppos) 7214 { 7215 struct inode *inode = file_inode(filp); 7216 struct trace_array *tr = inode->i_private; 7217 unsigned long val; 7218 int ret; 7219 7220 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 7221 if (ret) 7222 return ret; 7223 7224 /* must have at least 1 entry */ 7225 if (!val) 7226 return -EINVAL; 7227 7228 /* value is in KB */ 7229 val <<= 10; 7230 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); 7231 if (ret < 0) 7232 return ret; 7233 7234 *ppos += cnt; 7235 7236 return cnt; 7237 } 7238 7239 static ssize_t 7240 tracing_total_entries_read(struct file *filp, char __user *ubuf, 7241 size_t cnt, loff_t *ppos) 7242 { 7243 struct trace_array *tr = filp->private_data; 7244 char buf[64]; 7245 int r, cpu; 7246 unsigned long size = 0, expanded_size = 0; 7247 7248 mutex_lock(&trace_types_lock); 7249 for_each_tracing_cpu(cpu) { 7250 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; 7251 if (!ring_buffer_expanded) 7252 expanded_size += trace_buf_size >> 10; 7253 } 7254 if (ring_buffer_expanded) 7255 r = sprintf(buf, "%lu\n", size); 7256 else 7257 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 7258 mutex_unlock(&trace_types_lock); 7259 7260 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 7261 } 7262 7263 static ssize_t 7264 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 7265 size_t cnt, loff_t *ppos) 7266 { 7267 /* 7268 * There is no need to read what the 
user has written, this function 7269 * is just to make sure that there is no error when "echo" is used 7270 */ 7271 7272 *ppos += cnt; 7273 7274 return cnt; 7275 } 7276 7277 static int 7278 tracing_free_buffer_release(struct inode *inode, struct file *filp) 7279 { 7280 struct trace_array *tr = inode->i_private; 7281 7282 /* disable tracing ? */ 7283 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) 7284 tracer_tracing_off(tr); 7285 /* resize the ring buffer to 0 */ 7286 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 7287 7288 trace_array_put(tr); 7289 7290 return 0; 7291 } 7292 7293 static ssize_t 7294 tracing_mark_write(struct file *filp, const char __user *ubuf, 7295 size_t cnt, loff_t *fpos) 7296 { 7297 struct trace_array *tr = filp->private_data; 7298 struct ring_buffer_event *event; 7299 enum event_trigger_type tt = ETT_NONE; 7300 struct trace_buffer *buffer; 7301 struct print_entry *entry; 7302 ssize_t written; 7303 int size; 7304 int len; 7305 7306 /* Used in tracing_mark_raw_write() as well */ 7307 #define FAULTED_STR "<faulted>" 7308 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */ 7309 7310 if (tracing_disabled) 7311 return -EINVAL; 7312 7313 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 7314 return -EINVAL; 7315 7316 if (cnt > TRACE_BUF_SIZE) 7317 cnt = TRACE_BUF_SIZE; 7318 7319 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 7320 7321 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */ 7322 7323 /* If less than "<faulted>", then make sure we can still add that */ 7324 if (cnt < FAULTED_SIZE) 7325 size += FAULTED_SIZE - cnt; 7326 7327 buffer = tr->array_buffer.buffer; 7328 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 7329 tracing_gen_ctx()); 7330 if (unlikely(!event)) 7331 /* Ring buffer disabled, return as if not open for write */ 7332 return -EBADF; 7333 7334 entry = ring_buffer_event_data(event); 7335 entry->ip = _THIS_IP_; 7336 7337 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); 7338 if (len) { 7339 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); 7340 cnt = FAULTED_SIZE; 7341 written = -EFAULT; 7342 } else 7343 written = cnt; 7344 7345 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { 7346 /* do not add \n before testing triggers, but add \0 */ 7347 entry->buf[cnt] = '\0'; 7348 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event); 7349 } 7350 7351 if (entry->buf[cnt - 1] != '\n') { 7352 entry->buf[cnt] = '\n'; 7353 entry->buf[cnt + 1] = '\0'; 7354 } else 7355 entry->buf[cnt] = '\0'; 7356 7357 if (static_branch_unlikely(&trace_marker_exports_enabled)) 7358 ftrace_exports(event, TRACE_EXPORT_MARKER); 7359 __buffer_unlock_commit(buffer, event); 7360 7361 if (tt) 7362 event_triggers_post_call(tr->trace_marker_file, tt); 7363 7364 return written; 7365 } 7366 7367 /* Limit it for now to 3K (including tag) */ 7368 #define RAW_DATA_MAX_SIZE (1024*3) 7369 7370 static ssize_t 7371 tracing_mark_raw_write(struct file *filp, const char __user *ubuf, 7372 size_t cnt, loff_t *fpos) 7373 { 7374 struct trace_array *tr = filp->private_data; 7375 struct ring_buffer_event *event; 7376 struct trace_buffer *buffer; 7377 struct raw_data_entry *entry; 7378 ssize_t written; 7379 int size; 7380 int len; 7381 7382 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) 7383 7384 if (tracing_disabled) 7385 return -EINVAL; 7386 7387 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 7388 return -EINVAL; 7389 7390 /* The marker must at least have a tag id */ 7391 if (cnt < sizeof(unsigned int) 
|| cnt > RAW_DATA_MAX_SIZE) 7392 return -EINVAL; 7393 7394 if (cnt > TRACE_BUF_SIZE) 7395 cnt = TRACE_BUF_SIZE; 7396 7397 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 7398 7399 size = sizeof(*entry) + cnt; 7400 if (cnt < FAULT_SIZE_ID) 7401 size += FAULT_SIZE_ID - cnt; 7402 7403 buffer = tr->array_buffer.buffer; 7404 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, 7405 tracing_gen_ctx()); 7406 if (!event) 7407 /* Ring buffer disabled, return as if not open for write */ 7408 return -EBADF; 7409 7410 entry = ring_buffer_event_data(event); 7411 7412 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); 7413 if (len) { 7414 entry->id = -1; 7415 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); 7416 written = -EFAULT; 7417 } else 7418 written = cnt; 7419 7420 __buffer_unlock_commit(buffer, event); 7421 7422 return written; 7423 } 7424 7425 static int tracing_clock_show(struct seq_file *m, void *v) 7426 { 7427 struct trace_array *tr = m->private; 7428 int i; 7429 7430 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 7431 seq_printf(m, 7432 "%s%s%s%s", i ? " " : "", 7433 i == tr->clock_id ? "[" : "", trace_clocks[i].name, 7434 i == tr->clock_id ? "]" : ""); 7435 seq_putc(m, '\n'); 7436 7437 return 0; 7438 } 7439 7440 int tracing_set_clock(struct trace_array *tr, const char *clockstr) 7441 { 7442 int i; 7443 7444 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 7445 if (strcmp(trace_clocks[i].name, clockstr) == 0) 7446 break; 7447 } 7448 if (i == ARRAY_SIZE(trace_clocks)) 7449 return -EINVAL; 7450 7451 mutex_lock(&trace_types_lock); 7452 7453 tr->clock_id = i; 7454 7455 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); 7456 7457 /* 7458 * New clock may not be consistent with the previous clock. 7459 * Reset the buffer so that it doesn't have incomparable timestamps. 
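	 *
	 * For example (illustrative), switching the clock from userspace
	 * lands here and implicitly clears the existing trace data:
	 *
	 *	# echo global > /sys/kernel/tracing/trace_clock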
7460 */ 7461 tracing_reset_online_cpus(&tr->array_buffer); 7462 7463 #ifdef CONFIG_TRACER_MAX_TRACE 7464 if (tr->max_buffer.buffer) 7465 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); 7466 tracing_reset_online_cpus(&tr->max_buffer); 7467 #endif 7468 7469 mutex_unlock(&trace_types_lock); 7470 7471 return 0; 7472 } 7473 7474 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 7475 size_t cnt, loff_t *fpos) 7476 { 7477 struct seq_file *m = filp->private_data; 7478 struct trace_array *tr = m->private; 7479 char buf[64]; 7480 const char *clockstr; 7481 int ret; 7482 7483 if (cnt >= sizeof(buf)) 7484 return -EINVAL; 7485 7486 if (copy_from_user(buf, ubuf, cnt)) 7487 return -EFAULT; 7488 7489 buf[cnt] = 0; 7490 7491 clockstr = strstrip(buf); 7492 7493 ret = tracing_set_clock(tr, clockstr); 7494 if (ret) 7495 return ret; 7496 7497 *fpos += cnt; 7498 7499 return cnt; 7500 } 7501 7502 static int tracing_clock_open(struct inode *inode, struct file *file) 7503 { 7504 struct trace_array *tr = inode->i_private; 7505 int ret; 7506 7507 ret = tracing_check_open_get_tr(tr); 7508 if (ret) 7509 return ret; 7510 7511 ret = single_open(file, tracing_clock_show, inode->i_private); 7512 if (ret < 0) 7513 trace_array_put(tr); 7514 7515 return ret; 7516 } 7517 7518 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) 7519 { 7520 struct trace_array *tr = m->private; 7521 7522 mutex_lock(&trace_types_lock); 7523 7524 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) 7525 seq_puts(m, "delta [absolute]\n"); 7526 else 7527 seq_puts(m, "[delta] absolute\n"); 7528 7529 mutex_unlock(&trace_types_lock); 7530 7531 return 0; 7532 } 7533 7534 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) 7535 { 7536 struct trace_array *tr = inode->i_private; 7537 int ret; 7538 7539 ret = tracing_check_open_get_tr(tr); 7540 if (ret) 7541 return ret; 7542 7543 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); 7544 if (ret < 0) 7545 trace_array_put(tr); 7546 7547 return ret; 7548 } 7549 7550 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe) 7551 { 7552 if (rbe == this_cpu_read(trace_buffered_event)) 7553 return ring_buffer_time_stamp(buffer); 7554 7555 return ring_buffer_event_time_stamp(buffer, rbe); 7556 } 7557 7558 /* 7559 * Set or disable using the per CPU trace_buffer_event when possible. 
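 *
 * Calls nest via tr->no_filter_buffering_ref: every
 * tracing_set_filter_buffering(tr, true) must eventually be paired with
 * a tracing_set_filter_buffering(tr, false), e.g.:
 *
 *	tracing_set_filter_buffering(tr, true);
 *	...
 *	tracing_set_filter_buffering(tr, false);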
7560 */ 7561 int tracing_set_filter_buffering(struct trace_array *tr, bool set) 7562 { 7563 int ret = 0; 7564 7565 mutex_lock(&trace_types_lock); 7566 7567 if (set && tr->no_filter_buffering_ref++) 7568 goto out; 7569 7570 if (!set) { 7571 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) { 7572 ret = -EINVAL; 7573 goto out; 7574 } 7575 7576 --tr->no_filter_buffering_ref; 7577 } 7578 out: 7579 mutex_unlock(&trace_types_lock); 7580 7581 return ret; 7582 } 7583 7584 struct ftrace_buffer_info { 7585 struct trace_iterator iter; 7586 void *spare; 7587 unsigned int spare_cpu; 7588 unsigned int read; 7589 }; 7590 7591 #ifdef CONFIG_TRACER_SNAPSHOT 7592 static int tracing_snapshot_open(struct inode *inode, struct file *file) 7593 { 7594 struct trace_array *tr = inode->i_private; 7595 struct trace_iterator *iter; 7596 struct seq_file *m; 7597 int ret; 7598 7599 ret = tracing_check_open_get_tr(tr); 7600 if (ret) 7601 return ret; 7602 7603 if (file->f_mode & FMODE_READ) { 7604 iter = __tracing_open(inode, file, true); 7605 if (IS_ERR(iter)) 7606 ret = PTR_ERR(iter); 7607 } else { 7608 /* Writes still need the seq_file to hold the private data */ 7609 ret = -ENOMEM; 7610 m = kzalloc(sizeof(*m), GFP_KERNEL); 7611 if (!m) 7612 goto out; 7613 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 7614 if (!iter) { 7615 kfree(m); 7616 goto out; 7617 } 7618 ret = 0; 7619 7620 iter->tr = tr; 7621 iter->array_buffer = &tr->max_buffer; 7622 iter->cpu_file = tracing_get_cpu(inode); 7623 m->private = iter; 7624 file->private_data = m; 7625 } 7626 out: 7627 if (ret < 0) 7628 trace_array_put(tr); 7629 7630 return ret; 7631 } 7632 7633 static void tracing_swap_cpu_buffer(void *tr) 7634 { 7635 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id()); 7636 } 7637 7638 static ssize_t 7639 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 7640 loff_t *ppos) 7641 { 7642 struct seq_file *m = filp->private_data; 7643 struct trace_iterator *iter = m->private; 7644 struct trace_array *tr = iter->tr; 7645 unsigned long val; 7646 int ret; 7647 7648 ret = tracing_update_buffers(); 7649 if (ret < 0) 7650 return ret; 7651 7652 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 7653 if (ret) 7654 return ret; 7655 7656 mutex_lock(&trace_types_lock); 7657 7658 if (tr->current_trace->use_max_tr) { 7659 ret = -EBUSY; 7660 goto out; 7661 } 7662 7663 local_irq_disable(); 7664 arch_spin_lock(&tr->max_lock); 7665 if (tr->cond_snapshot) 7666 ret = -EBUSY; 7667 arch_spin_unlock(&tr->max_lock); 7668 local_irq_enable(); 7669 if (ret) 7670 goto out; 7671 7672 switch (val) { 7673 case 0: 7674 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 7675 ret = -EINVAL; 7676 break; 7677 } 7678 if (tr->allocated_snapshot) 7679 free_snapshot(tr); 7680 break; 7681 case 1: 7682 /* Only allow per-cpu swap if the ring buffer supports it */ 7683 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP 7684 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 7685 ret = -EINVAL; 7686 break; 7687 } 7688 #endif 7689 if (tr->allocated_snapshot) 7690 ret = resize_buffer_duplicate_size(&tr->max_buffer, 7691 &tr->array_buffer, iter->cpu_file); 7692 else 7693 ret = tracing_alloc_snapshot_instance(tr); 7694 if (ret < 0) 7695 break; 7696 /* Now, we're going to swap */ 7697 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 7698 local_irq_disable(); 7699 update_max_tr(tr, current, smp_processor_id(), NULL); 7700 local_irq_enable(); 7701 } else { 7702 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, 7703 (void *)tr, 1); 7704 } 7705 break; 7706 default: 7707 
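		/* Any other value clears the snapshot buffer(s) without freeing them */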
if (tr->allocated_snapshot) { 7708 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 7709 tracing_reset_online_cpus(&tr->max_buffer); 7710 else 7711 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); 7712 } 7713 break; 7714 } 7715 7716 if (ret >= 0) { 7717 *ppos += cnt; 7718 ret = cnt; 7719 } 7720 out: 7721 mutex_unlock(&trace_types_lock); 7722 return ret; 7723 } 7724 7725 static int tracing_snapshot_release(struct inode *inode, struct file *file) 7726 { 7727 struct seq_file *m = file->private_data; 7728 int ret; 7729 7730 ret = tracing_release(inode, file); 7731 7732 if (file->f_mode & FMODE_READ) 7733 return ret; 7734 7735 /* If write only, the seq_file is just a stub */ 7736 if (m) 7737 kfree(m->private); 7738 kfree(m); 7739 7740 return 0; 7741 } 7742 7743 static int tracing_buffers_open(struct inode *inode, struct file *filp); 7744 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 7745 size_t count, loff_t *ppos); 7746 static int tracing_buffers_release(struct inode *inode, struct file *file); 7747 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 7748 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 7749 7750 static int snapshot_raw_open(struct inode *inode, struct file *filp) 7751 { 7752 struct ftrace_buffer_info *info; 7753 int ret; 7754 7755 /* The following checks for tracefs lockdown */ 7756 ret = tracing_buffers_open(inode, filp); 7757 if (ret < 0) 7758 return ret; 7759 7760 info = filp->private_data; 7761 7762 if (info->iter.trace->use_max_tr) { 7763 tracing_buffers_release(inode, filp); 7764 return -EBUSY; 7765 } 7766 7767 info->iter.snapshot = true; 7768 info->iter.array_buffer = &info->iter.tr->max_buffer; 7769 7770 return ret; 7771 } 7772 7773 #endif /* CONFIG_TRACER_SNAPSHOT */ 7774 7775 7776 static const struct file_operations tracing_thresh_fops = { 7777 .open = tracing_open_generic, 7778 .read = tracing_thresh_read, 7779 .write = tracing_thresh_write, 7780 .llseek = generic_file_llseek, 7781 }; 7782 7783 #ifdef CONFIG_TRACER_MAX_TRACE 7784 static const struct file_operations tracing_max_lat_fops = { 7785 .open = tracing_open_generic_tr, 7786 .read = tracing_max_lat_read, 7787 .write = tracing_max_lat_write, 7788 .llseek = generic_file_llseek, 7789 .release = tracing_release_generic_tr, 7790 }; 7791 #endif 7792 7793 static const struct file_operations set_tracer_fops = { 7794 .open = tracing_open_generic_tr, 7795 .read = tracing_set_trace_read, 7796 .write = tracing_set_trace_write, 7797 .llseek = generic_file_llseek, 7798 .release = tracing_release_generic_tr, 7799 }; 7800 7801 static const struct file_operations tracing_pipe_fops = { 7802 .open = tracing_open_pipe, 7803 .poll = tracing_poll_pipe, 7804 .read = tracing_read_pipe, 7805 .splice_read = tracing_splice_read_pipe, 7806 .release = tracing_release_pipe, 7807 .llseek = no_llseek, 7808 }; 7809 7810 static const struct file_operations tracing_entries_fops = { 7811 .open = tracing_open_generic_tr, 7812 .read = tracing_entries_read, 7813 .write = tracing_entries_write, 7814 .llseek = generic_file_llseek, 7815 .release = tracing_release_generic_tr, 7816 }; 7817 7818 static const struct file_operations tracing_total_entries_fops = { 7819 .open = tracing_open_generic_tr, 7820 .read = tracing_total_entries_read, 7821 .llseek = generic_file_llseek, 7822 .release = tracing_release_generic_tr, 7823 }; 7824 7825 static const struct file_operations tracing_free_buffer_fops = { 7826 .open = tracing_open_generic_tr, 7827 .write = tracing_free_buffer_write, 7828 .release 
= tracing_free_buffer_release, 7829 }; 7830 7831 static const struct file_operations tracing_mark_fops = { 7832 .open = tracing_mark_open, 7833 .write = tracing_mark_write, 7834 .release = tracing_release_generic_tr, 7835 }; 7836 7837 static const struct file_operations tracing_mark_raw_fops = { 7838 .open = tracing_mark_open, 7839 .write = tracing_mark_raw_write, 7840 .release = tracing_release_generic_tr, 7841 }; 7842 7843 static const struct file_operations trace_clock_fops = { 7844 .open = tracing_clock_open, 7845 .read = seq_read, 7846 .llseek = seq_lseek, 7847 .release = tracing_single_release_tr, 7848 .write = tracing_clock_write, 7849 }; 7850 7851 static const struct file_operations trace_time_stamp_mode_fops = { 7852 .open = tracing_time_stamp_mode_open, 7853 .read = seq_read, 7854 .llseek = seq_lseek, 7855 .release = tracing_single_release_tr, 7856 }; 7857 7858 #ifdef CONFIG_TRACER_SNAPSHOT 7859 static const struct file_operations snapshot_fops = { 7860 .open = tracing_snapshot_open, 7861 .read = seq_read, 7862 .write = tracing_snapshot_write, 7863 .llseek = tracing_lseek, 7864 .release = tracing_snapshot_release, 7865 }; 7866 7867 static const struct file_operations snapshot_raw_fops = { 7868 .open = snapshot_raw_open, 7869 .read = tracing_buffers_read, 7870 .release = tracing_buffers_release, 7871 .splice_read = tracing_buffers_splice_read, 7872 .llseek = no_llseek, 7873 }; 7874 7875 #endif /* CONFIG_TRACER_SNAPSHOT */ 7876 7877 /* 7878 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct 7879 * @filp: The active open file structure 7880 * @ubuf: The userspace provided buffer to read value into 7881 * @cnt: The maximum number of bytes to read 7882 * @ppos: The current "file" position 7883 * 7884 * This function implements the write interface for a struct trace_min_max_param. 7885 * The filp->private_data must point to a trace_min_max_param structure that 7886 * defines where to write the value, the min and the max acceptable values, 7887 * and a lock to protect the write. 7888 */ 7889 static ssize_t 7890 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) 7891 { 7892 struct trace_min_max_param *param = filp->private_data; 7893 u64 val; 7894 int err; 7895 7896 if (!param) 7897 return -EFAULT; 7898 7899 err = kstrtoull_from_user(ubuf, cnt, 10, &val); 7900 if (err) 7901 return err; 7902 7903 if (param->lock) 7904 mutex_lock(param->lock); 7905 7906 if (param->min && val < *param->min) 7907 err = -EINVAL; 7908 7909 if (param->max && val > *param->max) 7910 err = -EINVAL; 7911 7912 if (!err) 7913 *param->val = val; 7914 7915 if (param->lock) 7916 mutex_unlock(param->lock); 7917 7918 if (err) 7919 return err; 7920 7921 return cnt; 7922 } 7923 7924 /* 7925 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct 7926 * @filp: The active open file structure 7927 * @ubuf: The userspace provided buffer to read value into 7928 * @cnt: The maximum number of bytes to read 7929 * @ppos: The current "file" position 7930 * 7931 * This function implements the read interface for a struct trace_min_max_param. 7932 * The filp->private_data must point to a trace_min_max_param struct with valid 7933 * data. 
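 *
 * The value is returned as a decimal string followed by a newline,
 * e.g. a *param->val of 5000 reads back as "5000\n".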
7934 */ 7935 static ssize_t 7936 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 7937 { 7938 struct trace_min_max_param *param = filp->private_data; 7939 char buf[U64_STR_SIZE]; 7940 int len; 7941 u64 val; 7942 7943 if (!param) 7944 return -EFAULT; 7945 7946 val = *param->val; 7947 7948 if (cnt > sizeof(buf)) 7949 cnt = sizeof(buf); 7950 7951 len = snprintf(buf, sizeof(buf), "%llu\n", val); 7952 7953 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 7954 } 7955 7956 const struct file_operations trace_min_max_fops = { 7957 .open = tracing_open_generic, 7958 .read = trace_min_max_read, 7959 .write = trace_min_max_write, 7960 }; 7961 7962 #define TRACING_LOG_ERRS_MAX 8 7963 #define TRACING_LOG_LOC_MAX 128 7964 7965 #define CMD_PREFIX " Command: " 7966 7967 struct err_info { 7968 const char **errs; /* ptr to loc-specific array of err strings */ 7969 u8 type; /* index into errs -> specific err string */ 7970 u16 pos; /* caret position */ 7971 u64 ts; 7972 }; 7973 7974 struct tracing_log_err { 7975 struct list_head list; 7976 struct err_info info; 7977 char loc[TRACING_LOG_LOC_MAX]; /* err location */ 7978 char *cmd; /* what caused err */ 7979 }; 7980 7981 static DEFINE_MUTEX(tracing_err_log_lock); 7982 7983 static struct tracing_log_err *alloc_tracing_log_err(int len) 7984 { 7985 struct tracing_log_err *err; 7986 7987 err = kzalloc(sizeof(*err), GFP_KERNEL); 7988 if (!err) 7989 return ERR_PTR(-ENOMEM); 7990 7991 err->cmd = kzalloc(len, GFP_KERNEL); 7992 if (!err->cmd) { 7993 kfree(err); 7994 return ERR_PTR(-ENOMEM); 7995 } 7996 7997 return err; 7998 } 7999 8000 static void free_tracing_log_err(struct tracing_log_err *err) 8001 { 8002 kfree(err->cmd); 8003 kfree(err); 8004 } 8005 8006 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr, 8007 int len) 8008 { 8009 struct tracing_log_err *err; 8010 char *cmd; 8011 8012 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { 8013 err = alloc_tracing_log_err(len); 8014 if (PTR_ERR(err) != -ENOMEM) 8015 tr->n_err_log_entries++; 8016 8017 return err; 8018 } 8019 cmd = kzalloc(len, GFP_KERNEL); 8020 if (!cmd) 8021 return ERR_PTR(-ENOMEM); 8022 err = list_first_entry(&tr->err_log, struct tracing_log_err, list); 8023 kfree(err->cmd); 8024 err->cmd = cmd; 8025 list_del(&err->list); 8026 8027 return err; 8028 } 8029 8030 /** 8031 * err_pos - find the position of a string within a command for error careting 8032 * @cmd: The tracing command that caused the error 8033 * @str: The string to position the caret at within @cmd 8034 * 8035 * Finds the position of the first occurrence of @str within @cmd. The 8036 * return value can be passed to tracing_log_err() for caret placement 8037 * within @cmd. 8038 * 8039 * Returns the index within @cmd of the first occurrence of @str or 0 8040 * if @str was not found. 
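 *
 * Example: err_pos("hist:keys=foo", "foo") returns 10, which places
 * the caret directly under the 'f' of "foo" when the error is shown.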
8041 */ 8042 unsigned int err_pos(char *cmd, const char *str) 8043 { 8044 char *found; 8045 8046 if (WARN_ON(!strlen(cmd))) 8047 return 0; 8048 8049 found = strstr(cmd, str); 8050 if (found) 8051 return found - cmd; 8052 8053 return 0; 8054 } 8055 8056 /** 8057 * tracing_log_err - write an error to the tracing error log 8058 * @tr: The associated trace array for the error (NULL for top level array) 8059 * @loc: A string describing where the error occurred 8060 * @cmd: The tracing command that caused the error 8061 * @errs: The array of loc-specific static error strings 8062 * @type: The index into errs[], which produces the specific static err string 8063 * @pos: The position the caret should be placed in the cmd 8064 * 8065 * Writes an error into tracing/error_log of the form: 8066 * 8067 * <loc>: error: <text> 8068 * Command: <cmd> 8069 * ^ 8070 * 8071 * tracing/error_log is a small log file containing the last 8072 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated 8073 * unless there has been a tracing error, and the error log can be 8074 * cleared and have its memory freed by writing the empty string in 8075 * truncation mode to it i.e. echo > tracing/error_log. 8076 * 8077 * NOTE: the @errs array along with the @type param are used to 8078 * produce a static error string - this string is not copied and saved 8079 * when the error is logged - only a pointer to it is saved. See 8080 * existing callers for examples of how static strings are typically 8081 * defined for use with tracing_log_err(). 8082 */ 8083 void tracing_log_err(struct trace_array *tr, 8084 const char *loc, const char *cmd, 8085 const char **errs, u8 type, u16 pos) 8086 { 8087 struct tracing_log_err *err; 8088 int len = 0; 8089 8090 if (!tr) 8091 tr = &global_trace; 8092 8093 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1; 8094 8095 mutex_lock(&tracing_err_log_lock); 8096 err = get_tracing_log_err(tr, len); 8097 if (PTR_ERR(err) == -ENOMEM) { 8098 mutex_unlock(&tracing_err_log_lock); 8099 return; 8100 } 8101 8102 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); 8103 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd); 8104 8105 err->info.errs = errs; 8106 err->info.type = type; 8107 err->info.pos = pos; 8108 err->info.ts = local_clock(); 8109 8110 list_add_tail(&err->list, &tr->err_log); 8111 mutex_unlock(&tracing_err_log_lock); 8112 } 8113 8114 static void clear_tracing_err_log(struct trace_array *tr) 8115 { 8116 struct tracing_log_err *err, *next; 8117 8118 mutex_lock(&tracing_err_log_lock); 8119 list_for_each_entry_safe(err, next, &tr->err_log, list) { 8120 list_del(&err->list); 8121 free_tracing_log_err(err); 8122 } 8123 8124 tr->n_err_log_entries = 0; 8125 mutex_unlock(&tracing_err_log_lock); 8126 } 8127 8128 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) 8129 { 8130 struct trace_array *tr = m->private; 8131 8132 mutex_lock(&tracing_err_log_lock); 8133 8134 return seq_list_start(&tr->err_log, *pos); 8135 } 8136 8137 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) 8138 { 8139 struct trace_array *tr = m->private; 8140 8141 return seq_list_next(v, &tr->err_log, pos); 8142 } 8143 8144 static void tracing_err_log_seq_stop(struct seq_file *m, void *v) 8145 { 8146 mutex_unlock(&tracing_err_log_lock); 8147 } 8148 8149 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos) 8150 { 8151 u16 i; 8152 8153 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) 8154 seq_putc(m, ' '); 8155 for (i = 0; i < pos; i++) 8156 
seq_putc(m, ' '); 8157 seq_puts(m, "^\n"); 8158 } 8159 8160 static int tracing_err_log_seq_show(struct seq_file *m, void *v) 8161 { 8162 struct tracing_log_err *err = v; 8163 8164 if (err) { 8165 const char *err_text = err->info.errs[err->info.type]; 8166 u64 sec = err->info.ts; 8167 u32 nsec; 8168 8169 nsec = do_div(sec, NSEC_PER_SEC); 8170 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000, 8171 err->loc, err_text); 8172 seq_printf(m, "%s", err->cmd); 8173 tracing_err_log_show_pos(m, err->info.pos); 8174 } 8175 8176 return 0; 8177 } 8178 8179 static const struct seq_operations tracing_err_log_seq_ops = { 8180 .start = tracing_err_log_seq_start, 8181 .next = tracing_err_log_seq_next, 8182 .stop = tracing_err_log_seq_stop, 8183 .show = tracing_err_log_seq_show 8184 }; 8185 8186 static int tracing_err_log_open(struct inode *inode, struct file *file) 8187 { 8188 struct trace_array *tr = inode->i_private; 8189 int ret = 0; 8190 8191 ret = tracing_check_open_get_tr(tr); 8192 if (ret) 8193 return ret; 8194 8195 /* If this file was opened for write, then erase contents */ 8196 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) 8197 clear_tracing_err_log(tr); 8198 8199 if (file->f_mode & FMODE_READ) { 8200 ret = seq_open(file, &tracing_err_log_seq_ops); 8201 if (!ret) { 8202 struct seq_file *m = file->private_data; 8203 m->private = tr; 8204 } else { 8205 trace_array_put(tr); 8206 } 8207 } 8208 return ret; 8209 } 8210 8211 static ssize_t tracing_err_log_write(struct file *file, 8212 const char __user *buffer, 8213 size_t count, loff_t *ppos) 8214 { 8215 return count; 8216 } 8217 8218 static int tracing_err_log_release(struct inode *inode, struct file *file) 8219 { 8220 struct trace_array *tr = inode->i_private; 8221 8222 trace_array_put(tr); 8223 8224 if (file->f_mode & FMODE_READ) 8225 seq_release(inode, file); 8226 8227 return 0; 8228 } 8229 8230 static const struct file_operations tracing_err_log_fops = { 8231 .open = tracing_err_log_open, 8232 .write = tracing_err_log_write, 8233 .read = seq_read, 8234 .llseek = tracing_lseek, 8235 .release = tracing_err_log_release, 8236 }; 8237 8238 static int tracing_buffers_open(struct inode *inode, struct file *filp) 8239 { 8240 struct trace_array *tr = inode->i_private; 8241 struct ftrace_buffer_info *info; 8242 int ret; 8243 8244 ret = tracing_check_open_get_tr(tr); 8245 if (ret) 8246 return ret; 8247 8248 info = kvzalloc(sizeof(*info), GFP_KERNEL); 8249 if (!info) { 8250 trace_array_put(tr); 8251 return -ENOMEM; 8252 } 8253 8254 mutex_lock(&trace_types_lock); 8255 8256 info->iter.tr = tr; 8257 info->iter.cpu_file = tracing_get_cpu(inode); 8258 info->iter.trace = tr->current_trace; 8259 info->iter.array_buffer = &tr->array_buffer; 8260 info->spare = NULL; 8261 /* Force reading ring buffer for first read */ 8262 info->read = (unsigned int)-1; 8263 8264 filp->private_data = info; 8265 8266 tr->trace_ref++; 8267 8268 mutex_unlock(&trace_types_lock); 8269 8270 ret = nonseekable_open(inode, filp); 8271 if (ret < 0) 8272 trace_array_put(tr); 8273 8274 return ret; 8275 } 8276 8277 static __poll_t 8278 tracing_buffers_poll(struct file *filp, poll_table *poll_table) 8279 { 8280 struct ftrace_buffer_info *info = filp->private_data; 8281 struct trace_iterator *iter = &info->iter; 8282 8283 return trace_poll(iter, filp, poll_table); 8284 } 8285 8286 static ssize_t 8287 tracing_buffers_read(struct file *filp, char __user *ubuf, 8288 size_t count, loff_t *ppos) 8289 { 8290 struct ftrace_buffer_info *info = filp->private_data; 8291 struct trace_iterator 
*iter = &info->iter; 8292 ssize_t ret = 0; 8293 ssize_t size; 8294 8295 if (!count) 8296 return 0; 8297 8298 #ifdef CONFIG_TRACER_MAX_TRACE 8299 if (iter->snapshot && iter->tr->current_trace->use_max_tr) 8300 return -EBUSY; 8301 #endif 8302 8303 if (!info->spare) { 8304 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, 8305 iter->cpu_file); 8306 if (IS_ERR(info->spare)) { 8307 ret = PTR_ERR(info->spare); 8308 info->spare = NULL; 8309 } else { 8310 info->spare_cpu = iter->cpu_file; 8311 } 8312 } 8313 if (!info->spare) 8314 return ret; 8315 8316 /* Do we have previous read data to read? */ 8317 if (info->read < PAGE_SIZE) 8318 goto read; 8319 8320 again: 8321 trace_access_lock(iter->cpu_file); 8322 ret = ring_buffer_read_page(iter->array_buffer->buffer, 8323 &info->spare, 8324 count, 8325 iter->cpu_file, 0); 8326 trace_access_unlock(iter->cpu_file); 8327 8328 if (ret < 0) { 8329 if (trace_empty(iter)) { 8330 if ((filp->f_flags & O_NONBLOCK)) 8331 return -EAGAIN; 8332 8333 ret = wait_on_pipe(iter, 0); 8334 if (ret) 8335 return ret; 8336 8337 goto again; 8338 } 8339 return 0; 8340 } 8341 8342 info->read = 0; 8343 read: 8344 size = PAGE_SIZE - info->read; 8345 if (size > count) 8346 size = count; 8347 8348 ret = copy_to_user(ubuf, info->spare + info->read, size); 8349 if (ret == size) 8350 return -EFAULT; 8351 8352 size -= ret; 8353 8354 *ppos += size; 8355 info->read += size; 8356 8357 return size; 8358 } 8359 8360 static int tracing_buffers_release(struct inode *inode, struct file *file) 8361 { 8362 struct ftrace_buffer_info *info = file->private_data; 8363 struct trace_iterator *iter = &info->iter; 8364 8365 mutex_lock(&trace_types_lock); 8366 8367 iter->tr->trace_ref--; 8368 8369 __trace_array_put(iter->tr); 8370 8371 iter->wait_index++; 8372 /* Make sure the waiters see the new wait_index */ 8373 smp_wmb(); 8374 8375 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); 8376 8377 if (info->spare) 8378 ring_buffer_free_read_page(iter->array_buffer->buffer, 8379 info->spare_cpu, info->spare); 8380 kvfree(info); 8381 8382 mutex_unlock(&trace_types_lock); 8383 8384 return 0; 8385 } 8386 8387 struct buffer_ref { 8388 struct trace_buffer *buffer; 8389 void *page; 8390 int cpu; 8391 refcount_t refcount; 8392 }; 8393 8394 static void buffer_ref_release(struct buffer_ref *ref) 8395 { 8396 if (!refcount_dec_and_test(&ref->refcount)) 8397 return; 8398 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); 8399 kfree(ref); 8400 } 8401 8402 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 8403 struct pipe_buffer *buf) 8404 { 8405 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 8406 8407 buffer_ref_release(ref); 8408 buf->private = 0; 8409 } 8410 8411 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe, 8412 struct pipe_buffer *buf) 8413 { 8414 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 8415 8416 if (refcount_read(&ref->refcount) > INT_MAX/2) 8417 return false; 8418 8419 refcount_inc(&ref->refcount); 8420 return true; 8421 } 8422 8423 /* Pipe buffer operations for a buffer. */ 8424 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 8425 .release = buffer_pipe_buf_release, 8426 .get = buffer_pipe_buf_get, 8427 }; 8428 8429 /* 8430 * Callback from splice_to_pipe(), if we need to release some pages 8431 * at the end of the spd in case we error'ed out in filling the pipe. 
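 * Each partial page carries its buffer_ref in spd->partial[i].private,
 * so the release done here mirrors what buffer_pipe_buf_release() does
 * for pages that did make it into the pipe.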
8432 */ 8433 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) 8434 { 8435 struct buffer_ref *ref = 8436 (struct buffer_ref *)spd->partial[i].private; 8437 8438 buffer_ref_release(ref); 8439 spd->partial[i].private = 0; 8440 } 8441 8442 static ssize_t 8443 tracing_buffers_splice_read(struct file *file, loff_t *ppos, 8444 struct pipe_inode_info *pipe, size_t len, 8445 unsigned int flags) 8446 { 8447 struct ftrace_buffer_info *info = file->private_data; 8448 struct trace_iterator *iter = &info->iter; 8449 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 8450 struct page *pages_def[PIPE_DEF_BUFFERS]; 8451 struct splice_pipe_desc spd = { 8452 .pages = pages_def, 8453 .partial = partial_def, 8454 .nr_pages_max = PIPE_DEF_BUFFERS, 8455 .ops = &buffer_pipe_buf_ops, 8456 .spd_release = buffer_spd_release, 8457 }; 8458 struct buffer_ref *ref; 8459 int entries, i; 8460 ssize_t ret = 0; 8461 8462 #ifdef CONFIG_TRACER_MAX_TRACE 8463 if (iter->snapshot && iter->tr->current_trace->use_max_tr) 8464 return -EBUSY; 8465 #endif 8466 8467 if (*ppos & (PAGE_SIZE - 1)) 8468 return -EINVAL; 8469 8470 if (len & (PAGE_SIZE - 1)) { 8471 if (len < PAGE_SIZE) 8472 return -EINVAL; 8473 len &= PAGE_MASK; 8474 } 8475 8476 if (splice_grow_spd(pipe, &spd)) 8477 return -ENOMEM; 8478 8479 again: 8480 trace_access_lock(iter->cpu_file); 8481 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); 8482 8483 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { 8484 struct page *page; 8485 int r; 8486 8487 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 8488 if (!ref) { 8489 ret = -ENOMEM; 8490 break; 8491 } 8492 8493 refcount_set(&ref->refcount, 1); 8494 ref->buffer = iter->array_buffer->buffer; 8495 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 8496 if (IS_ERR(ref->page)) { 8497 ret = PTR_ERR(ref->page); 8498 ref->page = NULL; 8499 kfree(ref); 8500 break; 8501 } 8502 ref->cpu = iter->cpu_file; 8503 8504 r = ring_buffer_read_page(ref->buffer, &ref->page, 8505 len, iter->cpu_file, 1); 8506 if (r < 0) { 8507 ring_buffer_free_read_page(ref->buffer, ref->cpu, 8508 ref->page); 8509 kfree(ref); 8510 break; 8511 } 8512 8513 page = virt_to_page(ref->page); 8514 8515 spd.pages[i] = page; 8516 spd.partial[i].len = PAGE_SIZE; 8517 spd.partial[i].offset = 0; 8518 spd.partial[i].private = (unsigned long)ref; 8519 spd.nr_pages++; 8520 *ppos += PAGE_SIZE; 8521 8522 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); 8523 } 8524 8525 trace_access_unlock(iter->cpu_file); 8526 spd.nr_pages = i; 8527 8528 /* did we read anything? 
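 * If not, report any earlier error, give nonblocking readers -EAGAIN,
 * or block until the buffer fills to buffer_percent and retry.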
*/ 8529 if (!spd.nr_pages) { 8530 long wait_index; 8531 8532 if (ret) 8533 goto out; 8534 8535 ret = -EAGAIN; 8536 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) 8537 goto out; 8538 8539 wait_index = READ_ONCE(iter->wait_index); 8540 8541 ret = wait_on_pipe(iter, iter->tr->buffer_percent); 8542 if (ret) 8543 goto out; 8544 8545 /* No need to wait after waking up when tracing is off */ 8546 if (!tracer_tracing_is_on(iter->tr)) 8547 goto out; 8548 8549 /* Make sure we see the new wait_index */ 8550 smp_rmb(); 8551 if (wait_index != iter->wait_index) 8552 goto out; 8553 8554 goto again; 8555 } 8556 8557 ret = splice_to_pipe(pipe, &spd); 8558 out: 8559 splice_shrink_spd(&spd); 8560 8561 return ret; 8562 } 8563 8564 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */ 8565 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 8566 { 8567 struct ftrace_buffer_info *info = file->private_data; 8568 struct trace_iterator *iter = &info->iter; 8569 8570 if (cmd) 8571 return -ENOIOCTLCMD; 8572 8573 mutex_lock(&trace_types_lock); 8574 8575 iter->wait_index++; 8576 /* Make sure the waiters see the new wait_index */ 8577 smp_wmb(); 8578 8579 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); 8580 8581 mutex_unlock(&trace_types_lock); 8582 return 0; 8583 } 8584 8585 static const struct file_operations tracing_buffers_fops = { 8586 .open = tracing_buffers_open, 8587 .read = tracing_buffers_read, 8588 .poll = tracing_buffers_poll, 8589 .release = tracing_buffers_release, 8590 .splice_read = tracing_buffers_splice_read, 8591 .unlocked_ioctl = tracing_buffers_ioctl, 8592 .llseek = no_llseek, 8593 }; 8594 8595 static ssize_t 8596 tracing_stats_read(struct file *filp, char __user *ubuf, 8597 size_t count, loff_t *ppos) 8598 { 8599 struct inode *inode = file_inode(filp); 8600 struct trace_array *tr = inode->i_private; 8601 struct array_buffer *trace_buf = &tr->array_buffer; 8602 int cpu = tracing_get_cpu(inode); 8603 struct trace_seq *s; 8604 unsigned long cnt; 8605 unsigned long long t; 8606 unsigned long usec_rem; 8607 8608 s = kmalloc(sizeof(*s), GFP_KERNEL); 8609 if (!s) 8610 return -ENOMEM; 8611 8612 trace_seq_init(s); 8613 8614 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); 8615 trace_seq_printf(s, "entries: %ld\n", cnt); 8616 8617 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); 8618 trace_seq_printf(s, "overrun: %ld\n", cnt); 8619 8620 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); 8621 trace_seq_printf(s, "commit overrun: %ld\n", cnt); 8622 8623 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); 8624 trace_seq_printf(s, "bytes: %ld\n", cnt); 8625 8626 if (trace_clocks[tr->clock_id].in_ns) { 8627 /* local or global for trace_clock */ 8628 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 8629 usec_rem = do_div(t, USEC_PER_SEC); 8630 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", 8631 t, usec_rem); 8632 8633 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer)); 8634 usec_rem = do_div(t, USEC_PER_SEC); 8635 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); 8636 } else { 8637 /* counter or tsc mode for trace_clock */ 8638 trace_seq_printf(s, "oldest event ts: %llu\n", 8639 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 8640 8641 trace_seq_printf(s, "now ts: %llu\n", 8642 ring_buffer_time_stamp(trace_buf->buffer)); 8643 } 8644 8645 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); 8646 trace_seq_printf(s, "dropped events: 
%ld\n", cnt); 8647 8648 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); 8649 trace_seq_printf(s, "read events: %ld\n", cnt); 8650 8651 count = simple_read_from_buffer(ubuf, count, ppos, 8652 s->buffer, trace_seq_used(s)); 8653 8654 kfree(s); 8655 8656 return count; 8657 } 8658 8659 static const struct file_operations tracing_stats_fops = { 8660 .open = tracing_open_generic_tr, 8661 .read = tracing_stats_read, 8662 .llseek = generic_file_llseek, 8663 .release = tracing_release_generic_tr, 8664 }; 8665 8666 #ifdef CONFIG_DYNAMIC_FTRACE 8667 8668 static ssize_t 8669 tracing_read_dyn_info(struct file *filp, char __user *ubuf, 8670 size_t cnt, loff_t *ppos) 8671 { 8672 ssize_t ret; 8673 char *buf; 8674 int r; 8675 8676 /* 256 should be plenty to hold the amount needed */ 8677 buf = kmalloc(256, GFP_KERNEL); 8678 if (!buf) 8679 return -ENOMEM; 8680 8681 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n", 8682 ftrace_update_tot_cnt, 8683 ftrace_number_of_pages, 8684 ftrace_number_of_groups); 8685 8686 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 8687 kfree(buf); 8688 return ret; 8689 } 8690 8691 static const struct file_operations tracing_dyn_info_fops = { 8692 .open = tracing_open_generic, 8693 .read = tracing_read_dyn_info, 8694 .llseek = generic_file_llseek, 8695 }; 8696 #endif /* CONFIG_DYNAMIC_FTRACE */ 8697 8698 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) 8699 static void 8700 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, 8701 struct trace_array *tr, struct ftrace_probe_ops *ops, 8702 void *data) 8703 { 8704 tracing_snapshot_instance(tr); 8705 } 8706 8707 static void 8708 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, 8709 struct trace_array *tr, struct ftrace_probe_ops *ops, 8710 void *data) 8711 { 8712 struct ftrace_func_mapper *mapper = data; 8713 long *count = NULL; 8714 8715 if (mapper) 8716 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 8717 8718 if (count) { 8719 8720 if (*count <= 0) 8721 return; 8722 8723 (*count)--; 8724 } 8725 8726 tracing_snapshot_instance(tr); 8727 } 8728 8729 static int 8730 ftrace_snapshot_print(struct seq_file *m, unsigned long ip, 8731 struct ftrace_probe_ops *ops, void *data) 8732 { 8733 struct ftrace_func_mapper *mapper = data; 8734 long *count = NULL; 8735 8736 seq_printf(m, "%ps:", (void *)ip); 8737 8738 seq_puts(m, "snapshot"); 8739 8740 if (mapper) 8741 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 8742 8743 if (count) 8744 seq_printf(m, ":count=%ld\n", *count); 8745 else 8746 seq_puts(m, ":unlimited\n"); 8747 8748 return 0; 8749 } 8750 8751 static int 8752 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, 8753 unsigned long ip, void *init_data, void **data) 8754 { 8755 struct ftrace_func_mapper *mapper = *data; 8756 8757 if (!mapper) { 8758 mapper = allocate_ftrace_func_mapper(); 8759 if (!mapper) 8760 return -ENOMEM; 8761 *data = mapper; 8762 } 8763 8764 return ftrace_func_mapper_add_ip(mapper, ip, init_data); 8765 } 8766 8767 static void 8768 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, 8769 unsigned long ip, void *data) 8770 { 8771 struct ftrace_func_mapper *mapper = data; 8772 8773 if (!ip) { 8774 if (!mapper) 8775 return; 8776 free_ftrace_func_mapper(mapper, NULL); 8777 return; 8778 } 8779 8780 ftrace_func_mapper_remove_ip(mapper, ip); 8781 } 8782 8783 static struct ftrace_probe_ops snapshot_probe_ops = { 8784 .func = ftrace_snapshot, 8785 .print = ftrace_snapshot_print, 8786 }; 8787 8788 
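/*
 * A sketch of how the probe ops above and below are reached from user
 * space (illustrative commands; "schedule" is just an example function
 * to hook):
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter
 *	echo 'schedule:snapshot:5' > set_ftrace_filter
 *
 * The first form registers snapshot_probe_ops (snapshot on every hit);
 * the second registers snapshot_count_probe_ops, whose per-ip count of
 * five is decremented in ftrace_count_snapshot() until it runs out.
 */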
static struct ftrace_probe_ops snapshot_count_probe_ops = { 8789 .func = ftrace_count_snapshot, 8790 .print = ftrace_snapshot_print, 8791 .init = ftrace_snapshot_init, 8792 .free = ftrace_snapshot_free, 8793 }; 8794 8795 static int 8796 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, 8797 char *glob, char *cmd, char *param, int enable) 8798 { 8799 struct ftrace_probe_ops *ops; 8800 void *count = (void *)-1; 8801 char *number; 8802 int ret; 8803 8804 if (!tr) 8805 return -ENODEV; 8806 8807 /* hash funcs only work with set_ftrace_filter */ 8808 if (!enable) 8809 return -EINVAL; 8810 8811 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; 8812 8813 if (glob[0] == '!') 8814 return unregister_ftrace_function_probe_func(glob+1, tr, ops); 8815 8816 if (!param) 8817 goto out_reg; 8818 8819 number = strsep(&param, ":"); 8820 8821 if (!strlen(number)) 8822 goto out_reg; 8823 8824 /* 8825 * We use the callback data field (which is a pointer) 8826 * as our counter. 8827 */ 8828 ret = kstrtoul(number, 0, (unsigned long *)&count); 8829 if (ret) 8830 return ret; 8831 8832 out_reg: 8833 ret = tracing_alloc_snapshot_instance(tr); 8834 if (ret < 0) 8835 goto out; 8836 8837 ret = register_ftrace_function_probe(glob, tr, ops, count); 8838 8839 out: 8840 return ret < 0 ? ret : 0; 8841 } 8842 8843 static struct ftrace_func_command ftrace_snapshot_cmd = { 8844 .name = "snapshot", 8845 .func = ftrace_trace_snapshot_callback, 8846 }; 8847 8848 static __init int register_snapshot_cmd(void) 8849 { 8850 return register_ftrace_command(&ftrace_snapshot_cmd); 8851 } 8852 #else 8853 static inline __init int register_snapshot_cmd(void) { return 0; } 8854 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ 8855 8856 static struct dentry *tracing_get_dentry(struct trace_array *tr) 8857 { 8858 if (WARN_ON(!tr->dir)) 8859 return ERR_PTR(-ENODEV); 8860 8861 /* Top directory uses NULL as the parent */ 8862 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 8863 return NULL; 8864 8865 /* All sub buffers have a descriptor */ 8866 return tr->dir; 8867 } 8868 8869 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) 8870 { 8871 struct dentry *d_tracer; 8872 8873 if (tr->percpu_dir) 8874 return tr->percpu_dir; 8875 8876 d_tracer = tracing_get_dentry(tr); 8877 if (IS_ERR(d_tracer)) 8878 return NULL; 8879 8880 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); 8881 8882 MEM_FAIL(!tr->percpu_dir, 8883 "Could not create tracefs directory 'per_cpu/%d'\n", cpu); 8884 8885 return tr->percpu_dir; 8886 } 8887 8888 static struct dentry * 8889 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, 8890 void *data, long cpu, const struct file_operations *fops) 8891 { 8892 struct dentry *ret = trace_create_file(name, mode, parent, data, fops); 8893 8894 if (ret) /* See tracing_get_cpu() */ 8895 d_inode(ret)->i_cdev = (void *)(cpu + 1); 8896 return ret; 8897 } 8898 8899 static void 8900 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) 8901 { 8902 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); 8903 struct dentry *d_cpu; 8904 char cpu_dir[30]; /* 30 characters should be more than enough */ 8905 8906 if (!d_percpu) 8907 return; 8908 8909 snprintf(cpu_dir, 30, "cpu%ld", cpu); 8910 d_cpu = tracefs_create_dir(cpu_dir, d_percpu); 8911 if (!d_cpu) { 8912 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); 8913 return; 8914 } 8915 8916 /* per cpu trace_pipe */ 8917 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ,
d_cpu, 8918 tr, cpu, &tracing_pipe_fops); 8919 8920 /* per cpu trace */ 8921 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu, 8922 tr, cpu, &tracing_fops); 8923 8924 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu, 8925 tr, cpu, &tracing_buffers_fops); 8926 8927 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu, 8928 tr, cpu, &tracing_stats_fops); 8929 8930 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu, 8931 tr, cpu, &tracing_entries_fops); 8932 8933 #ifdef CONFIG_TRACER_SNAPSHOT 8934 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu, 8935 tr, cpu, &snapshot_fops); 8936 8937 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu, 8938 tr, cpu, &snapshot_raw_fops); 8939 #endif 8940 } 8941 8942 #ifdef CONFIG_FTRACE_SELFTEST 8943 /* Let selftest have access to static functions in this file */ 8944 #include "trace_selftest.c" 8945 #endif 8946 8947 static ssize_t 8948 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, 8949 loff_t *ppos) 8950 { 8951 struct trace_option_dentry *topt = filp->private_data; 8952 char *buf; 8953 8954 if (topt->flags->val & topt->opt->bit) 8955 buf = "1\n"; 8956 else 8957 buf = "0\n"; 8958 8959 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 8960 } 8961 8962 static ssize_t 8963 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, 8964 loff_t *ppos) 8965 { 8966 struct trace_option_dentry *topt = filp->private_data; 8967 unsigned long val; 8968 int ret; 8969 8970 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 8971 if (ret) 8972 return ret; 8973 8974 if (val != 0 && val != 1) 8975 return -EINVAL; 8976 8977 if (!!(topt->flags->val & topt->opt->bit) != val) { 8978 mutex_lock(&trace_types_lock); 8979 ret = __set_tracer_option(topt->tr, topt->flags, 8980 topt->opt, !val); 8981 mutex_unlock(&trace_types_lock); 8982 if (ret) 8983 return ret; 8984 } 8985 8986 *ppos += cnt; 8987 8988 return cnt; 8989 } 8990 8991 static int tracing_open_options(struct inode *inode, struct file *filp) 8992 { 8993 struct trace_option_dentry *topt = inode->i_private; 8994 int ret; 8995 8996 ret = tracing_check_open_get_tr(topt->tr); 8997 if (ret) 8998 return ret; 8999 9000 filp->private_data = inode->i_private; 9001 return 0; 9002 } 9003 9004 static int tracing_release_options(struct inode *inode, struct file *file) 9005 { 9006 struct trace_option_dentry *topt = file->private_data; 9007 9008 trace_array_put(topt->tr); 9009 return 0; 9010 } 9011 9012 static const struct file_operations trace_options_fops = { 9013 .open = tracing_open_options, 9014 .read = trace_options_read, 9015 .write = trace_options_write, 9016 .llseek = generic_file_llseek, 9017 .release = tracing_release_options, 9018 }; 9019 9020 /* 9021 * In order to pass in both the trace_array descriptor as well as the index 9022 * to the flag that the trace option file represents, the trace_array 9023 * has a character array of trace_flags_index[], which holds the index 9024 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. 9025 * The address of this character array is passed to the flag option file 9026 * read/write callbacks. 9027 * 9028 * In order to extract both the index and the trace_array descriptor, 9029 * get_tr_index() uses the following algorithm. 9030 * 9031 * idx = *ptr; 9032 * 9033 * As the pointer itself contains the address of the index (remember 9034 * index[1] == 1). 
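 *
 * For example (hypothetical layout): the option file for flag bit 5 is
 * handed &index[5], and since index[5] == 5, idx comes out as 5.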
9035 * 9036 * Then to get the trace_array descriptor, by subtracting that index 9037 * from the ptr, we get to the start of the index itself. 9038 * 9039 * ptr - idx == &index[0] 9040 * 9041 * Then a simple container_of() from that pointer gets us to the 9042 * trace_array descriptor. 9043 */ 9044 static void get_tr_index(void *data, struct trace_array **ptr, 9045 unsigned int *pindex) 9046 { 9047 *pindex = *(unsigned char *)data; 9048 9049 *ptr = container_of(data - *pindex, struct trace_array, 9050 trace_flags_index); 9051 } 9052 9053 static ssize_t 9054 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, 9055 loff_t *ppos) 9056 { 9057 void *tr_index = filp->private_data; 9058 struct trace_array *tr; 9059 unsigned int index; 9060 char *buf; 9061 9062 get_tr_index(tr_index, &tr, &index); 9063 9064 if (tr->trace_flags & (1 << index)) 9065 buf = "1\n"; 9066 else 9067 buf = "0\n"; 9068 9069 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 9070 } 9071 9072 static ssize_t 9073 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, 9074 loff_t *ppos) 9075 { 9076 void *tr_index = filp->private_data; 9077 struct trace_array *tr; 9078 unsigned int index; 9079 unsigned long val; 9080 int ret; 9081 9082 get_tr_index(tr_index, &tr, &index); 9083 9084 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 9085 if (ret) 9086 return ret; 9087 9088 if (val != 0 && val != 1) 9089 return -EINVAL; 9090 9091 mutex_lock(&event_mutex); 9092 mutex_lock(&trace_types_lock); 9093 ret = set_tracer_flag(tr, 1 << index, val); 9094 mutex_unlock(&trace_types_lock); 9095 mutex_unlock(&event_mutex); 9096 9097 if (ret < 0) 9098 return ret; 9099 9100 *ppos += cnt; 9101 9102 return cnt; 9103 } 9104 9105 static const struct file_operations trace_options_core_fops = { 9106 .open = tracing_open_generic, 9107 .read = trace_options_core_read, 9108 .write = trace_options_core_write, 9109 .llseek = generic_file_llseek, 9110 }; 9111 9112 struct dentry *trace_create_file(const char *name, 9113 umode_t mode, 9114 struct dentry *parent, 9115 void *data, 9116 const struct file_operations *fops) 9117 { 9118 struct dentry *ret; 9119 9120 ret = tracefs_create_file(name, mode, parent, data, fops); 9121 if (!ret) 9122 pr_warn("Could not create tracefs '%s' entry\n", name); 9123 9124 return ret; 9125 } 9126 9127 9128 static struct dentry *trace_options_init_dentry(struct trace_array *tr) 9129 { 9130 struct dentry *d_tracer; 9131 9132 if (tr->options) 9133 return tr->options; 9134 9135 d_tracer = tracing_get_dentry(tr); 9136 if (IS_ERR(d_tracer)) 9137 return NULL; 9138 9139 tr->options = tracefs_create_dir("options", d_tracer); 9140 if (!tr->options) { 9141 pr_warn("Could not create tracefs directory 'options'\n"); 9142 return NULL; 9143 } 9144 9145 return tr->options; 9146 } 9147 9148 static void 9149 create_trace_option_file(struct trace_array *tr, 9150 struct trace_option_dentry *topt, 9151 struct tracer_flags *flags, 9152 struct tracer_opt *opt) 9153 { 9154 struct dentry *t_options; 9155 9156 t_options = trace_options_init_dentry(tr); 9157 if (!t_options) 9158 return; 9159 9160 topt->flags = flags; 9161 topt->opt = opt; 9162 topt->tr = tr; 9163 9164 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE, 9165 t_options, topt, &trace_options_fops); 9166 9167 } 9168 9169 static void 9170 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) 9171 { 9172 struct trace_option_dentry *topts; 9173 struct trace_options *tr_topts; 9174 struct tracer_flags *flags; 9175 struct 
tracer_opt *opts; 9176 int cnt; 9177 int i; 9178 9179 if (!tracer) 9180 return; 9181 9182 flags = tracer->flags; 9183 9184 if (!flags || !flags->opts) 9185 return; 9186 9187 /* 9188 * If this is an instance, only create flags for tracers 9189 * the instance may have. 9190 */ 9191 if (!trace_ok_for_array(tracer, tr)) 9192 return; 9193 9194 for (i = 0; i < tr->nr_topts; i++) { 9195 /* Make sure there's no duplicate flags. */ 9196 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) 9197 return; 9198 } 9199 9200 opts = flags->opts; 9201 9202 for (cnt = 0; opts[cnt].name; cnt++) 9203 ; 9204 9205 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); 9206 if (!topts) 9207 return; 9208 9209 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), 9210 GFP_KERNEL); 9211 if (!tr_topts) { 9212 kfree(topts); 9213 return; 9214 } 9215 9216 tr->topts = tr_topts; 9217 tr->topts[tr->nr_topts].tracer = tracer; 9218 tr->topts[tr->nr_topts].topts = topts; 9219 tr->nr_topts++; 9220 9221 for (cnt = 0; opts[cnt].name; cnt++) { 9222 create_trace_option_file(tr, &topts[cnt], flags, 9223 &opts[cnt]); 9224 MEM_FAIL(topts[cnt].entry == NULL, 9225 "Failed to create trace option: %s", 9226 opts[cnt].name); 9227 } 9228 } 9229 9230 static struct dentry * 9231 create_trace_option_core_file(struct trace_array *tr, 9232 const char *option, long index) 9233 { 9234 struct dentry *t_options; 9235 9236 t_options = trace_options_init_dentry(tr); 9237 if (!t_options) 9238 return NULL; 9239 9240 return trace_create_file(option, TRACE_MODE_WRITE, t_options, 9241 (void *)&tr->trace_flags_index[index], 9242 &trace_options_core_fops); 9243 } 9244 9245 static void create_trace_options_dir(struct trace_array *tr) 9246 { 9247 struct dentry *t_options; 9248 bool top_level = tr == &global_trace; 9249 int i; 9250 9251 t_options = trace_options_init_dentry(tr); 9252 if (!t_options) 9253 return; 9254 9255 for (i = 0; trace_options[i]; i++) { 9256 if (top_level || 9257 !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) 9258 create_trace_option_core_file(tr, trace_options[i], i); 9259 } 9260 } 9261 9262 static ssize_t 9263 rb_simple_read(struct file *filp, char __user *ubuf, 9264 size_t cnt, loff_t *ppos) 9265 { 9266 struct trace_array *tr = filp->private_data; 9267 char buf[64]; 9268 int r; 9269 9270 r = tracer_tracing_is_on(tr); 9271 r = sprintf(buf, "%d\n", r); 9272 9273 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 9274 } 9275 9276 static ssize_t 9277 rb_simple_write(struct file *filp, const char __user *ubuf, 9278 size_t cnt, loff_t *ppos) 9279 { 9280 struct trace_array *tr = filp->private_data; 9281 struct trace_buffer *buffer = tr->array_buffer.buffer; 9282 unsigned long val; 9283 int ret; 9284 9285 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 9286 if (ret) 9287 return ret; 9288 9289 if (buffer) { 9290 mutex_lock(&trace_types_lock); 9291 if (!!val == tracer_tracing_is_on(tr)) { 9292 val = 0; /* do nothing */ 9293 } else if (val) { 9294 tracer_tracing_on(tr); 9295 if (tr->current_trace->start) 9296 tr->current_trace->start(tr); 9297 } else { 9298 tracer_tracing_off(tr); 9299 if (tr->current_trace->stop) 9300 tr->current_trace->stop(tr); 9301 /* Wake up any waiters */ 9302 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS); 9303 } 9304 mutex_unlock(&trace_types_lock); 9305 } 9306 9307 (*ppos)++; 9308 9309 return cnt; 9310 } 9311 9312 static const struct file_operations rb_simple_fops = { 9313 .open = tracing_open_generic_tr, 9314 .read = rb_simple_read, 9315 .write = rb_simple_write, 9316 .release = 
tracing_release_generic_tr, 9317 .llseek = default_llseek, 9318 }; 9319 9320 static ssize_t 9321 buffer_percent_read(struct file *filp, char __user *ubuf, 9322 size_t cnt, loff_t *ppos) 9323 { 9324 struct trace_array *tr = filp->private_data; 9325 char buf[64]; 9326 int r; 9327 9328 r = tr->buffer_percent; 9329 r = sprintf(buf, "%d\n", r); 9330 9331 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 9332 } 9333 9334 static ssize_t 9335 buffer_percent_write(struct file *filp, const char __user *ubuf, 9336 size_t cnt, loff_t *ppos) 9337 { 9338 struct trace_array *tr = filp->private_data; 9339 unsigned long val; 9340 int ret; 9341 9342 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 9343 if (ret) 9344 return ret; 9345 9346 if (val > 100) 9347 return -EINVAL; 9348 9349 tr->buffer_percent = val; 9350 9351 (*ppos)++; 9352 9353 return cnt; 9354 } 9355 9356 static const struct file_operations buffer_percent_fops = { 9357 .open = tracing_open_generic_tr, 9358 .read = buffer_percent_read, 9359 .write = buffer_percent_write, 9360 .release = tracing_release_generic_tr, 9361 .llseek = default_llseek, 9362 }; 9363 9364 static struct dentry *trace_instance_dir; 9365 9366 static void 9367 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); 9368 9369 static int 9370 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) 9371 { 9372 enum ring_buffer_flags rb_flags; 9373 9374 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 9375 9376 buf->tr = tr; 9377 9378 buf->buffer = ring_buffer_alloc(size, rb_flags); 9379 if (!buf->buffer) 9380 return -ENOMEM; 9381 9382 buf->data = alloc_percpu(struct trace_array_cpu); 9383 if (!buf->data) { 9384 ring_buffer_free(buf->buffer); 9385 buf->buffer = NULL; 9386 return -ENOMEM; 9387 } 9388 9389 /* Allocate the first page for all buffers */ 9390 set_buffer_entries(&tr->array_buffer, 9391 ring_buffer_size(tr->array_buffer.buffer, 0)); 9392 9393 return 0; 9394 } 9395 9396 static void free_trace_buffer(struct array_buffer *buf) 9397 { 9398 if (buf->buffer) { 9399 ring_buffer_free(buf->buffer); 9400 buf->buffer = NULL; 9401 free_percpu(buf->data); 9402 buf->data = NULL; 9403 } 9404 } 9405 9406 static int allocate_trace_buffers(struct trace_array *tr, int size) 9407 { 9408 int ret; 9409 9410 ret = allocate_trace_buffer(tr, &tr->array_buffer, size); 9411 if (ret) 9412 return ret; 9413 9414 #ifdef CONFIG_TRACER_MAX_TRACE 9415 ret = allocate_trace_buffer(tr, &tr->max_buffer, 9416 allocate_snapshot ? 
size : 1); 9417 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) { 9418 free_trace_buffer(&tr->array_buffer); 9419 return -ENOMEM; 9420 } 9421 tr->allocated_snapshot = allocate_snapshot; 9422 9423 allocate_snapshot = false; 9424 #endif 9425 9426 return 0; 9427 } 9428 9429 static void free_trace_buffers(struct trace_array *tr) 9430 { 9431 if (!tr) 9432 return; 9433 9434 free_trace_buffer(&tr->array_buffer); 9435 9436 #ifdef CONFIG_TRACER_MAX_TRACE 9437 free_trace_buffer(&tr->max_buffer); 9438 #endif 9439 } 9440 9441 static void init_trace_flags_index(struct trace_array *tr) 9442 { 9443 int i; 9444 9445 /* Used by the trace options files */ 9446 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) 9447 tr->trace_flags_index[i] = i; 9448 } 9449 9450 static void __update_tracer_options(struct trace_array *tr) 9451 { 9452 struct tracer *t; 9453 9454 for (t = trace_types; t; t = t->next) 9455 add_tracer_options(tr, t); 9456 } 9457 9458 static void update_tracer_options(struct trace_array *tr) 9459 { 9460 mutex_lock(&trace_types_lock); 9461 tracer_options_updated = true; 9462 __update_tracer_options(tr); 9463 mutex_unlock(&trace_types_lock); 9464 } 9465 9466 /* Must have trace_types_lock held */ 9467 struct trace_array *trace_array_find(const char *instance) 9468 { 9469 struct trace_array *tr, *found = NULL; 9470 9471 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9472 if (tr->name && strcmp(tr->name, instance) == 0) { 9473 found = tr; 9474 break; 9475 } 9476 } 9477 9478 return found; 9479 } 9480 9481 struct trace_array *trace_array_find_get(const char *instance) 9482 { 9483 struct trace_array *tr; 9484 9485 mutex_lock(&trace_types_lock); 9486 tr = trace_array_find(instance); 9487 if (tr) 9488 tr->ref++; 9489 mutex_unlock(&trace_types_lock); 9490 9491 return tr; 9492 } 9493 9494 static int trace_array_create_dir(struct trace_array *tr) 9495 { 9496 int ret; 9497 9498 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); 9499 if (!tr->dir) 9500 return -EINVAL; 9501 9502 ret = event_trace_add_tracer(tr->dir, tr); 9503 if (ret) { 9504 tracefs_remove(tr->dir); 9505 return ret; 9506 } 9507 9508 init_tracer_tracefs(tr, tr->dir); 9509 __update_tracer_options(tr); 9510 9511 return ret; 9512 } 9513 9514 static struct trace_array *trace_array_create(const char *name) 9515 { 9516 struct trace_array *tr; 9517 int ret; 9518 9519 ret = -ENOMEM; 9520 tr = kzalloc(sizeof(*tr), GFP_KERNEL); 9521 if (!tr) 9522 return ERR_PTR(ret); 9523 9524 tr->name = kstrdup(name, GFP_KERNEL); 9525 if (!tr->name) 9526 goto out_free_tr; 9527 9528 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) 9529 goto out_free_tr; 9530 9531 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL)) 9532 goto out_free_tr; 9533 9534 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; 9535 9536 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); 9537 9538 raw_spin_lock_init(&tr->start_lock); 9539 9540 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 9541 9542 tr->current_trace = &nop_trace; 9543 9544 INIT_LIST_HEAD(&tr->systems); 9545 INIT_LIST_HEAD(&tr->events); 9546 INIT_LIST_HEAD(&tr->hist_vars); 9547 INIT_LIST_HEAD(&tr->err_log); 9548 9549 if (allocate_trace_buffers(tr, trace_buf_size) < 0) 9550 goto out_free_tr; 9551 9552 if (ftrace_allocate_ftrace_ops(tr) < 0) 9553 goto out_free_tr; 9554 9555 ftrace_init_trace_array(tr); 9556 9557 init_trace_flags_index(tr); 9558 9559 if (trace_instance_dir) { 9560 ret = trace_array_create_dir(tr); 9561 if (ret) 9562 goto out_free_tr; 9563 } else 9564 
__trace_early_add_events(tr); 9565 9566 list_add(&tr->list, &ftrace_trace_arrays); 9567 9568 tr->ref++; 9569 9570 return tr; 9571 9572 out_free_tr: 9573 ftrace_free_ftrace_ops(tr); 9574 free_trace_buffers(tr); 9575 free_cpumask_var(tr->pipe_cpumask); 9576 free_cpumask_var(tr->tracing_cpumask); 9577 kfree(tr->name); 9578 kfree(tr); 9579 9580 return ERR_PTR(ret); 9581 } 9582 9583 static int instance_mkdir(const char *name) 9584 { 9585 struct trace_array *tr; 9586 int ret; 9587 9588 mutex_lock(&event_mutex); 9589 mutex_lock(&trace_types_lock); 9590 9591 ret = -EEXIST; 9592 if (trace_array_find(name)) 9593 goto out_unlock; 9594 9595 tr = trace_array_create(name); 9596 9597 ret = PTR_ERR_OR_ZERO(tr); 9598 9599 out_unlock: 9600 mutex_unlock(&trace_types_lock); 9601 mutex_unlock(&event_mutex); 9602 return ret; 9603 } 9604 9605 /** 9606 * trace_array_get_by_name - Create/Lookup a trace array, given its name. 9607 * @name: The name of the trace array to be looked up/created. 9608 * 9609 * Returns pointer to trace array with given name. 9610 * NULL, if it cannot be created. 9611 * 9612 * NOTE: This function increments the reference counter associated with the 9613 * trace array returned. This makes sure it cannot be freed while in use. 9614 * Use trace_array_put() once the trace array is no longer needed. 9615 * If the trace_array is to be freed, trace_array_destroy() needs to 9616 * be called after the trace_array_put(), or simply let user space delete 9617 * it from the tracefs instances directory. But until the 9618 * trace_array_put() is called, user space can not delete it. 9619 * 9620 */ 9621 struct trace_array *trace_array_get_by_name(const char *name) 9622 { 9623 struct trace_array *tr; 9624 9625 mutex_lock(&event_mutex); 9626 mutex_lock(&trace_types_lock); 9627 9628 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9629 if (tr->name && strcmp(tr->name, name) == 0) 9630 goto out_unlock; 9631 } 9632 9633 tr = trace_array_create(name); 9634 9635 if (IS_ERR(tr)) 9636 tr = NULL; 9637 out_unlock: 9638 if (tr) 9639 tr->ref++; 9640 9641 mutex_unlock(&trace_types_lock); 9642 mutex_unlock(&event_mutex); 9643 return tr; 9644 } 9645 EXPORT_SYMBOL_GPL(trace_array_get_by_name); 9646 9647 static int __remove_instance(struct trace_array *tr) 9648 { 9649 int i; 9650 9651 /* Reference counter for a newly created trace array = 1. 
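 * Anything above that single reference, or a nonzero trace_ref on the
 * buffers, means the instance is still in use and must not be removed.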
*/ 9652 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) 9653 return -EBUSY; 9654 9655 list_del(&tr->list); 9656 9657 /* Disable all the flags that were enabled coming in */ 9658 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { 9659 if ((1 << i) & ZEROED_TRACE_FLAGS) 9660 set_tracer_flag(tr, 1 << i, 0); 9661 } 9662 9663 tracing_set_nop(tr); 9664 clear_ftrace_function_probes(tr); 9665 event_trace_del_tracer(tr); 9666 ftrace_clear_pids(tr); 9667 ftrace_destroy_function_files(tr); 9668 tracefs_remove(tr->dir); 9669 free_percpu(tr->last_func_repeats); 9670 free_trace_buffers(tr); 9671 clear_tracing_err_log(tr); 9672 9673 for (i = 0; i < tr->nr_topts; i++) { 9674 kfree(tr->topts[i].topts); 9675 } 9676 kfree(tr->topts); 9677 9678 free_cpumask_var(tr->pipe_cpumask); 9679 free_cpumask_var(tr->tracing_cpumask); 9680 kfree(tr->name); 9681 kfree(tr); 9682 9683 return 0; 9684 } 9685 9686 int trace_array_destroy(struct trace_array *this_tr) 9687 { 9688 struct trace_array *tr; 9689 int ret; 9690 9691 if (!this_tr) 9692 return -EINVAL; 9693 9694 mutex_lock(&event_mutex); 9695 mutex_lock(&trace_types_lock); 9696 9697 ret = -ENODEV; 9698 9699 /* Making sure trace array exists before destroying it. */ 9700 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9701 if (tr == this_tr) { 9702 ret = __remove_instance(tr); 9703 break; 9704 } 9705 } 9706 9707 mutex_unlock(&trace_types_lock); 9708 mutex_unlock(&event_mutex); 9709 9710 return ret; 9711 } 9712 EXPORT_SYMBOL_GPL(trace_array_destroy); 9713 9714 static int instance_rmdir(const char *name) 9715 { 9716 struct trace_array *tr; 9717 int ret; 9718 9719 mutex_lock(&event_mutex); 9720 mutex_lock(&trace_types_lock); 9721 9722 ret = -ENODEV; 9723 tr = trace_array_find(name); 9724 if (tr) 9725 ret = __remove_instance(tr); 9726 9727 mutex_unlock(&trace_types_lock); 9728 mutex_unlock(&event_mutex); 9729 9730 return ret; 9731 } 9732 9733 static __init void create_trace_instances(struct dentry *d_tracer) 9734 { 9735 struct trace_array *tr; 9736 9737 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, 9738 instance_mkdir, 9739 instance_rmdir); 9740 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) 9741 return; 9742 9743 mutex_lock(&event_mutex); 9744 mutex_lock(&trace_types_lock); 9745 9746 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9747 if (!tr->name) 9748 continue; 9749 if (MEM_FAIL(trace_array_create_dir(tr) < 0, 9750 "Failed to create instance directory\n")) 9751 break; 9752 } 9753 9754 mutex_unlock(&trace_types_lock); 9755 mutex_unlock(&event_mutex); 9756 } 9757 9758 static void 9759 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) 9760 { 9761 struct trace_event_file *file; 9762 int cpu; 9763 9764 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer, 9765 tr, &show_traces_fops); 9766 9767 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer, 9768 tr, &set_tracer_fops); 9769 9770 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer, 9771 tr, &tracing_cpumask_fops); 9772 9773 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer, 9774 tr, &tracing_iter_fops); 9775 9776 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer, 9777 tr, &tracing_fops); 9778 9779 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer, 9780 tr, &tracing_pipe_fops); 9781 9782 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer, 9783 tr, &tracing_entries_fops); 9784 9785 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer, 9786 tr, 
&tracing_total_entries_fops); 9787 9788 trace_create_file("free_buffer", 0200, d_tracer, 9789 tr, &tracing_free_buffer_fops); 9790 9791 trace_create_file("trace_marker", 0220, d_tracer, 9792 tr, &tracing_mark_fops); 9793 9794 file = __find_event_file(tr, "ftrace", "print"); 9795 if (file && file->ef) 9796 eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef, 9797 file, &event_trigger_fops); 9798 tr->trace_marker_file = file; 9799 9800 trace_create_file("trace_marker_raw", 0220, d_tracer, 9801 tr, &tracing_mark_raw_fops); 9802 9803 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr, 9804 &trace_clock_fops); 9805 9806 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer, 9807 tr, &rb_simple_fops); 9808 9809 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr, 9810 &trace_time_stamp_mode_fops); 9811 9812 tr->buffer_percent = 50; 9813 9814 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer, 9815 tr, &buffer_percent_fops); 9816 9817 create_trace_options_dir(tr); 9818 9819 #ifdef CONFIG_TRACER_MAX_TRACE 9820 trace_create_maxlat_file(tr, d_tracer); 9821 #endif 9822 9823 if (ftrace_create_function_files(tr, d_tracer)) 9824 MEM_FAIL(1, "Could not allocate function filter files"); 9825 9826 #ifdef CONFIG_TRACER_SNAPSHOT 9827 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer, 9828 tr, &snapshot_fops); 9829 #endif 9830 9831 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer, 9832 tr, &tracing_err_log_fops); 9833 9834 for_each_tracing_cpu(cpu) 9835 tracing_init_tracefs_percpu(tr, cpu); 9836 9837 ftrace_init_tracefs(tr, d_tracer); 9838 } 9839 9840 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore) 9841 { 9842 struct vfsmount *mnt; 9843 struct file_system_type *type; 9844 9845 /* 9846 * To maintain backward compatibility for tools that mount 9847 * debugfs to get to the tracing facility, tracefs is automatically 9848 * mounted to the debugfs/tracing directory. 9849 */ 9850 type = get_fs_type("tracefs"); 9851 if (!type) 9852 return NULL; 9853 mnt = vfs_submount(mntpt, type, "tracefs", NULL); 9854 put_filesystem(type); 9855 if (IS_ERR(mnt)) 9856 return NULL; 9857 mntget(mnt); 9858 9859 return mnt; 9860 } 9861 9862 /** 9863 * tracing_init_dentry - initialize top level trace array 9864 * 9865 * This is called when creating files or directories in the tracing 9866 * directory. It is called via fs_initcall() by any of the boot up code 9867 * and expects to return the dentry of the top level tracing directory. 9868 */ 9869 int tracing_init_dentry(void) 9870 { 9871 struct trace_array *tr = &global_trace; 9872 9873 if (security_locked_down(LOCKDOWN_TRACEFS)) { 9874 pr_warn("Tracing disabled due to lockdown\n"); 9875 return -EPERM; 9876 } 9877 9878 /* The top level trace array uses NULL as parent */ 9879 if (tr->dir) 9880 return 0; 9881 9882 if (WARN_ON(!tracefs_initialized())) 9883 return -ENODEV; 9884 9885 /* 9886 * As there may still be users that expect the tracing 9887 * files to exist in debugfs/tracing, we must automount 9888 * the tracefs file system there, so older tools still 9889 * work with the newer kernel.
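 * (For instance, an older tool that only knows to read
 * debugfs/tracing/trace keeps working: the automount hands it the
 * tracefs mount created by trace_automount() above.)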
9890 */ 9891 tr->dir = debugfs_create_automount("tracing", NULL, 9892 trace_automount, NULL); 9893 9894 return 0; 9895 } 9896 9897 extern struct trace_eval_map *__start_ftrace_eval_maps[]; 9898 extern struct trace_eval_map *__stop_ftrace_eval_maps[]; 9899 9900 static struct workqueue_struct *eval_map_wq __initdata; 9901 static struct work_struct eval_map_work __initdata; 9902 static struct work_struct tracerfs_init_work __initdata; 9903 9904 static void __init eval_map_work_func(struct work_struct *work) 9905 { 9906 int len; 9907 9908 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; 9909 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len); 9910 } 9911 9912 static int __init trace_eval_init(void) 9913 { 9914 INIT_WORK(&eval_map_work, eval_map_work_func); 9915 9916 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0); 9917 if (!eval_map_wq) { 9918 pr_err("Unable to allocate eval_map_wq\n"); 9919 /* Do work here */ 9920 eval_map_work_func(&eval_map_work); 9921 return -ENOMEM; 9922 } 9923 9924 queue_work(eval_map_wq, &eval_map_work); 9925 return 0; 9926 } 9927 9928 subsys_initcall(trace_eval_init); 9929 9930 static int __init trace_eval_sync(void) 9931 { 9932 /* Make sure the eval map updates are finished */ 9933 if (eval_map_wq) 9934 destroy_workqueue(eval_map_wq); 9935 return 0; 9936 } 9937 9938 late_initcall_sync(trace_eval_sync); 9939 9940 9941 #ifdef CONFIG_MODULES 9942 static void trace_module_add_evals(struct module *mod) 9943 { 9944 if (!mod->num_trace_evals) 9945 return; 9946 9947 /* 9948 * Modules with bad taint do not have events created, do 9949 * not bother with enums either. 9950 */ 9951 if (trace_module_has_bad_taint(mod)) 9952 return; 9953 9954 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals); 9955 } 9956 9957 #ifdef CONFIG_TRACE_EVAL_MAP_FILE 9958 static void trace_module_remove_evals(struct module *mod) 9959 { 9960 union trace_eval_map_item *map; 9961 union trace_eval_map_item **last = &trace_eval_maps; 9962 9963 if (!mod->num_trace_evals) 9964 return; 9965 9966 mutex_lock(&trace_eval_mutex); 9967 9968 map = trace_eval_maps; 9969 9970 while (map) { 9971 if (map->head.mod == mod) 9972 break; 9973 map = trace_eval_jmp_to_tail(map); 9974 last = &map->tail.next; 9975 map = map->tail.next; 9976 } 9977 if (!map) 9978 goto out; 9979 9980 *last = trace_eval_jmp_to_tail(map)->tail.next; 9981 kfree(map); 9982 out: 9983 mutex_unlock(&trace_eval_mutex); 9984 } 9985 #else 9986 static inline void trace_module_remove_evals(struct module *mod) { } 9987 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ 9988 9989 static int trace_module_notify(struct notifier_block *self, 9990 unsigned long val, void *data) 9991 { 9992 struct module *mod = data; 9993 9994 switch (val) { 9995 case MODULE_STATE_COMING: 9996 trace_module_add_evals(mod); 9997 break; 9998 case MODULE_STATE_GOING: 9999 trace_module_remove_evals(mod); 10000 break; 10001 } 10002 10003 return NOTIFY_OK; 10004 } 10005 10006 static struct notifier_block trace_module_nb = { 10007 .notifier_call = trace_module_notify, 10008 .priority = 0, 10009 }; 10010 #endif /* CONFIG_MODULES */ 10011 10012 static __init void tracer_init_tracefs_work_func(struct work_struct *work) 10013 { 10014 10015 event_trace_init(); 10016 10017 init_tracer_tracefs(&global_trace, NULL); 10018 ftrace_init_tracefs_toplevel(&global_trace, NULL); 10019 10020 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL, 10021 &global_trace, &tracing_thresh_fops); 10022 10023 trace_create_file("README", TRACE_MODE_READ, NULL, 10024 NULL, 
&tracing_readme_fops); 10025 10026 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL, 10027 NULL, &tracing_saved_cmdlines_fops); 10028 10029 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL, 10030 NULL, &tracing_saved_cmdlines_size_fops); 10031 10032 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL, 10033 NULL, &tracing_saved_tgids_fops); 10034 10035 trace_create_eval_file(NULL); 10036 10037 #ifdef CONFIG_MODULES 10038 register_module_notifier(&trace_module_nb); 10039 #endif 10040 10041 #ifdef CONFIG_DYNAMIC_FTRACE 10042 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL, 10043 NULL, &tracing_dyn_info_fops); 10044 #endif 10045 10046 create_trace_instances(NULL); 10047 10048 update_tracer_options(&global_trace); 10049 } 10050 10051 static __init int tracer_init_tracefs(void) 10052 { 10053 int ret; 10054 10055 trace_access_lock_init(); 10056 10057 ret = tracing_init_dentry(); 10058 if (ret) 10059 return 0; 10060 10061 if (eval_map_wq) { 10062 INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func); 10063 queue_work(eval_map_wq, &tracerfs_init_work); 10064 } else { 10065 tracer_init_tracefs_work_func(NULL); 10066 } 10067 10068 rv_init_interface(); 10069 10070 return 0; 10071 } 10072 10073 fs_initcall(tracer_init_tracefs); 10074 10075 static int trace_die_panic_handler(struct notifier_block *self, 10076 unsigned long ev, void *unused); 10077 10078 static struct notifier_block trace_panic_notifier = { 10079 .notifier_call = trace_die_panic_handler, 10080 .priority = INT_MAX - 1, 10081 }; 10082 10083 static struct notifier_block trace_die_notifier = { 10084 .notifier_call = trace_die_panic_handler, 10085 .priority = INT_MAX - 1, 10086 }; 10087 10088 /* 10089 * The idea is to execute the following die/panic callback early, in order 10090 * to avoid showing irrelevant information in the trace (like other panic 10091 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall 10092 * warnings get disabled (to prevent potential log flooding). 10093 */ 10094 static int trace_die_panic_handler(struct notifier_block *self, 10095 unsigned long ev, void *unused) 10096 { 10097 if (!ftrace_dump_on_oops) 10098 return NOTIFY_DONE; 10099 10100 /* The die notifier requires DIE_OOPS to trigger */ 10101 if (self == &trace_die_notifier && ev != DIE_OOPS) 10102 return NOTIFY_DONE; 10103 10104 ftrace_dump(ftrace_dump_on_oops); 10105 10106 return NOTIFY_DONE; 10107 } 10108 10109 /* 10110 * printk is set to max of 1024, we really don't need it that big. 10111 * Nothing should be printing 1000 characters anyway. 10112 */ 10113 #define TRACE_MAX_PRINT 1000 10114 10115 /* 10116 * Define here KERN_TRACE so that we have one place to modify 10117 * it if we decide to change what log level the ftrace dump 10118 * should be at. 10119 */ 10120 #define KERN_TRACE KERN_EMERG 10121 10122 void 10123 trace_printk_seq(struct trace_seq *s) 10124 { 10125 /* Probably should print a warning here. */ 10126 if (s->seq.len >= TRACE_MAX_PRINT) 10127 s->seq.len = TRACE_MAX_PRINT; 10128 10129 /* 10130 * More paranoid code. Although the buffer size is set to 10131 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just 10132 * an extra layer of protection. 10133 */ 10134 if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) 10135 s->seq.len = s->seq.size - 1; 10136 10137 /* should be zero ended, but we are paranoid. 
*/ 10138 s->buffer[s->seq.len] = 0; 10139 10140 printk(KERN_TRACE "%s", s->buffer); 10141 10142 trace_seq_init(s); 10143 } 10144 10145 void trace_init_global_iter(struct trace_iterator *iter) 10146 { 10147 iter->tr = &global_trace; 10148 iter->trace = iter->tr->current_trace; 10149 iter->cpu_file = RING_BUFFER_ALL_CPUS; 10150 iter->array_buffer = &global_trace.array_buffer; 10151 10152 if (iter->trace && iter->trace->open) 10153 iter->trace->open(iter); 10154 10155 /* Annotate start of buffers if we had overruns */ 10156 if (ring_buffer_overruns(iter->array_buffer->buffer)) 10157 iter->iter_flags |= TRACE_FILE_ANNOTATE; 10158 10159 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 10160 if (trace_clocks[iter->tr->clock_id].in_ns) 10161 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 10162 10163 /* Can not use kmalloc for iter.temp and iter.fmt */ 10164 iter->temp = static_temp_buf; 10165 iter->temp_size = STATIC_TEMP_BUF_SIZE; 10166 iter->fmt = static_fmt_buf; 10167 iter->fmt_size = STATIC_FMT_BUF_SIZE; 10168 } 10169 10170 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) 10171 { 10172 /* use static because iter can be a bit big for the stack */ 10173 static struct trace_iterator iter; 10174 static atomic_t dump_running; 10175 struct trace_array *tr = &global_trace; 10176 unsigned int old_userobj; 10177 unsigned long flags; 10178 int cnt = 0, cpu; 10179 10180 /* Only allow one dump user at a time. */ 10181 if (atomic_inc_return(&dump_running) != 1) { 10182 atomic_dec(&dump_running); 10183 return; 10184 } 10185 10186 /* 10187 * Always turn off tracing when we dump. 10188 * We don't need to show trace output of what happens 10189 * between multiple crashes. 10190 * 10191 * If the user does a sysrq-z, then they can re-enable 10192 * tracing with echo 1 > tracing_on. 10193 */ 10194 tracing_off(); 10195 10196 local_irq_save(flags); 10197 10198 /* Simulate the iterator */ 10199 trace_init_global_iter(&iter); 10200 10201 for_each_tracing_cpu(cpu) { 10202 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); 10203 } 10204 10205 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; 10206 10207 /* don't look at user memory in panic mode */ 10208 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 10209 10210 switch (oops_dump_mode) { 10211 case DUMP_ALL: 10212 iter.cpu_file = RING_BUFFER_ALL_CPUS; 10213 break; 10214 case DUMP_ORIG: 10215 iter.cpu_file = raw_smp_processor_id(); 10216 break; 10217 case DUMP_NONE: 10218 goto out_enable; 10219 default: 10220 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); 10221 iter.cpu_file = RING_BUFFER_ALL_CPUS; 10222 } 10223 10224 printk(KERN_TRACE "Dumping ftrace buffer:\n"); 10225 10226 /* Did function tracer already get disabled? */ 10227 if (ftrace_is_dead()) { 10228 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); 10229 printk("# MAY BE MISSING FUNCTION EVENTS\n"); 10230 } 10231 10232 /* 10233 * We need to stop all tracing on all CPUS to read 10234 * the next buffer. This is a bit expensive, but is 10235 * not done often. We fill all what we can read, 10236 * and then release the locks again. 
10237 */ 10238 10239 while (!trace_empty(&iter)) { 10240 10241 if (!cnt) 10242 printk(KERN_TRACE "---------------------------------\n"); 10243 10244 cnt++; 10245 10246 trace_iterator_reset(&iter); 10247 iter.iter_flags |= TRACE_FILE_LAT_FMT; 10248 10249 if (trace_find_next_entry_inc(&iter) != NULL) { 10250 int ret; 10251 10252 ret = print_trace_line(&iter); 10253 if (ret != TRACE_TYPE_NO_CONSUME) 10254 trace_consume(&iter); 10255 } 10256 touch_nmi_watchdog(); 10257 10258 trace_printk_seq(&iter.seq); 10259 } 10260 10261 if (!cnt) 10262 printk(KERN_TRACE " (ftrace buffer empty)\n"); 10263 else 10264 printk(KERN_TRACE "---------------------------------\n"); 10265 10266 out_enable: 10267 tr->trace_flags |= old_userobj; 10268 10269 for_each_tracing_cpu(cpu) { 10270 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); 10271 } 10272 atomic_dec(&dump_running); 10273 local_irq_restore(flags); 10274 } 10275 EXPORT_SYMBOL_GPL(ftrace_dump); 10276 10277 #define WRITE_BUFSIZE 4096 10278 10279 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer, 10280 size_t count, loff_t *ppos, 10281 int (*createfn)(const char *)) 10282 { 10283 char *kbuf, *buf, *tmp; 10284 int ret = 0; 10285 size_t done = 0; 10286 size_t size; 10287 10288 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); 10289 if (!kbuf) 10290 return -ENOMEM; 10291 10292 while (done < count) { 10293 size = count - done; 10294 10295 if (size >= WRITE_BUFSIZE) 10296 size = WRITE_BUFSIZE - 1; 10297 10298 if (copy_from_user(kbuf, buffer + done, size)) { 10299 ret = -EFAULT; 10300 goto out; 10301 } 10302 kbuf[size] = '\0'; 10303 buf = kbuf; 10304 do { 10305 tmp = strchr(buf, '\n'); 10306 if (tmp) { 10307 *tmp = '\0'; 10308 size = tmp - buf + 1; 10309 } else { 10310 size = strlen(buf); 10311 if (done + size < count) { 10312 if (buf != kbuf) 10313 break; 10314 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ 10315 pr_warn("Line length is too long: Should be less than %d\n", 10316 WRITE_BUFSIZE - 2); 10317 ret = -EINVAL; 10318 goto out; 10319 } 10320 } 10321 done += size; 10322 10323 /* Remove comments */ 10324 tmp = strchr(buf, '#'); 10325 10326 if (tmp) 10327 *tmp = '\0'; 10328 10329 ret = createfn(buf); 10330 if (ret) 10331 goto out; 10332 buf += size; 10333 10334 } while (done < count); 10335 } 10336 ret = done; 10337 10338 out: 10339 kfree(kbuf); 10340 10341 return ret; 10342 } 10343 10344 #ifdef CONFIG_TRACER_MAX_TRACE 10345 __init static bool tr_needs_alloc_snapshot(const char *name) 10346 { 10347 char *test; 10348 int len = strlen(name); 10349 bool ret; 10350 10351 if (!boot_snapshot_index) 10352 return false; 10353 10354 if (strncmp(name, boot_snapshot_info, len) == 0 && 10355 boot_snapshot_info[len] == '\t') 10356 return true; 10357 10358 test = kmalloc(strlen(name) + 3, GFP_KERNEL); 10359 if (!test) 10360 return false; 10361 10362 sprintf(test, "\t%s\t", name); 10363 ret = strstr(boot_snapshot_info, test) != NULL; 10364 kfree(test); 10365 return ret; 10366 } 10367 10368 __init static void do_allocate_snapshot(const char *name) 10369 { 10370 if (!tr_needs_alloc_snapshot(name)) 10371 return; 10372 10373 /* 10374 * When allocate_snapshot is set, the next call to 10375 * allocate_trace_buffers() (called by trace_array_get_by_name()) 10376 * will allocate the snapshot buffer. That will also clear 10377 * this flag.
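 * (Instances typically request this from the kernel command line; an
 * illustrative form is "ftrace_boot_snapshot=foo" to pre-allocate the
 * snapshot buffer of an instance named "foo".)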
#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	/* The first entry has no leading tab; match "name\t" at the start. */
	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	/* Subsequent entries are surrounded by tabs: "\tname\t". */
	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) != NULL;
	kfree(test);
	return ret;
}

__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif

__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
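/*
 * Illustrative boot usage (the instance and event names are examples):
 * enable_instances() above consumes "trace_instance=" entries from the
 * kernel command line, e.g.:
 *
 *	trace_instance=foo,sched:sched_switch,irq:irq_handler_entry
 *
 * which creates instance "foo" (later visible under
 * /sys/kernel/tracing/instances/foo) and enables the listed events in
 * it before user space comes up.
 */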
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}
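/*
 * Rough boot ordering, for orientation: start_kernel() calls
 * early_trace_init() below very early (so command-line function
 * tracing can start), calls trace_init() once the event infrastructure
 * is ready, and late_trace_init() at the bottom of this file runs as a
 * late_initcall_sync() to finish up.
 */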
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}

void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}

__init static void clear_boot_tracer(void)
{
	/*
	 * The default boot-up tracer's name is stored in an init
	 * section. This function is called from a late initcall. If
	 * the boot tracer was never registered by then, clear the
	 * pointer to prevent later registration from accessing the
	 * memory that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);