// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/cleanup.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include <asm/setup.h> /* COMMAND_LINE_SIZE */

#include "trace.h"
#include "trace_output.h"

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring-buffer, such as trace_printk(), could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#else
#define tracing_selftest_running	0
#define tracing_selftest_disabled	0
#endif

/* Pipe tracepoints to printk */
static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 * Set instance name if you want to dump the specific trace instance
 * Multiple instance dump is also supported, and instances are separated
 * by commas.
 */
/* Set to the string "0" to disable by default */
char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * from "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;

static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
static int boot_instance_index;

static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
static int boot_snapshot_index;

static int __init set_cmdline_ftrace(char *str)
{
	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	trace_set_ring_buffer_expanded(NULL);
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

int ftrace_dump_on_oops_enabled(void)
{
	if (!strcmp("0", ftrace_dump_on_oops))
		return 0;
	else
		return 1;
}

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (!*str) {
		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
		return 1;
	}

	if (*str == ',') {
		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
		strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
		return 1;
	}

	if (*str++ == '=') {
		strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	char *slot = boot_snapshot_info + boot_snapshot_index;
	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
	int ret;

	if (str[0] == '=') {
		str++;
		if (strlen(str) >= left)
			return -1;

		ret = snprintf(slot, left, "%s\t", str);
		boot_snapshot_index += ret;
	} else {
		allocate_snapshot = true;
		/* We also need the main ring buffer expanded */
		trace_set_ring_buffer_expanded(NULL);
	}
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static int __init boot_snapshot(char *str)
{
	snapshot_at_boot = true;
	boot_alloc_snapshot(str);
	return 1;
}
__setup("ftrace_boot_snapshot", boot_snapshot);


static int __init boot_instance(char *str)
{
	char *slot = boot_instance_info + boot_instance_index;
	int left = sizeof(boot_instance_info) - boot_instance_index;
	int ret;

	if (strlen(str) >= left)
		return -1;

	ret = snprintf(slot, left, "%s\t", str);
	boot_instance_index += ret;

	return 1;
}
__setup("trace_instance=", boot_instance);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);
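
/*
 * Illustrative kernel command lines handled by the __setup() hooks
 * above (a sketch; the available tracers and options depend on the
 * kernel configuration):
 *
 *	ftrace=function
 *	ftrace_dump_on_oops=2
 *	traceoff_on_warning
 *	alloc_snapshot
 *	trace_instance=foo
 *	trace_options=stacktrace
 */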

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	/* Ignore the "tp_printk_stop_on_boot" param */
	if (*str == '_')
		return 0;

	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
{
	tracepoint_printk_stop_on_boot = true;
	return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included in the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
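
/*
 * Sketch of a trace_export user (the "my_*" names are hypothetical,
 * not part of this file): a module can mirror trace data to its own
 * sink by registering a ->write() callback.
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int len)
 *	{
 *		...push "len" bytes starting at "entry" to the sink...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */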

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
	 TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

static struct trace_array *printk_trace = &global_trace;

static __always_inline bool printk_binsafe(struct trace_array *tr)
{
	/*
	 * The binary format of traceprintk can cause a crash if used
	 * by a buffer from another boot. Force the use of the
	 * non binary version of trace_printk if the trace_printk
	 * buffer is a boot mapped ring buffer.
	 */
	return !(tr->flags & TRACE_ARRAY_FL_BOOT);
}

static void update_printk_trace(struct trace_array *tr)
{
	if (printk_trace == tr)
		return;

	printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
	printk_trace = tr;
	tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
}

void trace_set_ring_buffer_expanded(struct trace_array *tr)
{
	if (!tr)
		tr = &global_trace;
	tr->ring_buffer_expanded = true;
}

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;

	guard(mutex)(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			return 0;
		}
	}

	return -ENODEV;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	return trace_pid_list_is_set(filtered_pids, search_pid);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* "self" is set for forks, and NULL for exits */
	if (self)
		trace_pid_list_set(pid_list, task->pid);
	else
		trace_pid_list_clear(pid_list, task->pid);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	long pid = (unsigned long)v;
	unsigned int next;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	if (trace_pid_list_next(pid_list, pid, &next) < 0)
		return NULL;

	pid = next;

	/* Return pid + 1 to allow zero to be represented */
	return (void *)(pid + 1);
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	unsigned int first;
	loff_t l = 0;

	if (trace_pid_list_first(pid_list, &first) < 0)
		return NULL;

	pid = first;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}
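
/*
 * trace_pid_start()/trace_pid_next() above and trace_pid_show() below
 * slot directly into seq_file iteration. A sketch of a typical user
 * (the "f_*" wrappers and the pid_list lookup are hypothetical; see
 * the set_ftrace_pid handling in ftrace.c for a real one):
 *
 *	static void *f_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(pid_list, pos);
 *	}
 *
 *	static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations f_seq_ops = {
 *		.start	= f_start,
 *		.next	= f_next,
 *		.stop	= f_stop,
 *		.show	= trace_pid_show,
 *	};
 */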

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = trace_pid_list_alloc();
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		ret = trace_pid_list_first(filtered_pids, &pid);
		while (!ret) {
			trace_pid_list_set(pid_list, pid);
			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
			nr_pids++;
		}
	}

	ret = 0;
	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0)
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		if (!trace_parser_loaded(&parser))
			break;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;

		pid = (pid_t)val;

		if (trace_pid_list_set(pid_list, pid) < 0) {
			ret = -1;
			break;
		}
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_pid_list_free(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_pid_list_free(pid_list);
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
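
/*
 * trace_pid_write() backs pid filter files such as set_event_pid and
 * set_ftrace_pid. From userspace the semantics look like:
 *
 *	# echo 123 244 > set_event_pid		(replace the list)
 *	# echo 567 >> set_event_pid		(append, via @filtered_pids)
 *	# echo > set_event_pid			(no pids parsed: clear the list)
 */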

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * not to have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek(), etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
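
/*
 * Usage pattern for the lock helpers below (sketch):
 *
 *	trace_access_lock(cpu);
 *	...read or consume events from the @cpu ring buffer...
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS takes the access lock for every CPU's
 * buffer at once, excluding all per-cpu readers.
 */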

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_array *tr,
				 struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_array *tr,
					struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned int trace_ctx)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, trace_ctx);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		/* ring_buffer_unlock_commit() enables preemption */
		preempt_enable_notrace();
	} else
		ring_buffer_unlock_commit(buffer);
}

int __trace_array_puts(struct trace_array *tr, unsigned long ip,
		       const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running && tr == &global_trace))
		return 0;

	if (unlikely(tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_array_puts);
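
/*
 * Note: the usual entry point is the trace_puts() macro, which picks
 * __trace_bputs()/__trace_puts() (below) for constant strings and
 * computes the size at build time, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 */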

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	return __trace_array_puts(printk_trace, ip, str, size);
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write into the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct trace_array *tr = READ_ONCE(printk_trace);
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);
	int ret = 0;

	if (!printk_binsafe(tr))
		return __trace_puts(ip, str, strlen(str));

	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = tr->array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		trace_array_puts(tr, "*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
		trace_array_puts(tr, "*** stopping trace here! ***\n");
		tracer_tracing_off(tr);
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
		return;
	}

	if (tr->mapped) {
		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr: The tracing instance to snapshot
 * @cond_data: The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr: The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int order;
	int ret;

	if (!tr->allocated_snapshot) {

		/* Make the snapshot buffer have the same order as main buffer */
		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
		ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
		if (ret < 0)
			return ret;

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

static int tracing_arm_snapshot_locked(struct trace_array *tr)
{
	int ret;

	lockdep_assert_held(&trace_types_lock);

	spin_lock(&tr->snapshot_trigger_lock);
	if (tr->snapshot == UINT_MAX || tr->mapped) {
		spin_unlock(&tr->snapshot_trigger_lock);
		return -EBUSY;
	}

	tr->snapshot++;
	spin_unlock(&tr->snapshot_trigger_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret) {
		spin_lock(&tr->snapshot_trigger_lock);
		tr->snapshot--;
		spin_unlock(&tr->snapshot_trigger_lock);
	}

	return ret;
}

int tracing_arm_snapshot(struct trace_array *tr)
{
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_arm_snapshot_locked(tr);
	mutex_unlock(&trace_types_lock);

	return ret;
}

void tracing_disarm_snapshot(struct trace_array *tr)
{
	spin_lock(&tr->snapshot_trigger_lock);
	if (!WARN_ON(!tr->snapshot))
		tr->snapshot--;
	spin_unlock(&tr->snapshot_trigger_lock);
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr: The tracing instance
 * @cond_data: User data to associate with the snapshot
 * @update: Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot __free(kfree) =
		kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	int ret;

	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	guard(mutex)(&trace_types_lock);

	if (tr->current_trace->use_max_tr)
		return -EBUSY;

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot)
		return -EBUSY;

	ret = tracing_arm_snapshot_locked(tr);
	if (ret)
		return ret;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = no_free_ptr(cond_snapshot);
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);

/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr: The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	tracing_disarm_snapshot(tr);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#define free_snapshot(tr)	do { } while (0)
#define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
#endif /* CONFIG_TRACER_SNAPSHOT */
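
/*
 * Sketch of a conditional-snapshot user of the API above (the "my_*"
 * names are hypothetical):
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *s = cond_data;
 *
 *		return s->hit;		// true: take the snapshot
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_state, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_state);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */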

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning) {
		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
			"Disabling tracing due to warning\n");
		tracing_off();
	}
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/*
	 * nr_entries can not be zero and the startup
	 * tests require some buffer space. Therefore
	 * ensure we have at least 4096 bytes of buffer.
	 */
	trace_buf_size = max(4096UL, buf_size);
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
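
/*
 * Boot-time examples for the two handlers above (illustrative):
 *
 *	trace_buf_size=4096k	(size is per CPU; memparse() suffixes work)
 *	tracing_thresh=100	(in microseconds; stored in nanoseconds)
 */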

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	{ ktime_get_tai_fast_ns,	"tai",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

 out:
	return ret;
}

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops;

#ifdef LATENCY_FS_NOTIFY

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
	struct trace_array *tr = container_of(work, struct trace_array,
					      fsnotify_work);
	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
	struct trace_array *tr = container_of(iwork, struct trace_array,
					      fsnotify_irqwork);
	queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
				     struct dentry *d_tracer)
{
	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
	tr->d_max_latency = trace_create_file("tracing_max_latency",
					      TRACE_MODE_WRITE,
					      d_tracer, tr,
					      &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!fsnotify_wq) {
		pr_err("Unable to allocate tr_max_lat_wq\n");
		return -ENOMEM;
	}
	return 0;
}

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
{
	if (!fsnotify_wq)
		return;
	/*
	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
	 * possible that we are called from __schedule() or do_idle(), which
	 * could cause a deadlock.
	 */
	irq_work_queue(&tr->fsnotify_irqwork);
}

#else /* !LATENCY_FS_NOTIFY */

#define trace_create_maxlat_file(tr, d_tracer)				\
	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
			  d_tracer, tr, &tracing_max_lat_fops)

#endif

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct array_buffer *trace_buf = &tr->array_buffer;
	struct array_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strscpy(max_data->comm, tsk->comm);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
	latency_fsnotify(tr);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from array_buffer */
	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
		arch_spin_unlock(&tr->max_lock);
		return;
	}
#endif
	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

	arch_spin_unlock(&tr->max_lock);

	/* Any waiters on the old snapshot buffer need to wake up */
	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
}
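
/*
 * Latency tracers are expected to call this with interrupts disabled,
 * as the internal snapshot path above does:
 *
 *	local_irq_save(flags);
 *	update_max_tr(tr, current, smp_processor_id(), cond_data);
 *	local_irq_restore(flags);
 */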
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer because a commit was taking
		 * place on this CPU, or because a resize is in progress.
		 * We fail to record the latency, but we reset the max trace
		 * buffer (no one writes directly to it) and flag that the
		 * swap failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit or resize in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

#endif /* CONFIG_TRACER_MAX_TRACE */

struct pipe_wait {
	struct trace_iterator	*iter;
	int			wait_index;
};

static bool wait_pipe_cond(void *data)
{
	struct pipe_wait *pwait = data;
	struct trace_iterator *iter = pwait->iter;

	if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
		return true;

	return iter->closed;
}

static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	struct pipe_wait pwait;
	int ret;

	/* Iterators are static; they should be either filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	pwait.wait_index = atomic_read_acquire(&iter->wait_index);
	pwait.iter = iter;

	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
			       wait_pipe_cond, &pwait);

#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * Make sure this is still the snapshot buffer, as if a snapshot were
	 * to happen, this would now be the main buffer.
	 */
	if (iter->snapshot)
		iter->array_buffer = &iter->tr->max_buffer;
#endif
	return ret;
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head	list;
	struct tracer		*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	if (!tracing_is_on()) {
		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
			type->name);
		return 0;
	}

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (tr->ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (tr->ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}

static int do_run_tracer_selftest(struct tracer *type)
{
	int ret;

	/*
	 * Tests can take a long time, especially if they are run one after the
	 * other, as does happen during bootup when all the tracers are
	 * registered. This could cause the soft lockup watchdog to trigger.
	 */
	cond_resched();

	tracing_selftest_running = true;
	ret = run_tracer_selftest(type);
	tracing_selftest_running = false;

	return ret;
}

static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	guard(mutex)(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		return 0;

	pr_info("Running postponed tracer tests:\n");

	tracing_selftest_running = true;
	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		/*
		 * This loop can take minutes when sanitizers are enabled, so
		 * let's make sure we allow RCU processing.
		 */
		cond_resched();
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}
	tracing_selftest_running = false;

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int do_run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Can not register tracer %s due to lockdown\n",
			type->name);
		return -EPERM;
	}

	mutex_lock(&trace_types_lock);

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = do_run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since running this tracer will break them
*/ 2321 disable_tracing_selftest("running a tracer"); 2322 2323 out_unlock: 2324 return ret; 2325 } 2326 2327 static void tracing_reset_cpu(struct array_buffer *buf, int cpu) 2328 { 2329 struct trace_buffer *buffer = buf->buffer; 2330 2331 if (!buffer) 2332 return; 2333 2334 ring_buffer_record_disable(buffer); 2335 2336 /* Make sure all commits have finished */ 2337 synchronize_rcu(); 2338 ring_buffer_reset_cpu(buffer, cpu); 2339 2340 ring_buffer_record_enable(buffer); 2341 } 2342 2343 void tracing_reset_online_cpus(struct array_buffer *buf) 2344 { 2345 struct trace_buffer *buffer = buf->buffer; 2346 2347 if (!buffer) 2348 return; 2349 2350 ring_buffer_record_disable(buffer); 2351 2352 /* Make sure all commits have finished */ 2353 synchronize_rcu(); 2354 2355 buf->time_start = buffer_ftrace_now(buf, buf->cpu); 2356 2357 ring_buffer_reset_online_cpus(buffer); 2358 2359 ring_buffer_record_enable(buffer); 2360 } 2361 2362 static void tracing_reset_all_cpus(struct array_buffer *buf) 2363 { 2364 struct trace_buffer *buffer = buf->buffer; 2365 2366 if (!buffer) 2367 return; 2368 2369 ring_buffer_record_disable(buffer); 2370 2371 /* Make sure all commits have finished */ 2372 synchronize_rcu(); 2373 2374 buf->time_start = buffer_ftrace_now(buf, buf->cpu); 2375 2376 ring_buffer_reset(buffer); 2377 2378 ring_buffer_record_enable(buffer); 2379 } 2380 2381 /* Must have trace_types_lock held */ 2382 void tracing_reset_all_online_cpus_unlocked(void) 2383 { 2384 struct trace_array *tr; 2385 2386 lockdep_assert_held(&trace_types_lock); 2387 2388 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 2389 if (!tr->clear_trace) 2390 continue; 2391 tr->clear_trace = false; 2392 tracing_reset_online_cpus(&tr->array_buffer); 2393 #ifdef CONFIG_TRACER_MAX_TRACE 2394 tracing_reset_online_cpus(&tr->max_buffer); 2395 #endif 2396 } 2397 } 2398 2399 void tracing_reset_all_online_cpus(void) 2400 { 2401 mutex_lock(&trace_types_lock); 2402 tracing_reset_all_online_cpus_unlocked(); 2403 mutex_unlock(&trace_types_lock); 2404 } 2405 2406 int is_tracing_stopped(void) 2407 { 2408 return global_trace.stop_count; 2409 } 2410 2411 static void tracing_start_tr(struct trace_array *tr) 2412 { 2413 struct trace_buffer *buffer; 2414 unsigned long flags; 2415 2416 if (tracing_disabled) 2417 return; 2418 2419 raw_spin_lock_irqsave(&tr->start_lock, flags); 2420 if (--tr->stop_count) { 2421 if (WARN_ON_ONCE(tr->stop_count < 0)) { 2422 /* Someone screwed up their debugging */ 2423 tr->stop_count = 0; 2424 } 2425 goto out; 2426 } 2427 2428 /* Prevent the buffers from switching */ 2429 arch_spin_lock(&tr->max_lock); 2430 2431 buffer = tr->array_buffer.buffer; 2432 if (buffer) 2433 ring_buffer_record_enable(buffer); 2434 2435 #ifdef CONFIG_TRACER_MAX_TRACE 2436 buffer = tr->max_buffer.buffer; 2437 if (buffer) 2438 ring_buffer_record_enable(buffer); 2439 #endif 2440 2441 arch_spin_unlock(&tr->max_lock); 2442 2443 out: 2444 raw_spin_unlock_irqrestore(&tr->start_lock, flags); 2445 } 2446 2447 /** 2448 * tracing_start - quick start of the tracer 2449 * 2450 * If tracing is enabled but was stopped by tracing_stop, 2451 * this will start the tracer back up. 
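 *
 * Starts nest with stops through tr->stop_count, so a
 * tracing_stop()/tracing_start() pair may safely wrap another pair.
 * A minimal usage sketch (illustrative only; the helper name is made up):
 *
 *	tracing_stop();
 *	do_noisy_work_not_to_be_traced();
 *	tracing_start();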
 */
void tracing_start(void)
{
	return tracing_start_tr(&global_trace);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&tr->max_lock);

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = tr->max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&tr->max_lock);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	return tracing_stop_tr(&global_trace);
}

/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);

static unsigned short migration_disable_value(void)
{
#if defined(CONFIG_SMP)
	return current->migration_disabled;
#else
	return 0;
#endif
}

unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
{
	unsigned int trace_flags = irqs_status;
	unsigned int pc;

	pc = preempt_count();

	if (pc & NMI_MASK)
		trace_flags |= TRACE_FLAG_NMI;
	if (pc & HARDIRQ_MASK)
		trace_flags |= TRACE_FLAG_HARDIRQ;
	if (in_serving_softirq())
		trace_flags |= TRACE_FLAG_SOFTIRQ;
	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
		trace_flags |= TRACE_FLAG_BH_OFF;

	if (tif_need_resched())
		trace_flags |= TRACE_FLAG_NEED_RESCHED;
	if (test_preempt_need_resched())
		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
	if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY))
		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx)
{
	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;

/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. A discard on the ring buffer
 * is not as fast as a commit, and is much slower than copying the
 * event and committing the copy.
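 *
 * Roughly, when a filter rejects most events, the two possible paths
 * compare as follows (a sketch of the flow, not literal code):
 *
 *	reserve in ring buffer -> filter fails -> discard commit   (slow)
 *	write to per-CPU page  -> filter fails -> drop the copy    (fast)
 *	write to per-CPU page  -> filter passes -> commit a copy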
2566 * 2567 * When an event is to be filtered, allocate per cpu buffers to 2568 * write the event data into, and if the event is filtered and discarded 2569 * it is simply dropped, otherwise, the entire data is to be committed 2570 * in one shot. 2571 */ 2572 void trace_buffered_event_enable(void) 2573 { 2574 struct ring_buffer_event *event; 2575 struct page *page; 2576 int cpu; 2577 2578 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2579 2580 if (trace_buffered_event_ref++) 2581 return; 2582 2583 for_each_tracing_cpu(cpu) { 2584 page = alloc_pages_node(cpu_to_node(cpu), 2585 GFP_KERNEL | __GFP_NORETRY, 0); 2586 /* This is just an optimization and can handle failures */ 2587 if (!page) { 2588 pr_err("Failed to allocate event buffer\n"); 2589 break; 2590 } 2591 2592 event = page_address(page); 2593 memset(event, 0, sizeof(*event)); 2594 2595 per_cpu(trace_buffered_event, cpu) = event; 2596 2597 preempt_disable(); 2598 if (cpu == smp_processor_id() && 2599 __this_cpu_read(trace_buffered_event) != 2600 per_cpu(trace_buffered_event, cpu)) 2601 WARN_ON_ONCE(1); 2602 preempt_enable(); 2603 } 2604 } 2605 2606 static void enable_trace_buffered_event(void *data) 2607 { 2608 /* Probably not needed, but do it anyway */ 2609 smp_rmb(); 2610 this_cpu_dec(trace_buffered_event_cnt); 2611 } 2612 2613 static void disable_trace_buffered_event(void *data) 2614 { 2615 this_cpu_inc(trace_buffered_event_cnt); 2616 } 2617 2618 /** 2619 * trace_buffered_event_disable - disable buffering events 2620 * 2621 * When a filter is removed, it is faster to not use the buffered 2622 * events, and to commit directly into the ring buffer. Free up 2623 * the temp buffers when there are no more users. This requires 2624 * special synchronization with current events. 2625 */ 2626 void trace_buffered_event_disable(void) 2627 { 2628 int cpu; 2629 2630 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2631 2632 if (WARN_ON_ONCE(!trace_buffered_event_ref)) 2633 return; 2634 2635 if (--trace_buffered_event_ref) 2636 return; 2637 2638 /* For each CPU, set the buffer as used. */ 2639 on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event, 2640 NULL, true); 2641 2642 /* Wait for all current users to finish */ 2643 synchronize_rcu(); 2644 2645 for_each_tracing_cpu(cpu) { 2646 free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); 2647 per_cpu(trace_buffered_event, cpu) = NULL; 2648 } 2649 2650 /* 2651 * Wait for all CPUs that potentially started checking if they can use 2652 * their event buffer only after the previous synchronize_rcu() call and 2653 * they still read a valid pointer from trace_buffered_event. It must be 2654 * ensured they don't see cleared trace_buffered_event_cnt else they 2655 * could wrongly decide to use the pointed-to buffer which is now freed. 
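 *
 * In short, the teardown ordering implemented by this function is:
 *
 *	1) mark the buffer in use on every CPU (trace_buffered_event_cnt)
 *	2) synchronize_rcu()  - all current users are done
 *	3) free the pages and clear trace_buffered_event
 *	4) synchronize_rcu()  - no CPU can still see the stale pointer
 *	5) drop the in-use counts again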
2656 */ 2657 synchronize_rcu(); 2658 2659 /* For each CPU, relinquish the buffer */ 2660 on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL, 2661 true); 2662 } 2663 2664 static struct trace_buffer *temp_buffer; 2665 2666 struct ring_buffer_event * 2667 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, 2668 struct trace_event_file *trace_file, 2669 int type, unsigned long len, 2670 unsigned int trace_ctx) 2671 { 2672 struct ring_buffer_event *entry; 2673 struct trace_array *tr = trace_file->tr; 2674 int val; 2675 2676 *current_rb = tr->array_buffer.buffer; 2677 2678 if (!tr->no_filter_buffering_ref && 2679 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) { 2680 preempt_disable_notrace(); 2681 /* 2682 * Filtering is on, so try to use the per cpu buffer first. 2683 * This buffer will simulate a ring_buffer_event, 2684 * where the type_len is zero and the array[0] will 2685 * hold the full length. 2686 * (see include/linux/ring-buffer.h for details on 2687 * how the ring_buffer_event is structured). 2688 * 2689 * Using a temp buffer during filtering and copying it 2690 * on a matched filter is quicker than writing directly 2691 * into the ring buffer and then discarding it when 2692 * it doesn't match. That is because the discard 2693 * requires several atomic operations to get right. 2694 * Copying on match and doing nothing on a failed match 2695 * is still quicker than no copy on match, but having 2696 * to discard out of the ring buffer on a failed match. 2697 */ 2698 if ((entry = __this_cpu_read(trace_buffered_event))) { 2699 int max_len = PAGE_SIZE - struct_size(entry, array, 1); 2700 2701 val = this_cpu_inc_return(trace_buffered_event_cnt); 2702 2703 /* 2704 * Preemption is disabled, but interrupts and NMIs 2705 * can still come in now. If that happens after 2706 * the above increment, then it will have to go 2707 * back to the old method of allocating the event 2708 * on the ring buffer, and if the filter fails, it 2709 * will have to call ring_buffer_discard_commit() 2710 * to remove it. 2711 * 2712 * Need to also check the unlikely case that the 2713 * length is bigger than the temp buffer size. 2714 * If that happens, then the reserve is pretty much 2715 * guaranteed to fail, as the ring buffer currently 2716 * only allows events less than a page. But that may 2717 * change in the future, so let the ring buffer reserve 2718 * handle the failure in that case. 2719 */ 2720 if (val == 1 && likely(len <= max_len)) { 2721 trace_event_setup(entry, type, trace_ctx); 2722 entry->array[0] = len; 2723 /* Return with preemption disabled */ 2724 return entry; 2725 } 2726 this_cpu_dec(trace_buffered_event_cnt); 2727 } 2728 /* __trace_buffer_lock_reserve() disables preemption */ 2729 preempt_enable_notrace(); 2730 } 2731 2732 entry = __trace_buffer_lock_reserve(*current_rb, type, len, 2733 trace_ctx); 2734 /* 2735 * If tracing is off, but we have triggers enabled 2736 * we still need to look at the event data. Use the temp_buffer 2737 * to store the trace event for the trigger to use. It's recursive 2738 * safe and will not be recorded anywhere. 
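 *
 * For example (an illustrative shell session, not taken from this file):
 *
 *	# echo 0 > tracing_on
 *	# echo 'snapshot if prev_pid == 42' > events/sched/sched_switch/trigger
 *
 * The trigger condition must still examine every sched_switch event,
 * even though nothing is being recorded.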
2739 */ 2740 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { 2741 *current_rb = temp_buffer; 2742 entry = __trace_buffer_lock_reserve(*current_rb, type, len, 2743 trace_ctx); 2744 } 2745 return entry; 2746 } 2747 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); 2748 2749 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock); 2750 static DEFINE_MUTEX(tracepoint_printk_mutex); 2751 2752 static void output_printk(struct trace_event_buffer *fbuffer) 2753 { 2754 struct trace_event_call *event_call; 2755 struct trace_event_file *file; 2756 struct trace_event *event; 2757 unsigned long flags; 2758 struct trace_iterator *iter = tracepoint_print_iter; 2759 2760 /* We should never get here if iter is NULL */ 2761 if (WARN_ON_ONCE(!iter)) 2762 return; 2763 2764 event_call = fbuffer->trace_file->event_call; 2765 if (!event_call || !event_call->event.funcs || 2766 !event_call->event.funcs->trace) 2767 return; 2768 2769 file = fbuffer->trace_file; 2770 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || 2771 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && 2772 !filter_match_preds(file->filter, fbuffer->entry))) 2773 return; 2774 2775 event = &fbuffer->trace_file->event_call->event; 2776 2777 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags); 2778 trace_seq_init(&iter->seq); 2779 iter->ent = fbuffer->entry; 2780 event_call->event.funcs->trace(iter, 0, event); 2781 trace_seq_putc(&iter->seq, 0); 2782 printk("%s", iter->seq.buffer); 2783 2784 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags); 2785 } 2786 2787 int tracepoint_printk_sysctl(const struct ctl_table *table, int write, 2788 void *buffer, size_t *lenp, 2789 loff_t *ppos) 2790 { 2791 int save_tracepoint_printk; 2792 int ret; 2793 2794 guard(mutex)(&tracepoint_printk_mutex); 2795 save_tracepoint_printk = tracepoint_printk; 2796 2797 ret = proc_dointvec(table, write, buffer, lenp, ppos); 2798 2799 /* 2800 * This will force exiting early, as tracepoint_printk 2801 * is always zero when tracepoint_printk_iter is not allocated 2802 */ 2803 if (!tracepoint_print_iter) 2804 tracepoint_printk = 0; 2805 2806 if (save_tracepoint_printk == tracepoint_printk) 2807 return ret; 2808 2809 if (tracepoint_printk) 2810 static_key_enable(&tracepoint_printk_key.key); 2811 else 2812 static_key_disable(&tracepoint_printk_key.key); 2813 2814 return ret; 2815 } 2816 2817 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) 2818 { 2819 enum event_trigger_type tt = ETT_NONE; 2820 struct trace_event_file *file = fbuffer->trace_file; 2821 2822 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event, 2823 fbuffer->entry, &tt)) 2824 goto discard; 2825 2826 if (static_key_false(&tracepoint_printk_key.key)) 2827 output_printk(fbuffer); 2828 2829 if (static_branch_unlikely(&trace_event_exports_enabled)) 2830 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); 2831 2832 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, 2833 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs); 2834 2835 discard: 2836 if (tt) 2837 event_triggers_post_call(file, tt); 2838 2839 } 2840 EXPORT_SYMBOL_GPL(trace_event_buffer_commit); 2841 2842 /* 2843 * Skip 3: 2844 * 2845 * trace_buffer_unlock_commit_regs() 2846 * trace_event_buffer_commit() 2847 * trace_event_raw_event_xxx() 2848 */ 2849 # define STACK_SKIP 3 2850 2851 void trace_buffer_unlock_commit_regs(struct trace_array *tr, 2852 struct trace_buffer *buffer, 2853 struct ring_buffer_event *event, 2854 unsigned int trace_ctx, 2855 struct pt_regs *regs) 2856 { 2857 
__buffer_unlock_commit(buffer, event); 2858 2859 /* 2860 * If regs is not set, then skip the necessary functions. 2861 * Note, we can still get here via blktrace, wakeup tracer 2862 * and mmiotrace, but that's ok if they lose a function or 2863 * two. They are not that meaningful. 2864 */ 2865 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs); 2866 ftrace_trace_userstack(tr, buffer, trace_ctx); 2867 } 2868 2869 /* 2870 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. 2871 */ 2872 void 2873 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, 2874 struct ring_buffer_event *event) 2875 { 2876 __buffer_unlock_commit(buffer, event); 2877 } 2878 2879 void 2880 trace_function(struct trace_array *tr, unsigned long ip, unsigned long 2881 parent_ip, unsigned int trace_ctx) 2882 { 2883 struct trace_buffer *buffer = tr->array_buffer.buffer; 2884 struct ring_buffer_event *event; 2885 struct ftrace_entry *entry; 2886 2887 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), 2888 trace_ctx); 2889 if (!event) 2890 return; 2891 entry = ring_buffer_event_data(event); 2892 entry->ip = ip; 2893 entry->parent_ip = parent_ip; 2894 2895 if (static_branch_unlikely(&trace_function_exports_enabled)) 2896 ftrace_exports(event, TRACE_EXPORT_FUNCTION); 2897 __buffer_unlock_commit(buffer, event); 2898 } 2899 2900 #ifdef CONFIG_STACKTRACE 2901 2902 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */ 2903 #define FTRACE_KSTACK_NESTING 4 2904 2905 #define FTRACE_KSTACK_ENTRIES (SZ_4K / FTRACE_KSTACK_NESTING) 2906 2907 struct ftrace_stack { 2908 unsigned long calls[FTRACE_KSTACK_ENTRIES]; 2909 }; 2910 2911 2912 struct ftrace_stacks { 2913 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING]; 2914 }; 2915 2916 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); 2917 static DEFINE_PER_CPU(int, ftrace_stack_reserve); 2918 2919 static void __ftrace_trace_stack(struct trace_array *tr, 2920 struct trace_buffer *buffer, 2921 unsigned int trace_ctx, 2922 int skip, struct pt_regs *regs) 2923 { 2924 struct ring_buffer_event *event; 2925 unsigned int size, nr_entries; 2926 struct ftrace_stack *fstack; 2927 struct stack_entry *entry; 2928 int stackidx; 2929 2930 /* 2931 * Add one, for this function and the call to save_stack_trace() 2932 * If regs is set, then these functions will not be in the way. 2933 */ 2934 #ifndef CONFIG_UNWINDER_ORC 2935 if (!regs) 2936 skip++; 2937 #endif 2938 2939 preempt_disable_notrace(); 2940 2941 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; 2942 2943 /* This should never happen. If it does, yell once and skip */ 2944 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) 2945 goto out; 2946 2947 /* 2948 * The above __this_cpu_inc_return() is 'atomic' cpu local. An 2949 * interrupt will either see the value pre increment or post 2950 * increment. If the interrupt happens pre increment it will have 2951 * restored the counter when it returns. We just need a barrier to 2952 * keep gcc from moving things around. 
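 *
 * As an illustration, the deepest expected stack of contexts on one
 * CPU, and the ftrace_stacks slot each level claims, is:
 *
 *	normal context  -> stackidx 0
 *	  softirq       -> stackidx 1
 *	    hardirq     -> stackidx 2
 *	      NMI       -> stackidx 3
 *
 * which is why FTRACE_KSTACK_NESTING slots are enough: a nested
 * interrupt never scribbles over its interrupted caller's saved trace.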
 */
	barrier();

	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
	size = ARRAY_SIZE(fstack->calls);

	if (regs) {
		nr_entries = stack_trace_save_regs(regs, fstack->calls,
						   size, skip);
	} else {
		nr_entries = stack_trace_save(fstack->calls, size, skip);
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	/* Mark the entries of the stack trace that are within trampoline code */
	if (tr->ops && tr->ops->trampoline) {
		unsigned long tramp_start = tr->ops->trampoline;
		unsigned long tramp_end = tramp_start + tr->ops->trampoline_size;
		unsigned long *calls = fstack->calls;

		for (int i = 0; i < nr_entries; i++) {
			if (calls[i] >= tramp_start && calls[i] < tramp_end)
				calls[i] = FTRACE_TRAMPOLINE_MARKER;
		}
	}
#endif

	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
					    struct_size(entry, caller, nr_entries),
					    trace_ctx);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	entry->size = nr_entries;
	memcpy(&entry->caller, fstack->calls,
	       flex_array_size(entry, caller, nr_entries));

	__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
}

void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
		   int skip)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
		return;
	}

	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
		return;

	/*
	 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and ct_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	ct_irq_enter_irqson();
	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
	ct_irq_exit_irqson();
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	if (tracing_disabled || tracing_selftest_running)
		return;

#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
#endif
	__ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer,
			     tracing_gen_ctx(), skip, NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
static DEFINE_PER_CPU(int, user_stack_count);

static void
ftrace_trace_userstack(struct trace_array *tr,
		       struct trace_buffer *buffer, unsigned int trace_ctx)
{
	struct ring_buffer_event *event;
	struct userstack_entry *entry;

	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs cannot handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
3073 */ 3074 if (unlikely(in_nmi())) 3075 return; 3076 3077 /* 3078 * prevent recursion, since the user stack tracing may 3079 * trigger other kernel events. 3080 */ 3081 preempt_disable(); 3082 if (__this_cpu_read(user_stack_count)) 3083 goto out; 3084 3085 __this_cpu_inc(user_stack_count); 3086 3087 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 3088 sizeof(*entry), trace_ctx); 3089 if (!event) 3090 goto out_drop_count; 3091 entry = ring_buffer_event_data(event); 3092 3093 entry->tgid = current->tgid; 3094 memset(&entry->caller, 0, sizeof(entry->caller)); 3095 3096 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); 3097 __buffer_unlock_commit(buffer, event); 3098 3099 out_drop_count: 3100 __this_cpu_dec(user_stack_count); 3101 out: 3102 preempt_enable(); 3103 } 3104 #else /* CONFIG_USER_STACKTRACE_SUPPORT */ 3105 static void ftrace_trace_userstack(struct trace_array *tr, 3106 struct trace_buffer *buffer, 3107 unsigned int trace_ctx) 3108 { 3109 } 3110 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ 3111 3112 #endif /* CONFIG_STACKTRACE */ 3113 3114 static inline void 3115 func_repeats_set_delta_ts(struct func_repeats_entry *entry, 3116 unsigned long long delta) 3117 { 3118 entry->bottom_delta_ts = delta & U32_MAX; 3119 entry->top_delta_ts = (delta >> 32); 3120 } 3121 3122 void trace_last_func_repeats(struct trace_array *tr, 3123 struct trace_func_repeats *last_info, 3124 unsigned int trace_ctx) 3125 { 3126 struct trace_buffer *buffer = tr->array_buffer.buffer; 3127 struct func_repeats_entry *entry; 3128 struct ring_buffer_event *event; 3129 u64 delta; 3130 3131 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS, 3132 sizeof(*entry), trace_ctx); 3133 if (!event) 3134 return; 3135 3136 delta = ring_buffer_event_time_stamp(buffer, event) - 3137 last_info->ts_last_call; 3138 3139 entry = ring_buffer_event_data(event); 3140 entry->ip = last_info->ip; 3141 entry->parent_ip = last_info->parent_ip; 3142 entry->count = last_info->count; 3143 func_repeats_set_delta_ts(entry, delta); 3144 3145 __buffer_unlock_commit(buffer, event); 3146 } 3147 3148 /* created for use with alloc_percpu */ 3149 struct trace_buffer_struct { 3150 int nesting; 3151 char buffer[4][TRACE_BUF_SIZE]; 3152 }; 3153 3154 static struct trace_buffer_struct __percpu *trace_percpu_buffer; 3155 3156 /* 3157 * This allows for lockless recording. If we're nested too deeply, then 3158 * this returns NULL. 
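 *
 * The expected usage pattern, which trace_vbprintk() below follows, is:
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf) {
 *		format up to TRACE_BUF_SIZE bytes into buf;
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();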
3159 */ 3160 static char *get_trace_buf(void) 3161 { 3162 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); 3163 3164 if (!trace_percpu_buffer || buffer->nesting >= 4) 3165 return NULL; 3166 3167 buffer->nesting++; 3168 3169 /* Interrupts must see nesting incremented before we use the buffer */ 3170 barrier(); 3171 return &buffer->buffer[buffer->nesting - 1][0]; 3172 } 3173 3174 static void put_trace_buf(void) 3175 { 3176 /* Don't let the decrement of nesting leak before this */ 3177 barrier(); 3178 this_cpu_dec(trace_percpu_buffer->nesting); 3179 } 3180 3181 static int alloc_percpu_trace_buffer(void) 3182 { 3183 struct trace_buffer_struct __percpu *buffers; 3184 3185 if (trace_percpu_buffer) 3186 return 0; 3187 3188 buffers = alloc_percpu(struct trace_buffer_struct); 3189 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) 3190 return -ENOMEM; 3191 3192 trace_percpu_buffer = buffers; 3193 return 0; 3194 } 3195 3196 static int buffers_allocated; 3197 3198 void trace_printk_init_buffers(void) 3199 { 3200 if (buffers_allocated) 3201 return; 3202 3203 if (alloc_percpu_trace_buffer()) 3204 return; 3205 3206 /* trace_printk() is for debug use only. Don't use it in production. */ 3207 3208 pr_warn("\n"); 3209 pr_warn("**********************************************************\n"); 3210 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3211 pr_warn("** **\n"); 3212 pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); 3213 pr_warn("** **\n"); 3214 pr_warn("** This means that this is a DEBUG kernel and it is **\n"); 3215 pr_warn("** unsafe for production use. **\n"); 3216 pr_warn("** **\n"); 3217 pr_warn("** If you see this message and you are not debugging **\n"); 3218 pr_warn("** the kernel, report this immediately to your vendor! **\n"); 3219 pr_warn("** **\n"); 3220 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3221 pr_warn("**********************************************************\n"); 3222 3223 /* Expand the buffers to set size */ 3224 tracing_update_buffers(&global_trace); 3225 3226 buffers_allocated = 1; 3227 3228 /* 3229 * trace_printk_init_buffers() can be called by modules. 3230 * If that happens, then we need to start cmdline recording 3231 * directly here. If the global_trace.buffer is already 3232 * allocated here, then this was called by module code. 
3233 */ 3234 if (global_trace.array_buffer.buffer) 3235 tracing_start_cmdline_record(); 3236 } 3237 EXPORT_SYMBOL_GPL(trace_printk_init_buffers); 3238 3239 void trace_printk_start_comm(void) 3240 { 3241 /* Start tracing comms if trace printk is set */ 3242 if (!buffers_allocated) 3243 return; 3244 tracing_start_cmdline_record(); 3245 } 3246 3247 static void trace_printk_start_stop_comm(int enabled) 3248 { 3249 if (!buffers_allocated) 3250 return; 3251 3252 if (enabled) 3253 tracing_start_cmdline_record(); 3254 else 3255 tracing_stop_cmdline_record(); 3256 } 3257 3258 /** 3259 * trace_vbprintk - write binary msg to tracing buffer 3260 * @ip: The address of the caller 3261 * @fmt: The string format to write to the buffer 3262 * @args: Arguments for @fmt 3263 */ 3264 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 3265 { 3266 struct ring_buffer_event *event; 3267 struct trace_buffer *buffer; 3268 struct trace_array *tr = READ_ONCE(printk_trace); 3269 struct bprint_entry *entry; 3270 unsigned int trace_ctx; 3271 char *tbuffer; 3272 int len = 0, size; 3273 3274 if (!printk_binsafe(tr)) 3275 return trace_vprintk(ip, fmt, args); 3276 3277 if (unlikely(tracing_selftest_running || tracing_disabled)) 3278 return 0; 3279 3280 /* Don't pollute graph traces with trace_vprintk internals */ 3281 pause_graph_tracing(); 3282 3283 trace_ctx = tracing_gen_ctx(); 3284 preempt_disable_notrace(); 3285 3286 tbuffer = get_trace_buf(); 3287 if (!tbuffer) { 3288 len = 0; 3289 goto out_nobuffer; 3290 } 3291 3292 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); 3293 3294 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) 3295 goto out_put; 3296 3297 size = sizeof(*entry) + sizeof(u32) * len; 3298 buffer = tr->array_buffer.buffer; 3299 ring_buffer_nest_start(buffer); 3300 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, 3301 trace_ctx); 3302 if (!event) 3303 goto out; 3304 entry = ring_buffer_event_data(event); 3305 entry->ip = ip; 3306 entry->fmt = fmt; 3307 3308 memcpy(entry->buf, tbuffer, sizeof(u32) * len); 3309 __buffer_unlock_commit(buffer, event); 3310 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL); 3311 3312 out: 3313 ring_buffer_nest_end(buffer); 3314 out_put: 3315 put_trace_buf(); 3316 3317 out_nobuffer: 3318 preempt_enable_notrace(); 3319 unpause_graph_tracing(); 3320 3321 return len; 3322 } 3323 EXPORT_SYMBOL_GPL(trace_vbprintk); 3324 3325 __printf(3, 0) 3326 static int 3327 __trace_array_vprintk(struct trace_buffer *buffer, 3328 unsigned long ip, const char *fmt, va_list args) 3329 { 3330 struct ring_buffer_event *event; 3331 int len = 0, size; 3332 struct print_entry *entry; 3333 unsigned int trace_ctx; 3334 char *tbuffer; 3335 3336 if (tracing_disabled) 3337 return 0; 3338 3339 /* Don't pollute graph traces with trace_vprintk internals */ 3340 pause_graph_tracing(); 3341 3342 trace_ctx = tracing_gen_ctx(); 3343 preempt_disable_notrace(); 3344 3345 3346 tbuffer = get_trace_buf(); 3347 if (!tbuffer) { 3348 len = 0; 3349 goto out_nobuffer; 3350 } 3351 3352 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); 3353 3354 size = sizeof(*entry) + len + 1; 3355 ring_buffer_nest_start(buffer); 3356 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 3357 trace_ctx); 3358 if (!event) 3359 goto out; 3360 entry = ring_buffer_event_data(event); 3361 entry->ip = ip; 3362 3363 memcpy(&entry->buf, tbuffer, len + 1); 3364 __buffer_unlock_commit(buffer, event); 3365 ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL); 3366 3367 out: 3368 
ring_buffer_nest_end(buffer); 3369 put_trace_buf(); 3370 3371 out_nobuffer: 3372 preempt_enable_notrace(); 3373 unpause_graph_tracing(); 3374 3375 return len; 3376 } 3377 3378 __printf(3, 0) 3379 int trace_array_vprintk(struct trace_array *tr, 3380 unsigned long ip, const char *fmt, va_list args) 3381 { 3382 if (tracing_selftest_running && tr == &global_trace) 3383 return 0; 3384 3385 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); 3386 } 3387 3388 /** 3389 * trace_array_printk - Print a message to a specific instance 3390 * @tr: The instance trace_array descriptor 3391 * @ip: The instruction pointer that this is called from. 3392 * @fmt: The format to print (printf format) 3393 * 3394 * If a subsystem sets up its own instance, they have the right to 3395 * printk strings into their tracing instance buffer using this 3396 * function. Note, this function will not write into the top level 3397 * buffer (use trace_printk() for that), as writing into the top level 3398 * buffer should only have events that can be individually disabled. 3399 * trace_printk() is only used for debugging a kernel, and should not 3400 * be ever incorporated in normal use. 3401 * 3402 * trace_array_printk() can be used, as it will not add noise to the 3403 * top level tracing buffer. 3404 * 3405 * Note, trace_array_init_printk() must be called on @tr before this 3406 * can be used. 3407 */ 3408 __printf(3, 0) 3409 int trace_array_printk(struct trace_array *tr, 3410 unsigned long ip, const char *fmt, ...) 3411 { 3412 int ret; 3413 va_list ap; 3414 3415 if (!tr) 3416 return -ENOENT; 3417 3418 /* This is only allowed for created instances */ 3419 if (tr == &global_trace) 3420 return 0; 3421 3422 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) 3423 return 0; 3424 3425 va_start(ap, fmt); 3426 ret = trace_array_vprintk(tr, ip, fmt, ap); 3427 va_end(ap); 3428 return ret; 3429 } 3430 EXPORT_SYMBOL_GPL(trace_array_printk); 3431 3432 /** 3433 * trace_array_init_printk - Initialize buffers for trace_array_printk() 3434 * @tr: The trace array to initialize the buffers for 3435 * 3436 * As trace_array_printk() only writes into instances, they are OK to 3437 * have in the kernel (unlike trace_printk()). This needs to be called 3438 * before trace_array_printk() can be used on a trace_array. 3439 */ 3440 int trace_array_init_printk(struct trace_array *tr) 3441 { 3442 if (!tr) 3443 return -ENOENT; 3444 3445 /* This is only allowed for created instances */ 3446 if (tr == &global_trace) 3447 return -EINVAL; 3448 3449 return alloc_percpu_trace_buffer(); 3450 } 3451 EXPORT_SYMBOL_GPL(trace_array_init_printk); 3452 3453 __printf(3, 4) 3454 int trace_array_printk_buf(struct trace_buffer *buffer, 3455 unsigned long ip, const char *fmt, ...) 
3456 { 3457 int ret; 3458 va_list ap; 3459 3460 if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK)) 3461 return 0; 3462 3463 va_start(ap, fmt); 3464 ret = __trace_array_vprintk(buffer, ip, fmt, ap); 3465 va_end(ap); 3466 return ret; 3467 } 3468 3469 __printf(2, 0) 3470 int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 3471 { 3472 return trace_array_vprintk(printk_trace, ip, fmt, args); 3473 } 3474 EXPORT_SYMBOL_GPL(trace_vprintk); 3475 3476 static void trace_iterator_increment(struct trace_iterator *iter) 3477 { 3478 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); 3479 3480 iter->idx++; 3481 if (buf_iter) 3482 ring_buffer_iter_advance(buf_iter); 3483 } 3484 3485 static struct trace_entry * 3486 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, 3487 unsigned long *lost_events) 3488 { 3489 struct ring_buffer_event *event; 3490 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); 3491 3492 if (buf_iter) { 3493 event = ring_buffer_iter_peek(buf_iter, ts); 3494 if (lost_events) 3495 *lost_events = ring_buffer_iter_dropped(buf_iter) ? 3496 (unsigned long)-1 : 0; 3497 } else { 3498 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, 3499 lost_events); 3500 } 3501 3502 if (event) { 3503 iter->ent_size = ring_buffer_event_length(event); 3504 return ring_buffer_event_data(event); 3505 } 3506 iter->ent_size = 0; 3507 return NULL; 3508 } 3509 3510 static struct trace_entry * 3511 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, 3512 unsigned long *missing_events, u64 *ent_ts) 3513 { 3514 struct trace_buffer *buffer = iter->array_buffer->buffer; 3515 struct trace_entry *ent, *next = NULL; 3516 unsigned long lost_events = 0, next_lost = 0; 3517 int cpu_file = iter->cpu_file; 3518 u64 next_ts = 0, ts; 3519 int next_cpu = -1; 3520 int next_size = 0; 3521 int cpu; 3522 3523 /* 3524 * If we are in a per_cpu trace file, don't bother by iterating over 3525 * all cpu and peek directly. 3526 */ 3527 if (cpu_file > RING_BUFFER_ALL_CPUS) { 3528 if (ring_buffer_empty_cpu(buffer, cpu_file)) 3529 return NULL; 3530 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); 3531 if (ent_cpu) 3532 *ent_cpu = cpu_file; 3533 3534 return ent; 3535 } 3536 3537 for_each_tracing_cpu(cpu) { 3538 3539 if (ring_buffer_empty_cpu(buffer, cpu)) 3540 continue; 3541 3542 ent = peek_next_entry(iter, cpu, &ts, &lost_events); 3543 3544 /* 3545 * Pick the entry with the smallest timestamp: 3546 */ 3547 if (ent && (!next || ts < next_ts)) { 3548 next = ent; 3549 next_cpu = cpu; 3550 next_ts = ts; 3551 next_lost = lost_events; 3552 next_size = iter->ent_size; 3553 } 3554 } 3555 3556 iter->ent_size = next_size; 3557 3558 if (ent_cpu) 3559 *ent_cpu = next_cpu; 3560 3561 if (ent_ts) 3562 *ent_ts = next_ts; 3563 3564 if (missing_events) 3565 *missing_events = next_lost; 3566 3567 return next; 3568 } 3569 3570 #define STATIC_FMT_BUF_SIZE 128 3571 static char static_fmt_buf[STATIC_FMT_BUF_SIZE]; 3572 3573 char *trace_iter_expand_format(struct trace_iterator *iter) 3574 { 3575 char *tmp; 3576 3577 /* 3578 * iter->tr is NULL when used with tp_printk, which makes 3579 * this get called where it is not safe to call krealloc(). 
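 *
 * (With tp_printk, events are printed from the tracepoint callback
 * itself, under a raw spinlock with interrupts disabled, where a
 * sleeping krealloc() is not allowed.)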
 */
	if (!iter->tr || iter->fmt == static_fmt_buf)
		return NULL;

	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
		       GFP_KERNEL);
	if (tmp) {
		iter->fmt_size += STATIC_FMT_BUF_SIZE;
		iter->fmt = tmp;
	}

	return tmp;
}

/* Returns true if the string is safe to dereference from an event */
static bool trace_safe_str(struct trace_iterator *iter, const char *str)
{
	unsigned long addr = (unsigned long)str;
	struct trace_event *trace_event;
	struct trace_event_call *event;

	/* OK if part of the event data */
	if ((addr >= (unsigned long)iter->ent) &&
	    (addr < (unsigned long)iter->ent + iter->ent_size))
		return true;

	/* OK if part of the temp seq buffer */
	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
	    (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
		return true;

	/* Core rodata cannot be freed */
	if (is_kernel_rodata(addr))
		return true;

	if (trace_is_tracepoint_string(str))
		return true;

	/*
	 * Now this could be a module event, referencing core module
	 * data, which is OK.
	 */
	if (!iter->ent)
		return false;

	trace_event = ftrace_find_event(iter->ent->type);
	if (!trace_event)
		return false;

	event = container_of(trace_event, struct trace_event_call, event);
	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
		return false;

	/* Would rather have rodata, but this will suffice */
	if (within_module_core(addr, event->module))
		return true;

	return false;
}

/**
 * ignore_event - Check dereferenced fields while writing to the seq buffer
 * @iter: The iterator that holds the seq buffer and the event being printed
 *
 * At boot up, test_event_printk() will flag any event that dereferences
 * a string with "%s" that does not exist in the ring buffer. It may still
 * be valid, as the string may point to a static string in the kernel
 * rodata that never gets freed. But if the string pointer is pointing
 * to something that was allocated, there's a chance that it can be freed
 * by the time the user reads the trace. This would cause a bad memory
 * access by the kernel and possibly crash the system.
 *
 * This function will check if the event has any fields flagged as needing
 * to be checked at runtime and perform those checks.
 *
 * If it is found that a field is unsafe, it will write into the @iter->seq
 * a message stating what was found to be unsafe.
 *
 * @return: true if the event is unsafe and should be ignored,
 *          false otherwise.
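 *
 * An illustrative unsafe pattern in a TRACE_EVENT() definition (the
 * field name is made up for this example):
 *
 *	TP_printk("comm=%s", __entry->comm_ptr)
 *
 * where comm_ptr was recorded as a raw pointer. The safe form copies
 * the string into the event with __string()/__assign_str() and prints
 * it with __get_str().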
3660 */ 3661 bool ignore_event(struct trace_iterator *iter) 3662 { 3663 struct ftrace_event_field *field; 3664 struct trace_event *trace_event; 3665 struct trace_event_call *event; 3666 struct list_head *head; 3667 struct trace_seq *seq; 3668 const void *ptr; 3669 3670 trace_event = ftrace_find_event(iter->ent->type); 3671 3672 seq = &iter->seq; 3673 3674 if (!trace_event) { 3675 trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type); 3676 return true; 3677 } 3678 3679 event = container_of(trace_event, struct trace_event_call, event); 3680 if (!(event->flags & TRACE_EVENT_FL_TEST_STR)) 3681 return false; 3682 3683 head = trace_get_fields(event); 3684 if (!head) { 3685 trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n", 3686 trace_event_name(event)); 3687 return true; 3688 } 3689 3690 /* Offsets are from the iter->ent that points to the raw event */ 3691 ptr = iter->ent; 3692 3693 list_for_each_entry(field, head, link) { 3694 const char *str; 3695 bool good; 3696 3697 if (!field->needs_test) 3698 continue; 3699 3700 str = *(const char **)(ptr + field->offset); 3701 3702 good = trace_safe_str(iter, str); 3703 3704 /* 3705 * If you hit this warning, it is likely that the 3706 * trace event in question used %s on a string that 3707 * was saved at the time of the event, but may not be 3708 * around when the trace is read. Use __string(), 3709 * __assign_str() and __get_str() helpers in the TRACE_EVENT() 3710 * instead. See samples/trace_events/trace-events-sample.h 3711 * for reference. 3712 */ 3713 if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'", 3714 trace_event_name(event), field->name)) { 3715 trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n", 3716 trace_event_name(event), field->name); 3717 return true; 3718 } 3719 } 3720 return false; 3721 } 3722 3723 const char *trace_event_format(struct trace_iterator *iter, const char *fmt) 3724 { 3725 const char *p, *new_fmt; 3726 char *q; 3727 3728 if (WARN_ON_ONCE(!fmt)) 3729 return fmt; 3730 3731 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR) 3732 return fmt; 3733 3734 p = fmt; 3735 new_fmt = q = iter->fmt; 3736 while (*p) { 3737 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) { 3738 if (!trace_iter_expand_format(iter)) 3739 return fmt; 3740 3741 q += iter->fmt - new_fmt; 3742 new_fmt = iter->fmt; 3743 } 3744 3745 *q++ = *p++; 3746 3747 /* Replace %p with %px */ 3748 if (p[-1] == '%') { 3749 if (p[0] == '%') { 3750 *q++ = *p++; 3751 } else if (p[0] == 'p' && !isalnum(p[1])) { 3752 *q++ = *p++; 3753 *q++ = 'x'; 3754 } 3755 } 3756 } 3757 *q = '\0'; 3758 3759 return new_fmt; 3760 } 3761 3762 #define STATIC_TEMP_BUF_SIZE 128 3763 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4); 3764 3765 /* Find the next real entry, without updating the iterator itself */ 3766 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 3767 int *ent_cpu, u64 *ent_ts) 3768 { 3769 /* __find_next_entry will reset ent_size */ 3770 int ent_size = iter->ent_size; 3771 struct trace_entry *entry; 3772 3773 /* 3774 * If called from ftrace_dump(), then the iter->temp buffer 3775 * will be the static_temp_buf and not created from kmalloc. 3776 * If the entry size is greater than the buffer, we can 3777 * not save it. Just return NULL in that case. This is only 3778 * used to add markers when two consecutive events' time 3779 * stamps have a large delta. 
See trace_print_lat_context() 3780 */ 3781 if (iter->temp == static_temp_buf && 3782 STATIC_TEMP_BUF_SIZE < ent_size) 3783 return NULL; 3784 3785 /* 3786 * The __find_next_entry() may call peek_next_entry(), which may 3787 * call ring_buffer_peek() that may make the contents of iter->ent 3788 * undefined. Need to copy iter->ent now. 3789 */ 3790 if (iter->ent && iter->ent != iter->temp) { 3791 if ((!iter->temp || iter->temp_size < iter->ent_size) && 3792 !WARN_ON_ONCE(iter->temp == static_temp_buf)) { 3793 void *temp; 3794 temp = kmalloc(iter->ent_size, GFP_KERNEL); 3795 if (!temp) 3796 return NULL; 3797 kfree(iter->temp); 3798 iter->temp = temp; 3799 iter->temp_size = iter->ent_size; 3800 } 3801 memcpy(iter->temp, iter->ent, iter->ent_size); 3802 iter->ent = iter->temp; 3803 } 3804 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); 3805 /* Put back the original ent_size */ 3806 iter->ent_size = ent_size; 3807 3808 return entry; 3809 } 3810 3811 /* Find the next real entry, and increment the iterator to the next entry */ 3812 void *trace_find_next_entry_inc(struct trace_iterator *iter) 3813 { 3814 iter->ent = __find_next_entry(iter, &iter->cpu, 3815 &iter->lost_events, &iter->ts); 3816 3817 if (iter->ent) 3818 trace_iterator_increment(iter); 3819 3820 return iter->ent ? iter : NULL; 3821 } 3822 3823 static void trace_consume(struct trace_iterator *iter) 3824 { 3825 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, 3826 &iter->lost_events); 3827 } 3828 3829 static void *s_next(struct seq_file *m, void *v, loff_t *pos) 3830 { 3831 struct trace_iterator *iter = m->private; 3832 int i = (int)*pos; 3833 void *ent; 3834 3835 WARN_ON_ONCE(iter->leftover); 3836 3837 (*pos)++; 3838 3839 /* can't go backwards */ 3840 if (iter->idx > i) 3841 return NULL; 3842 3843 if (iter->idx < 0) 3844 ent = trace_find_next_entry_inc(iter); 3845 else 3846 ent = iter; 3847 3848 while (ent && iter->idx < i) 3849 ent = trace_find_next_entry_inc(iter); 3850 3851 iter->pos = *pos; 3852 3853 return ent; 3854 } 3855 3856 void tracing_iter_reset(struct trace_iterator *iter, int cpu) 3857 { 3858 struct ring_buffer_iter *buf_iter; 3859 unsigned long entries = 0; 3860 u64 ts; 3861 3862 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; 3863 3864 buf_iter = trace_buffer_iter(iter, cpu); 3865 if (!buf_iter) 3866 return; 3867 3868 ring_buffer_iter_reset(buf_iter); 3869 3870 /* 3871 * We could have the case with the max latency tracers 3872 * that a reset never took place on a cpu. This is evident 3873 * by the timestamp being before the start of the buffer. 3874 */ 3875 while (ring_buffer_iter_peek(buf_iter, &ts)) { 3876 if (ts >= iter->array_buffer->time_start) 3877 break; 3878 entries++; 3879 ring_buffer_iter_advance(buf_iter); 3880 /* This could be a big loop */ 3881 cond_resched(); 3882 } 3883 3884 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; 3885 } 3886 3887 /* 3888 * The current tracer is copied to avoid a global locking 3889 * all around. 
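 *
 * s_start(), s_next() and s_stop() below implement the seq_file
 * iterator contract; for a read of the "trace" file the seq_file core
 * roughly does (a simplified sketch):
 *
 *	p = s_start(m, &pos);
 *	while (p) {
 *		show(m, p);		(the seq_operations ->show() callback)
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);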
3890 */ 3891 static void *s_start(struct seq_file *m, loff_t *pos) 3892 { 3893 struct trace_iterator *iter = m->private; 3894 struct trace_array *tr = iter->tr; 3895 int cpu_file = iter->cpu_file; 3896 void *p = NULL; 3897 loff_t l = 0; 3898 int cpu; 3899 3900 mutex_lock(&trace_types_lock); 3901 if (unlikely(tr->current_trace != iter->trace)) { 3902 /* Close iter->trace before switching to the new current tracer */ 3903 if (iter->trace->close) 3904 iter->trace->close(iter); 3905 iter->trace = tr->current_trace; 3906 /* Reopen the new current tracer */ 3907 if (iter->trace->open) 3908 iter->trace->open(iter); 3909 } 3910 mutex_unlock(&trace_types_lock); 3911 3912 #ifdef CONFIG_TRACER_MAX_TRACE 3913 if (iter->snapshot && iter->trace->use_max_tr) 3914 return ERR_PTR(-EBUSY); 3915 #endif 3916 3917 if (*pos != iter->pos) { 3918 iter->ent = NULL; 3919 iter->cpu = 0; 3920 iter->idx = -1; 3921 3922 if (cpu_file == RING_BUFFER_ALL_CPUS) { 3923 for_each_tracing_cpu(cpu) 3924 tracing_iter_reset(iter, cpu); 3925 } else 3926 tracing_iter_reset(iter, cpu_file); 3927 3928 iter->leftover = 0; 3929 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 3930 ; 3931 3932 } else { 3933 /* 3934 * If we overflowed the seq_file before, then we want 3935 * to just reuse the trace_seq buffer again. 3936 */ 3937 if (iter->leftover) 3938 p = iter; 3939 else { 3940 l = *pos - 1; 3941 p = s_next(m, p, &l); 3942 } 3943 } 3944 3945 trace_event_read_lock(); 3946 trace_access_lock(cpu_file); 3947 return p; 3948 } 3949 3950 static void s_stop(struct seq_file *m, void *p) 3951 { 3952 struct trace_iterator *iter = m->private; 3953 3954 #ifdef CONFIG_TRACER_MAX_TRACE 3955 if (iter->snapshot && iter->trace->use_max_tr) 3956 return; 3957 #endif 3958 3959 trace_access_unlock(iter->cpu_file); 3960 trace_event_read_unlock(); 3961 } 3962 3963 static void 3964 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total, 3965 unsigned long *entries, int cpu) 3966 { 3967 unsigned long count; 3968 3969 count = ring_buffer_entries_cpu(buf->buffer, cpu); 3970 /* 3971 * If this buffer has skipped entries, then we hold all 3972 * entries for the trace and we need to ignore the 3973 * ones before the time stamp. 
3974 */ 3975 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 3976 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 3977 /* total is the same as the entries */ 3978 *total = count; 3979 } else 3980 *total = count + 3981 ring_buffer_overrun_cpu(buf->buffer, cpu); 3982 *entries = count; 3983 } 3984 3985 static void 3986 get_total_entries(struct array_buffer *buf, 3987 unsigned long *total, unsigned long *entries) 3988 { 3989 unsigned long t, e; 3990 int cpu; 3991 3992 *total = 0; 3993 *entries = 0; 3994 3995 for_each_tracing_cpu(cpu) { 3996 get_total_entries_cpu(buf, &t, &e, cpu); 3997 *total += t; 3998 *entries += e; 3999 } 4000 } 4001 4002 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) 4003 { 4004 unsigned long total, entries; 4005 4006 if (!tr) 4007 tr = &global_trace; 4008 4009 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); 4010 4011 return entries; 4012 } 4013 4014 unsigned long trace_total_entries(struct trace_array *tr) 4015 { 4016 unsigned long total, entries; 4017 4018 if (!tr) 4019 tr = &global_trace; 4020 4021 get_total_entries(&tr->array_buffer, &total, &entries); 4022 4023 return entries; 4024 } 4025 4026 static void print_lat_help_header(struct seq_file *m) 4027 { 4028 seq_puts(m, "# _------=> CPU# \n" 4029 "# / _-----=> irqs-off/BH-disabled\n" 4030 "# | / _----=> need-resched \n" 4031 "# || / _---=> hardirq/softirq \n" 4032 "# ||| / _--=> preempt-depth \n" 4033 "# |||| / _-=> migrate-disable \n" 4034 "# ||||| / delay \n" 4035 "# cmd pid |||||| time | caller \n" 4036 "# \\ / |||||| \\ | / \n"); 4037 } 4038 4039 static void print_event_info(struct array_buffer *buf, struct seq_file *m) 4040 { 4041 unsigned long total; 4042 unsigned long entries; 4043 4044 get_total_entries(buf, &total, &entries); 4045 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", 4046 entries, total, num_online_cpus()); 4047 seq_puts(m, "#\n"); 4048 } 4049 4050 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, 4051 unsigned int flags) 4052 { 4053 bool tgid = flags & TRACE_ITER_RECORD_TGID; 4054 4055 print_event_info(buf, m); 4056 4057 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); 4058 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); 4059 } 4060 4061 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, 4062 unsigned int flags) 4063 { 4064 bool tgid = flags & TRACE_ITER_RECORD_TGID; 4065 static const char space[] = " "; 4066 int prec = tgid ? 
12 : 2; 4067 4068 print_event_info(buf, m); 4069 4070 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space); 4071 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); 4072 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); 4073 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); 4074 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); 4075 seq_printf(m, "# %.*s|||| / delay\n", prec, space); 4076 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID "); 4077 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | "); 4078 } 4079 4080 void 4081 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 4082 { 4083 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); 4084 struct array_buffer *buf = iter->array_buffer; 4085 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); 4086 struct tracer *type = iter->trace; 4087 unsigned long entries; 4088 unsigned long total; 4089 const char *name = type->name; 4090 4091 get_total_entries(buf, &total, &entries); 4092 4093 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 4094 name, init_utsname()->release); 4095 seq_puts(m, "# -----------------------------------" 4096 "---------------------------------\n"); 4097 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" 4098 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 4099 nsecs_to_usecs(data->saved_latency), 4100 entries, 4101 total, 4102 buf->cpu, 4103 preempt_model_none() ? "server" : 4104 preempt_model_voluntary() ? "desktop" : 4105 preempt_model_full() ? "preempt" : 4106 preempt_model_lazy() ? "lazy" : 4107 preempt_model_rt() ? "preempt_rt" : 4108 "unknown", 4109 /* These are reserved for later use */ 4110 0, 0, 0, 0); 4111 #ifdef CONFIG_SMP 4112 seq_printf(m, " #P:%d)\n", num_online_cpus()); 4113 #else 4114 seq_puts(m, ")\n"); 4115 #endif 4116 seq_puts(m, "# -----------------\n"); 4117 seq_printf(m, "# | task: %.16s-%d " 4118 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 4119 data->comm, data->pid, 4120 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 4121 data->policy, data->rt_priority); 4122 seq_puts(m, "# -----------------\n"); 4123 4124 if (data->critical_start) { 4125 seq_puts(m, "# => started at: "); 4126 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 4127 trace_print_seq(m, &iter->seq); 4128 seq_puts(m, "\n# => ended at: "); 4129 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 4130 trace_print_seq(m, &iter->seq); 4131 seq_puts(m, "\n#\n"); 4132 } 4133 4134 seq_puts(m, "#\n"); 4135 } 4136 4137 static void test_cpu_buff_start(struct trace_iterator *iter) 4138 { 4139 struct trace_seq *s = &iter->seq; 4140 struct trace_array *tr = iter->tr; 4141 4142 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) 4143 return; 4144 4145 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 4146 return; 4147 4148 if (cpumask_available(iter->started) && 4149 cpumask_test_cpu(iter->cpu, iter->started)) 4150 return; 4151 4152 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) 4153 return; 4154 4155 if (cpumask_available(iter->started)) 4156 cpumask_set_cpu(iter->cpu, iter->started); 4157 4158 /* Don't print started cpu buffer for the first entry of the trace */ 4159 if (iter->idx > 1) 4160 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 4161 iter->cpu); 4162 } 4163 4164 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 4165 { 4166 struct trace_array *tr = iter->tr; 4167 struct trace_seq *s = &iter->seq; 4168 unsigned 
long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); 4169 struct trace_entry *entry; 4170 struct trace_event *event; 4171 4172 entry = iter->ent; 4173 4174 test_cpu_buff_start(iter); 4175 4176 event = ftrace_find_event(entry->type); 4177 4178 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4179 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 4180 trace_print_lat_context(iter); 4181 else 4182 trace_print_context(iter); 4183 } 4184 4185 if (trace_seq_has_overflowed(s)) 4186 return TRACE_TYPE_PARTIAL_LINE; 4187 4188 if (event) { 4189 if (tr->trace_flags & TRACE_ITER_FIELDS) 4190 return print_event_fields(iter, event); 4191 /* 4192 * For TRACE_EVENT() events, the print_fmt is not 4193 * safe to use if the array has delta offsets 4194 * Force printing via the fields. 4195 */ 4196 if ((tr->text_delta || tr->data_delta) && 4197 event->type > __TRACE_LAST_TYPE) 4198 return print_event_fields(iter, event); 4199 4200 return event->funcs->trace(iter, sym_flags, event); 4201 } 4202 4203 trace_seq_printf(s, "Unknown type %d\n", entry->type); 4204 4205 return trace_handle_return(s); 4206 } 4207 4208 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 4209 { 4210 struct trace_array *tr = iter->tr; 4211 struct trace_seq *s = &iter->seq; 4212 struct trace_entry *entry; 4213 struct trace_event *event; 4214 4215 entry = iter->ent; 4216 4217 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) 4218 trace_seq_printf(s, "%d %d %llu ", 4219 entry->pid, iter->cpu, iter->ts); 4220 4221 if (trace_seq_has_overflowed(s)) 4222 return TRACE_TYPE_PARTIAL_LINE; 4223 4224 event = ftrace_find_event(entry->type); 4225 if (event) 4226 return event->funcs->raw(iter, 0, event); 4227 4228 trace_seq_printf(s, "%d ?\n", entry->type); 4229 4230 return trace_handle_return(s); 4231 } 4232 4233 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 4234 { 4235 struct trace_array *tr = iter->tr; 4236 struct trace_seq *s = &iter->seq; 4237 unsigned char newline = '\n'; 4238 struct trace_entry *entry; 4239 struct trace_event *event; 4240 4241 entry = iter->ent; 4242 4243 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4244 SEQ_PUT_HEX_FIELD(s, entry->pid); 4245 SEQ_PUT_HEX_FIELD(s, iter->cpu); 4246 SEQ_PUT_HEX_FIELD(s, iter->ts); 4247 if (trace_seq_has_overflowed(s)) 4248 return TRACE_TYPE_PARTIAL_LINE; 4249 } 4250 4251 event = ftrace_find_event(entry->type); 4252 if (event) { 4253 enum print_line_t ret = event->funcs->hex(iter, 0, event); 4254 if (ret != TRACE_TYPE_HANDLED) 4255 return ret; 4256 } 4257 4258 SEQ_PUT_FIELD(s, newline); 4259 4260 return trace_handle_return(s); 4261 } 4262 4263 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 4264 { 4265 struct trace_array *tr = iter->tr; 4266 struct trace_seq *s = &iter->seq; 4267 struct trace_entry *entry; 4268 struct trace_event *event; 4269 4270 entry = iter->ent; 4271 4272 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4273 SEQ_PUT_FIELD(s, entry->pid); 4274 SEQ_PUT_FIELD(s, iter->cpu); 4275 SEQ_PUT_FIELD(s, iter->ts); 4276 if (trace_seq_has_overflowed(s)) 4277 return TRACE_TYPE_PARTIAL_LINE; 4278 } 4279 4280 event = ftrace_find_event(entry->type); 4281 return event ? 
event->funcs->binary(iter, 0, event) : 4282 TRACE_TYPE_HANDLED; 4283 } 4284 4285 int trace_empty(struct trace_iterator *iter) 4286 { 4287 struct ring_buffer_iter *buf_iter; 4288 int cpu; 4289 4290 /* If we are looking at one CPU buffer, only check that one */ 4291 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4292 cpu = iter->cpu_file; 4293 buf_iter = trace_buffer_iter(iter, cpu); 4294 if (buf_iter) { 4295 if (!ring_buffer_iter_empty(buf_iter)) 4296 return 0; 4297 } else { 4298 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) 4299 return 0; 4300 } 4301 return 1; 4302 } 4303 4304 for_each_tracing_cpu(cpu) { 4305 buf_iter = trace_buffer_iter(iter, cpu); 4306 if (buf_iter) { 4307 if (!ring_buffer_iter_empty(buf_iter)) 4308 return 0; 4309 } else { 4310 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) 4311 return 0; 4312 } 4313 } 4314 4315 return 1; 4316 } 4317 4318 /* Called with trace_event_read_lock() held. */ 4319 enum print_line_t print_trace_line(struct trace_iterator *iter) 4320 { 4321 struct trace_array *tr = iter->tr; 4322 unsigned long trace_flags = tr->trace_flags; 4323 enum print_line_t ret; 4324 4325 if (iter->lost_events) { 4326 if (iter->lost_events == (unsigned long)-1) 4327 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", 4328 iter->cpu); 4329 else 4330 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", 4331 iter->cpu, iter->lost_events); 4332 if (trace_seq_has_overflowed(&iter->seq)) 4333 return TRACE_TYPE_PARTIAL_LINE; 4334 } 4335 4336 if (iter->trace && iter->trace->print_line) { 4337 ret = iter->trace->print_line(iter); 4338 if (ret != TRACE_TYPE_UNHANDLED) 4339 return ret; 4340 } 4341 4342 if (iter->ent->type == TRACE_BPUTS && 4343 trace_flags & TRACE_ITER_PRINTK && 4344 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4345 return trace_print_bputs_msg_only(iter); 4346 4347 if (iter->ent->type == TRACE_BPRINT && 4348 trace_flags & TRACE_ITER_PRINTK && 4349 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4350 return trace_print_bprintk_msg_only(iter); 4351 4352 if (iter->ent->type == TRACE_PRINT && 4353 trace_flags & TRACE_ITER_PRINTK && 4354 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4355 return trace_print_printk_msg_only(iter); 4356 4357 if (trace_flags & TRACE_ITER_BIN) 4358 return print_bin_fmt(iter); 4359 4360 if (trace_flags & TRACE_ITER_HEX) 4361 return print_hex_fmt(iter); 4362 4363 if (trace_flags & TRACE_ITER_RAW) 4364 return print_raw_fmt(iter); 4365 4366 return print_trace_fmt(iter); 4367 } 4368 4369 void trace_latency_header(struct seq_file *m) 4370 { 4371 struct trace_iterator *iter = m->private; 4372 struct trace_array *tr = iter->tr; 4373 4374 /* print nothing if the buffers are empty */ 4375 if (trace_empty(iter)) 4376 return; 4377 4378 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 4379 print_trace_header(m, iter); 4380 4381 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) 4382 print_lat_help_header(m); 4383 } 4384 4385 void trace_default_header(struct seq_file *m) 4386 { 4387 struct trace_iterator *iter = m->private; 4388 struct trace_array *tr = iter->tr; 4389 unsigned long trace_flags = tr->trace_flags; 4390 4391 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 4392 return; 4393 4394 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 4395 /* print nothing if the buffers are empty */ 4396 if (trace_empty(iter)) 4397 return; 4398 print_trace_header(m, iter); 4399 if (!(trace_flags & TRACE_ITER_VERBOSE)) 4400 print_lat_help_header(m); 4401 } else { 4402 if (!(trace_flags & TRACE_ITER_VERBOSE)) { 4403 if (trace_flags & TRACE_ITER_IRQ_INFO) 4404 
print_func_help_header_irq(iter->array_buffer, 4405 m, trace_flags); 4406 else 4407 print_func_help_header(iter->array_buffer, m, 4408 trace_flags); 4409 } 4410 } 4411 } 4412 4413 static void test_ftrace_alive(struct seq_file *m) 4414 { 4415 if (!ftrace_is_dead()) 4416 return; 4417 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" 4418 "# MAY BE MISSING FUNCTION EVENTS\n"); 4419 } 4420 4421 #ifdef CONFIG_TRACER_MAX_TRACE 4422 static void show_snapshot_main_help(struct seq_file *m) 4423 { 4424 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" 4425 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 4426 "# Takes a snapshot of the main buffer.\n" 4427 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" 4428 "# (Doesn't have to be '2'; works with any number that\n" 4429 "# is not a '0' or '1')\n"); 4430 } 4431 4432 static void show_snapshot_percpu_help(struct seq_file *m) 4433 { 4434 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); 4435 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 4436 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 4437 "# Takes a snapshot of the main buffer for this cpu.\n"); 4438 #else 4439 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" 4440 "# Must use main snapshot file to allocate.\n"); 4441 #endif 4442 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" 4443 "# (Doesn't have to be '2'; works with any number that\n" 4444 "# is not a '0' or '1')\n"); 4445 } 4446 4447 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) 4448 { 4449 if (iter->tr->allocated_snapshot) 4450 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); 4451 else 4452 seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); 4453 4454 seq_puts(m, "# Snapshot commands:\n"); 4455 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4456 show_snapshot_main_help(m); 4457 else 4458 show_snapshot_percpu_help(m); 4459 } 4460 #else 4461 /* Should never be called */ 4462 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } 4463 #endif 4464 4465 static int s_show(struct seq_file *m, void *v) 4466 { 4467 struct trace_iterator *iter = v; 4468 int ret; 4469 4470 if (iter->ent == NULL) { 4471 if (iter->tr) { 4472 seq_printf(m, "# tracer: %s\n", iter->trace->name); 4473 seq_puts(m, "#\n"); 4474 test_ftrace_alive(m); 4475 } 4476 if (iter->snapshot && trace_empty(iter)) 4477 print_snapshot_help(m, iter); 4478 else if (iter->trace && iter->trace->print_header) 4479 iter->trace->print_header(m); 4480 else 4481 trace_default_header(m); 4482 4483 } else if (iter->leftover) { 4484 /* 4485 * If we filled the seq_file buffer earlier, we 4486 * want to just show it now. 4487 */ 4488 ret = trace_print_seq(m, &iter->seq); 4489 4490 /* ret should this time be zero, but you never know */ 4491 iter->leftover = ret; 4492 4493 } else { 4494 ret = print_trace_line(iter); 4495 if (ret == TRACE_TYPE_PARTIAL_LINE) { 4496 iter->seq.full = 0; 4497 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); 4498 } 4499 ret = trace_print_seq(m, &iter->seq); 4500 /* 4501 * If we overflow the seq_file buffer, then it will 4502 * ask us for this data again at start up. 4503 * Use that instead. 4504 * ret is 0 if seq_file write succeeded. 4505 * -1 otherwise.
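 * (s_start() checks iter->leftover: when it is non-zero, the same iterator is handed back so the saved trace_seq is printed again instead of advancing to the next entry.)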
4506 */ 4507 iter->leftover = ret; 4508 } 4509 4510 return 0; 4511 } 4512 4513 /* 4514 * Should be used after trace_array_get(), trace_types_lock 4515 * ensures that i_cdev was already initialized. 4516 */ 4517 static inline int tracing_get_cpu(struct inode *inode) 4518 { 4519 if (inode->i_cdev) /* See trace_create_cpu_file() */ 4520 return (long)inode->i_cdev - 1; 4521 return RING_BUFFER_ALL_CPUS; 4522 } 4523 4524 static const struct seq_operations tracer_seq_ops = { 4525 .start = s_start, 4526 .next = s_next, 4527 .stop = s_stop, 4528 .show = s_show, 4529 }; 4530 4531 /* 4532 * Note, as iter itself can be allocated and freed in different 4533 * ways, this function is only used to free its content, and not 4534 * the iterator itself. The only requirement on all the allocations 4535 * is that they zero all fields (kzalloc), as freeing works with 4536 * either allocated content or NULL. 4537 */ 4538 static void free_trace_iter_content(struct trace_iterator *iter) 4539 { 4540 /* The fmt is either NULL, allocated or points to static_fmt_buf */ 4541 if (iter->fmt != static_fmt_buf) 4542 kfree(iter->fmt); 4543 4544 kfree(iter->temp); 4545 kfree(iter->buffer_iter); 4546 mutex_destroy(&iter->mutex); 4547 free_cpumask_var(iter->started); 4548 } 4549 4550 static struct trace_iterator * 4551 __tracing_open(struct inode *inode, struct file *file, bool snapshot) 4552 { 4553 struct trace_array *tr = inode->i_private; 4554 struct trace_iterator *iter; 4555 int cpu; 4556 4557 if (tracing_disabled) 4558 return ERR_PTR(-ENODEV); 4559 4560 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); 4561 if (!iter) 4562 return ERR_PTR(-ENOMEM); 4563 4564 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), 4565 GFP_KERNEL); 4566 if (!iter->buffer_iter) 4567 goto release; 4568 4569 /* 4570 * trace_find_next_entry() may need to save off iter->ent. 4571 * It will place it into the iter->temp buffer. As most 4572 * events are less than 128, allocate a buffer of that size. 4573 * If one is greater, then trace_find_next_entry() will 4574 * allocate a new buffer to adjust for the bigger iter->ent. 4575 * It's not critical if it fails to get allocated here. 4576 */ 4577 iter->temp = kmalloc(128, GFP_KERNEL); 4578 if (iter->temp) 4579 iter->temp_size = 128; 4580 4581 /* 4582 * trace_event_printf() may need to modify given format 4583 * string to replace %p with %px so that it shows real address 4584 * instead of hash value. However, that is only needed for event 4585 * tracing; other tracers may not. Defer the allocation 4586 * until it is needed. 4587 */ 4588 iter->fmt = NULL; 4589 iter->fmt_size = 0; 4590 4591 mutex_lock(&trace_types_lock); 4592 iter->trace = tr->current_trace; 4593 4594 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 4595 goto fail; 4596 4597 iter->tr = tr; 4598 4599 #ifdef CONFIG_TRACER_MAX_TRACE 4600 /* Currently only the top directory has a snapshot */ 4601 if (tr->current_trace->print_max || snapshot) 4602 iter->array_buffer = &tr->max_buffer; 4603 else 4604 #endif 4605 iter->array_buffer = &tr->array_buffer; 4606 iter->snapshot = snapshot; 4607 iter->pos = -1; 4608 iter->cpu_file = tracing_get_cpu(inode); 4609 mutex_init(&iter->mutex); 4610 4611 /* Notify the tracer early; before we stop tracing.
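 * (this way the ->open() callback runs while the buffer is still live, ahead of the optional tracing_stop_tr() below)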
*/ 4612 if (iter->trace->open) 4613 iter->trace->open(iter); 4614 4615 /* Annotate start of buffers if we had overruns */ 4616 if (ring_buffer_overruns(iter->array_buffer->buffer)) 4617 iter->iter_flags |= TRACE_FILE_ANNOTATE; 4618 4619 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 4620 if (trace_clocks[tr->clock_id].in_ns) 4621 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 4622 4623 /* 4624 * If pause-on-trace is enabled, then stop the trace while 4625 * dumping, unless this is the "snapshot" file 4626 */ 4627 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) 4628 tracing_stop_tr(tr); 4629 4630 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 4631 for_each_tracing_cpu(cpu) { 4632 iter->buffer_iter[cpu] = 4633 ring_buffer_read_prepare(iter->array_buffer->buffer, 4634 cpu, GFP_KERNEL); 4635 } 4636 ring_buffer_read_prepare_sync(); 4637 for_each_tracing_cpu(cpu) { 4638 ring_buffer_read_start(iter->buffer_iter[cpu]); 4639 tracing_iter_reset(iter, cpu); 4640 } 4641 } else { 4642 cpu = iter->cpu_file; 4643 iter->buffer_iter[cpu] = 4644 ring_buffer_read_prepare(iter->array_buffer->buffer, 4645 cpu, GFP_KERNEL); 4646 ring_buffer_read_prepare_sync(); 4647 ring_buffer_read_start(iter->buffer_iter[cpu]); 4648 tracing_iter_reset(iter, cpu); 4649 } 4650 4651 mutex_unlock(&trace_types_lock); 4652 4653 return iter; 4654 4655 fail: 4656 mutex_unlock(&trace_types_lock); 4657 free_trace_iter_content(iter); 4658 release: 4659 seq_release_private(inode, file); 4660 return ERR_PTR(-ENOMEM); 4661 } 4662 4663 int tracing_open_generic(struct inode *inode, struct file *filp) 4664 { 4665 int ret; 4666 4667 ret = tracing_check_open_get_tr(NULL); 4668 if (ret) 4669 return ret; 4670 4671 filp->private_data = inode->i_private; 4672 return 0; 4673 } 4674 4675 bool tracing_is_disabled(void) 4676 { 4677 return (tracing_disabled) ? true: false; 4678 } 4679 4680 /* 4681 * Open and update trace_array ref count. 4682 * Must have the current trace_array passed to it. 4683 */ 4684 int tracing_open_generic_tr(struct inode *inode, struct file *filp) 4685 { 4686 struct trace_array *tr = inode->i_private; 4687 int ret; 4688 4689 ret = tracing_check_open_get_tr(tr); 4690 if (ret) 4691 return ret; 4692 4693 filp->private_data = inode->i_private; 4694 4695 return 0; 4696 } 4697 4698 /* 4699 * The private pointer of the inode is the trace_event_file. 4700 * Update the tr ref count associated to it. 
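 * (the reference taken here is dropped by tracing_release_file_tr(), which also does the matching event_file_put())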
4701 */ 4702 int tracing_open_file_tr(struct inode *inode, struct file *filp) 4703 { 4704 struct trace_event_file *file = inode->i_private; 4705 int ret; 4706 4707 ret = tracing_check_open_get_tr(file->tr); 4708 if (ret) 4709 return ret; 4710 4711 mutex_lock(&event_mutex); 4712 4713 /* Fail if the file is marked for removal */ 4714 if (file->flags & EVENT_FILE_FL_FREED) { 4715 trace_array_put(file->tr); 4716 ret = -ENODEV; 4717 } else { 4718 event_file_get(file); 4719 } 4720 4721 mutex_unlock(&event_mutex); 4722 if (ret) 4723 return ret; 4724 4725 filp->private_data = inode->i_private; 4726 4727 return 0; 4728 } 4729 4730 int tracing_release_file_tr(struct inode *inode, struct file *filp) 4731 { 4732 struct trace_event_file *file = inode->i_private; 4733 4734 trace_array_put(file->tr); 4735 event_file_put(file); 4736 4737 return 0; 4738 } 4739 4740 int tracing_single_release_file_tr(struct inode *inode, struct file *filp) 4741 { 4742 tracing_release_file_tr(inode, filp); 4743 return single_release(inode, filp); 4744 } 4745 4746 static int tracing_mark_open(struct inode *inode, struct file *filp) 4747 { 4748 stream_open(inode, filp); 4749 return tracing_open_generic_tr(inode, filp); 4750 } 4751 4752 static int tracing_release(struct inode *inode, struct file *file) 4753 { 4754 struct trace_array *tr = inode->i_private; 4755 struct seq_file *m = file->private_data; 4756 struct trace_iterator *iter; 4757 int cpu; 4758 4759 if (!(file->f_mode & FMODE_READ)) { 4760 trace_array_put(tr); 4761 return 0; 4762 } 4763 4764 /* Writes do not use seq_file */ 4765 iter = m->private; 4766 mutex_lock(&trace_types_lock); 4767 4768 for_each_tracing_cpu(cpu) { 4769 if (iter->buffer_iter[cpu]) 4770 ring_buffer_read_finish(iter->buffer_iter[cpu]); 4771 } 4772 4773 if (iter->trace && iter->trace->close) 4774 iter->trace->close(iter); 4775 4776 if (!iter->snapshot && tr->stop_count) 4777 /* reenable tracing if it was previously enabled */ 4778 tracing_start_tr(tr); 4779 4780 __trace_array_put(tr); 4781 4782 mutex_unlock(&trace_types_lock); 4783 4784 free_trace_iter_content(iter); 4785 seq_release_private(inode, file); 4786 4787 return 0; 4788 } 4789 4790 int tracing_release_generic_tr(struct inode *inode, struct file *file) 4791 { 4792 struct trace_array *tr = inode->i_private; 4793 4794 trace_array_put(tr); 4795 return 0; 4796 } 4797 4798 static int tracing_single_release_tr(struct inode *inode, struct file *file) 4799 { 4800 struct trace_array *tr = inode->i_private; 4801 4802 trace_array_put(tr); 4803 4804 return single_release(inode, file); 4805 } 4806 4807 static int tracing_open(struct inode *inode, struct file *file) 4808 { 4809 struct trace_array *tr = inode->i_private; 4810 struct trace_iterator *iter; 4811 int ret; 4812 4813 ret = tracing_check_open_get_tr(tr); 4814 if (ret) 4815 return ret; 4816 4817 /* If this file was open for write, then erase contents */ 4818 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 4819 int cpu = tracing_get_cpu(inode); 4820 struct array_buffer *trace_buf = &tr->array_buffer; 4821 4822 #ifdef CONFIG_TRACER_MAX_TRACE 4823 if (tr->current_trace->print_max) 4824 trace_buf = &tr->max_buffer; 4825 #endif 4826 4827 if (cpu == RING_BUFFER_ALL_CPUS) 4828 tracing_reset_online_cpus(trace_buf); 4829 else 4830 tracing_reset_cpu(trace_buf, cpu); 4831 } 4832 4833 if (file->f_mode & FMODE_READ) { 4834 iter = __tracing_open(inode, file, false); 4835 if (IS_ERR(iter)) 4836 ret = PTR_ERR(iter); 4837 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 4838 iter->iter_flags |= 
TRACE_FILE_LAT_FMT; 4839 } 4840 4841 if (ret < 0) 4842 trace_array_put(tr); 4843 4844 return ret; 4845 } 4846 4847 /* 4848 * Some tracers are not suitable for instance buffers. 4849 * A tracer is always available for the global array (toplevel) 4850 * or if it explicitly states that it is. 4851 */ 4852 static bool 4853 trace_ok_for_array(struct tracer *t, struct trace_array *tr) 4854 { 4855 #ifdef CONFIG_TRACER_SNAPSHOT 4856 /* arrays with mapped buffer range do not have snapshots */ 4857 if (tr->range_addr_start && t->use_max_tr) 4858 return false; 4859 #endif 4860 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; 4861 } 4862 4863 /* Find the next tracer that this trace array may use */ 4864 static struct tracer * 4865 get_tracer_for_array(struct trace_array *tr, struct tracer *t) 4866 { 4867 while (t && !trace_ok_for_array(t, tr)) 4868 t = t->next; 4869 4870 return t; 4871 } 4872 4873 static void * 4874 t_next(struct seq_file *m, void *v, loff_t *pos) 4875 { 4876 struct trace_array *tr = m->private; 4877 struct tracer *t = v; 4878 4879 (*pos)++; 4880 4881 if (t) 4882 t = get_tracer_for_array(tr, t->next); 4883 4884 return t; 4885 } 4886 4887 static void *t_start(struct seq_file *m, loff_t *pos) 4888 { 4889 struct trace_array *tr = m->private; 4890 struct tracer *t; 4891 loff_t l = 0; 4892 4893 mutex_lock(&trace_types_lock); 4894 4895 t = get_tracer_for_array(tr, trace_types); 4896 for (; t && l < *pos; t = t_next(m, t, &l)) 4897 ; 4898 4899 return t; 4900 } 4901 4902 static void t_stop(struct seq_file *m, void *p) 4903 { 4904 mutex_unlock(&trace_types_lock); 4905 } 4906 4907 static int t_show(struct seq_file *m, void *v) 4908 { 4909 struct tracer *t = v; 4910 4911 if (!t) 4912 return 0; 4913 4914 seq_puts(m, t->name); 4915 if (t->next) 4916 seq_putc(m, ' '); 4917 else 4918 seq_putc(m, '\n'); 4919 4920 return 0; 4921 } 4922 4923 static const struct seq_operations show_traces_seq_ops = { 4924 .start = t_start, 4925 .next = t_next, 4926 .stop = t_stop, 4927 .show = t_show, 4928 }; 4929 4930 static int show_traces_open(struct inode *inode, struct file *file) 4931 { 4932 struct trace_array *tr = inode->i_private; 4933 struct seq_file *m; 4934 int ret; 4935 4936 ret = tracing_check_open_get_tr(tr); 4937 if (ret) 4938 return ret; 4939 4940 ret = seq_open(file, &show_traces_seq_ops); 4941 if (ret) { 4942 trace_array_put(tr); 4943 return ret; 4944 } 4945 4946 m = file->private_data; 4947 m->private = tr; 4948 4949 return 0; 4950 } 4951 4952 static int tracing_seq_release(struct inode *inode, struct file *file) 4953 { 4954 struct trace_array *tr = inode->i_private; 4955 4956 trace_array_put(tr); 4957 return seq_release(inode, file); 4958 } 4959 4960 static ssize_t 4961 tracing_write_stub(struct file *filp, const char __user *ubuf, 4962 size_t count, loff_t *ppos) 4963 { 4964 return count; 4965 } 4966 4967 loff_t tracing_lseek(struct file *file, loff_t offset, int whence) 4968 { 4969 int ret; 4970 4971 if (file->f_mode & FMODE_READ) 4972 ret = seq_lseek(file, offset, whence); 4973 else 4974 file->f_pos = ret = 0; 4975 4976 return ret; 4977 } 4978 4979 static const struct file_operations tracing_fops = { 4980 .open = tracing_open, 4981 .read = seq_read, 4982 .read_iter = seq_read_iter, 4983 .splice_read = copy_splice_read, 4984 .write = tracing_write_stub, 4985 .llseek = tracing_lseek, 4986 .release = tracing_release, 4987 }; 4988 4989 static const struct file_operations show_traces_fops = { 4990 .open = show_traces_open, 4991 .read = seq_read, 4992 .llseek = seq_lseek, 4993 .release 
= tracing_seq_release, 4994 }; 4995 4996 static ssize_t 4997 tracing_cpumask_read(struct file *filp, char __user *ubuf, 4998 size_t count, loff_t *ppos) 4999 { 5000 struct trace_array *tr = file_inode(filp)->i_private; 5001 char *mask_str; 5002 int len; 5003 5004 len = snprintf(NULL, 0, "%*pb\n", 5005 cpumask_pr_args(tr->tracing_cpumask)) + 1; 5006 mask_str = kmalloc(len, GFP_KERNEL); 5007 if (!mask_str) 5008 return -ENOMEM; 5009 5010 len = snprintf(mask_str, len, "%*pb\n", 5011 cpumask_pr_args(tr->tracing_cpumask)); 5012 if (len >= count) { 5013 count = -EINVAL; 5014 goto out_err; 5015 } 5016 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); 5017 5018 out_err: 5019 kfree(mask_str); 5020 5021 return count; 5022 } 5023 5024 int tracing_set_cpumask(struct trace_array *tr, 5025 cpumask_var_t tracing_cpumask_new) 5026 { 5027 int cpu; 5028 5029 if (!tr) 5030 return -EINVAL; 5031 5032 local_irq_disable(); 5033 arch_spin_lock(&tr->max_lock); 5034 for_each_tracing_cpu(cpu) { 5035 /* 5036 * Increase/decrease the disabled counter if we are 5037 * about to flip a bit in the cpumask: 5038 */ 5039 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && 5040 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 5041 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); 5042 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); 5043 #ifdef CONFIG_TRACER_MAX_TRACE 5044 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); 5045 #endif 5046 } 5047 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && 5048 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 5049 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); 5050 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); 5051 #ifdef CONFIG_TRACER_MAX_TRACE 5052 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); 5053 #endif 5054 } 5055 } 5056 arch_spin_unlock(&tr->max_lock); 5057 local_irq_enable(); 5058 5059 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 5060 5061 return 0; 5062 } 5063 5064 static ssize_t 5065 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 5066 size_t count, loff_t *ppos) 5067 { 5068 struct trace_array *tr = file_inode(filp)->i_private; 5069 cpumask_var_t tracing_cpumask_new; 5070 int err; 5071 5072 if (count == 0 || count > KMALLOC_MAX_SIZE) 5073 return -EINVAL; 5074 5075 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 5076 return -ENOMEM; 5077 5078 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 5079 if (err) 5080 goto err_free; 5081 5082 err = tracing_set_cpumask(tr, tracing_cpumask_new); 5083 if (err) 5084 goto err_free; 5085 5086 free_cpumask_var(tracing_cpumask_new); 5087 5088 return count; 5089 5090 err_free: 5091 free_cpumask_var(tracing_cpumask_new); 5092 5093 return err; 5094 } 5095 5096 static const struct file_operations tracing_cpumask_fops = { 5097 .open = tracing_open_generic_tr, 5098 .read = tracing_cpumask_read, 5099 .write = tracing_cpumask_write, 5100 .release = tracing_release_generic_tr, 5101 .llseek = generic_file_llseek, 5102 }; 5103 5104 static int tracing_trace_options_show(struct seq_file *m, void *v) 5105 { 5106 struct tracer_opt *trace_opts; 5107 struct trace_array *tr = m->private; 5108 u32 tracer_flags; 5109 int i; 5110 5111 guard(mutex)(&trace_types_lock); 5112 5113 tracer_flags = tr->current_trace->flags->val; 5114 trace_opts = tr->current_trace->flags->opts; 5115 5116 for (i = 0; trace_options[i]; i++) { 5117 if (tr->trace_flags & (1 << i)) 5118 seq_printf(m, "%s\n", trace_options[i]); 5119 else 5120 seq_printf(m, 
"no%s\n", trace_options[i]); 5121 } 5122 5123 for (i = 0; trace_opts[i].name; i++) { 5124 if (tracer_flags & trace_opts[i].bit) 5125 seq_printf(m, "%s\n", trace_opts[i].name); 5126 else 5127 seq_printf(m, "no%s\n", trace_opts[i].name); 5128 } 5129 5130 return 0; 5131 } 5132 5133 static int __set_tracer_option(struct trace_array *tr, 5134 struct tracer_flags *tracer_flags, 5135 struct tracer_opt *opts, int neg) 5136 { 5137 struct tracer *trace = tracer_flags->trace; 5138 int ret; 5139 5140 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); 5141 if (ret) 5142 return ret; 5143 5144 if (neg) 5145 tracer_flags->val &= ~opts->bit; 5146 else 5147 tracer_flags->val |= opts->bit; 5148 return 0; 5149 } 5150 5151 /* Try to assign a tracer specific option */ 5152 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) 5153 { 5154 struct tracer *trace = tr->current_trace; 5155 struct tracer_flags *tracer_flags = trace->flags; 5156 struct tracer_opt *opts = NULL; 5157 int i; 5158 5159 for (i = 0; tracer_flags->opts[i].name; i++) { 5160 opts = &tracer_flags->opts[i]; 5161 5162 if (strcmp(cmp, opts->name) == 0) 5163 return __set_tracer_option(tr, trace->flags, opts, neg); 5164 } 5165 5166 return -EINVAL; 5167 } 5168 5169 /* Some tracers require overwrite to stay enabled */ 5170 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) 5171 { 5172 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) 5173 return -1; 5174 5175 return 0; 5176 } 5177 5178 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) 5179 { 5180 if ((mask == TRACE_ITER_RECORD_TGID) || 5181 (mask == TRACE_ITER_RECORD_CMD) || 5182 (mask == TRACE_ITER_TRACE_PRINTK)) 5183 lockdep_assert_held(&event_mutex); 5184 5185 /* do nothing if flag is already set */ 5186 if (!!(tr->trace_flags & mask) == !!enabled) 5187 return 0; 5188 5189 /* Give the tracer a chance to approve the change */ 5190 if (tr->current_trace->flag_changed) 5191 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) 5192 return -EINVAL; 5193 5194 if (mask == TRACE_ITER_TRACE_PRINTK) { 5195 if (enabled) { 5196 update_printk_trace(tr); 5197 } else { 5198 /* 5199 * The global_trace cannot clear this. 5200 * It's flag only gets cleared if another instance sets it. 5201 */ 5202 if (printk_trace == &global_trace) 5203 return -EINVAL; 5204 /* 5205 * An instance must always have it set. 5206 * by default, that's the global_trace instane. 
5207 */ 5208 if (printk_trace == tr) 5209 update_printk_trace(&global_trace); 5210 } 5211 } 5212 5213 if (enabled) 5214 tr->trace_flags |= mask; 5215 else 5216 tr->trace_flags &= ~mask; 5217 5218 if (mask == TRACE_ITER_RECORD_CMD) 5219 trace_event_enable_cmd_record(enabled); 5220 5221 if (mask == TRACE_ITER_RECORD_TGID) { 5222 5223 if (trace_alloc_tgid_map() < 0) { 5224 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; 5225 return -ENOMEM; 5226 } 5227 5228 trace_event_enable_tgid_record(enabled); 5229 } 5230 5231 if (mask == TRACE_ITER_EVENT_FORK) 5232 trace_event_follow_fork(tr, enabled); 5233 5234 if (mask == TRACE_ITER_FUNC_FORK) 5235 ftrace_pid_follow_fork(tr, enabled); 5236 5237 if (mask == TRACE_ITER_OVERWRITE) { 5238 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); 5239 #ifdef CONFIG_TRACER_MAX_TRACE 5240 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); 5241 #endif 5242 } 5243 5244 if (mask == TRACE_ITER_PRINTK) { 5245 trace_printk_start_stop_comm(enabled); 5246 trace_printk_control(enabled); 5247 } 5248 5249 return 0; 5250 } 5251 5252 int trace_set_options(struct trace_array *tr, char *option) 5253 { 5254 char *cmp; 5255 int neg = 0; 5256 int ret; 5257 size_t orig_len = strlen(option); 5258 int len; 5259 5260 cmp = strstrip(option); 5261 5262 len = str_has_prefix(cmp, "no"); 5263 if (len) 5264 neg = 1; 5265 5266 cmp += len; 5267 5268 mutex_lock(&event_mutex); 5269 mutex_lock(&trace_types_lock); 5270 5271 ret = match_string(trace_options, -1, cmp); 5272 /* If no option could be set, test the specific tracer options */ 5273 if (ret < 0) 5274 ret = set_tracer_option(tr, cmp, neg); 5275 else 5276 ret = set_tracer_flag(tr, 1 << ret, !neg); 5277 5278 mutex_unlock(&trace_types_lock); 5279 mutex_unlock(&event_mutex); 5280 5281 /* 5282 * If the first trailing whitespace is replaced with '\0' by strstrip, 5283 * turn it back into a space. 
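 * (the option string may point into a longer buffer that is parsed again, e.g. by apply_trace_boot_options(), so its length must be preserved)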
5284 */ 5285 if (orig_len > strlen(option)) 5286 option[strlen(option)] = ' '; 5287 5288 return ret; 5289 } 5290 5291 static void __init apply_trace_boot_options(void) 5292 { 5293 char *buf = trace_boot_options_buf; 5294 char *option; 5295 5296 while (true) { 5297 option = strsep(&buf, ","); 5298 5299 if (!option) 5300 break; 5301 5302 if (*option) 5303 trace_set_options(&global_trace, option); 5304 5305 /* Put back the comma to allow this to be called again */ 5306 if (buf) 5307 *(buf - 1) = ','; 5308 } 5309 } 5310 5311 static ssize_t 5312 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 5313 size_t cnt, loff_t *ppos) 5314 { 5315 struct seq_file *m = filp->private_data; 5316 struct trace_array *tr = m->private; 5317 char buf[64]; 5318 int ret; 5319 5320 if (cnt >= sizeof(buf)) 5321 return -EINVAL; 5322 5323 if (copy_from_user(buf, ubuf, cnt)) 5324 return -EFAULT; 5325 5326 buf[cnt] = 0; 5327 5328 ret = trace_set_options(tr, buf); 5329 if (ret < 0) 5330 return ret; 5331 5332 *ppos += cnt; 5333 5334 return cnt; 5335 } 5336 5337 static int tracing_trace_options_open(struct inode *inode, struct file *file) 5338 { 5339 struct trace_array *tr = inode->i_private; 5340 int ret; 5341 5342 ret = tracing_check_open_get_tr(tr); 5343 if (ret) 5344 return ret; 5345 5346 ret = single_open(file, tracing_trace_options_show, inode->i_private); 5347 if (ret < 0) 5348 trace_array_put(tr); 5349 5350 return ret; 5351 } 5352 5353 static const struct file_operations tracing_iter_fops = { 5354 .open = tracing_trace_options_open, 5355 .read = seq_read, 5356 .llseek = seq_lseek, 5357 .release = tracing_single_release_tr, 5358 .write = tracing_trace_options_write, 5359 }; 5360 5361 static const char readme_msg[] = 5362 "tracing mini-HOWTO:\n\n" 5363 "By default tracefs removes all OTH file permission bits.\n" 5364 "When mounting tracefs an optional group id can be specified\n" 5365 "which adds the group to every directory and file in tracefs:\n\n" 5366 "\t e.g. 
mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n" 5367 "# echo 0 > tracing_on : quick way to disable tracing\n" 5368 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" 5369 " Important files:\n" 5370 " trace\t\t\t- The static contents of the buffer\n" 5371 "\t\t\t To clear the buffer write into this file: echo > trace\n" 5372 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" 5373 " current_tracer\t- function and latency tracers\n" 5374 " available_tracers\t- list of configured tracers for current_tracer\n" 5375 " error_log\t- error log for failed commands (that support it)\n" 5376 " buffer_size_kb\t- view and modify size of per cpu buffer\n" 5377 " buffer_total_size_kb - view total size of all cpu buffers\n\n" 5378 " trace_clock\t\t- change the clock used to order events\n" 5379 " local: Per cpu clock but may not be synced across CPUs\n" 5380 " global: Synced across CPUs but slows tracing down.\n" 5381 " counter: Not a clock, but just an increment\n" 5382 " uptime: Jiffy counter from time of boot\n" 5383 " perf: Same clock that perf events use\n" 5384 #ifdef CONFIG_X86_64 5385 " x86-tsc: TSC cycle counter\n" 5386 #endif 5387 "\n timestamp_mode\t- view the mode used to timestamp events\n" 5388 " delta: Delta difference against a buffer-wide timestamp\n" 5389 " absolute: Absolute (standalone) timestamp\n" 5390 "\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n" 5391 "\n trace_marker_raw\t\t- Writes into this file write binary data into the kernel buffer\n" 5392 " tracing_cpumask\t- Limit which CPUs to trace\n" 5393 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" 5394 "\t\t\t Remove sub-buffer with rmdir\n" 5395 " trace_options\t\t- Set format or modify how tracing happens\n" 5396 "\t\t\t Disable an option by prefixing 'no' to the\n" 5397 "\t\t\t option name\n" 5398 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" 5399 #ifdef CONFIG_DYNAMIC_FTRACE 5400 "\n available_filter_functions - list of functions that can be filtered on\n" 5401 " set_ftrace_filter\t- echo function name in here to only trace these\n" 5402 "\t\t\t functions\n" 5403 "\t accepts: func_full_name or glob-matching-pattern\n" 5404 "\t modules: Can select a group via module\n" 5405 "\t Format: :mod:<module-name>\n" 5406 "\t example: echo :mod:ext3 > set_ftrace_filter\n" 5407 "\t triggers: a command to perform when function is hit\n" 5408 "\t Format: <function>:<trigger>[:count]\n" 5409 "\t trigger: traceon, traceoff\n" 5410 "\t\t enable_event:<system>:<event>\n" 5411 "\t\t disable_event:<system>:<event>\n" 5412 #ifdef CONFIG_STACKTRACE 5413 "\t\t stacktrace\n" 5414 #endif 5415 #ifdef CONFIG_TRACER_SNAPSHOT 5416 "\t\t snapshot\n" 5417 #endif 5418 "\t\t dump\n" 5419 "\t\t cpudump\n" 5420 "\t example: echo do_fault:traceoff > set_ftrace_filter\n" 5421 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" 5422 "\t The first one will disable tracing every time do_fault is hit\n" 5423 "\t The second will disable tracing at most 3 times when do_trap is hit\n" 5424 "\t The first time do_trap is hit and it disables tracing, the\n" 5425 "\t counter will decrement to 2. If tracing is already disabled,\n" 5426 "\t the counter will not decrement. 
It only decrements when the\n" 5427 "\t trigger did work\n" 5428 "\t To remove trigger without count:\n" 5429 "\t echo '!<function>:<trigger> > set_ftrace_filter\n" 5430 "\t To remove trigger with a count:\n" 5431 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" 5432 " set_ftrace_notrace\t- echo function name in here to never trace.\n" 5433 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" 5434 "\t modules: Can select a group via module command :mod:\n" 5435 "\t Does not accept triggers\n" 5436 #endif /* CONFIG_DYNAMIC_FTRACE */ 5437 #ifdef CONFIG_FUNCTION_TRACER 5438 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" 5439 "\t\t (function)\n" 5440 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n" 5441 "\t\t (function)\n" 5442 #endif 5443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5444 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" 5445 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" 5446 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" 5447 #endif 5448 #ifdef CONFIG_TRACER_SNAPSHOT 5449 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" 5450 "\t\t\t snapshot buffer. Read the contents for more\n" 5451 "\t\t\t information\n" 5452 #endif 5453 #ifdef CONFIG_STACK_TRACER 5454 " stack_trace\t\t- Shows the max stack trace when active\n" 5455 " stack_max_size\t- Shows current max stack size that was traced\n" 5456 "\t\t\t Write into this file to reset the max size (trigger a\n" 5457 "\t\t\t new trace)\n" 5458 #ifdef CONFIG_DYNAMIC_FTRACE 5459 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" 5460 "\t\t\t traces\n" 5461 #endif 5462 #endif /* CONFIG_STACK_TRACER */ 5463 #ifdef CONFIG_DYNAMIC_EVENTS 5464 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n" 5465 "\t\t\t Write into this file to define/undefine new trace events.\n" 5466 #endif 5467 #ifdef CONFIG_KPROBE_EVENTS 5468 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n" 5469 "\t\t\t Write into this file to define/undefine new trace events.\n" 5470 #endif 5471 #ifdef CONFIG_UPROBE_EVENTS 5472 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n" 5473 "\t\t\t Write into this file to define/undefine new trace events.\n" 5474 #endif 5475 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \ 5476 defined(CONFIG_FPROBE_EVENTS) 5477 "\t accepts: event-definitions (one definition per line)\n" 5478 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) 5479 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n" 5480 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n" 5481 #endif 5482 #ifdef CONFIG_FPROBE_EVENTS 5483 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n" 5484 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n" 5485 #endif 5486 #ifdef CONFIG_HIST_TRIGGERS 5487 "\t s:[synthetic/]<event> <field> [<field>]\n" 5488 #endif 5489 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n" 5490 "\t -:[<group>/][<event>]\n" 5491 #ifdef CONFIG_KPROBE_EVENTS 5492 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" 5493 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n" 5494 #endif 5495 #ifdef CONFIG_UPROBE_EVENTS 5496 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n" 5497 #endif 5498 "\t args: <name>=fetcharg[:type]\n" 5499 "\t fetcharg: 
(%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n" 5500 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API 5501 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n" 5502 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS 5503 "\t <argname>[->field[->field|.field...]],\n" 5504 #endif 5505 #else 5506 "\t $stack<index>, $stack, $retval, $comm,\n" 5507 #endif 5508 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n" 5509 "\t kernel return probes support: $retval, $arg<N>, $comm\n" 5510 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n" 5511 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n" 5512 "\t symstr, %pd/%pD, <type>\\[<array-size>\\]\n" 5513 #ifdef CONFIG_HIST_TRIGGERS 5514 "\t field: <stype> <name>;\n" 5515 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" 5516 "\t [unsigned] char/int/long\n" 5517 #endif 5518 "\t efield: For event probes ('e' types), the field is one of the fields\n" 5519 "\t of the <attached-group>/<attached-event>.\n" 5520 #endif 5521 " set_event\t\t- Enables events by name written into it\n" 5522 "\t\t\t Can enable module events via: :mod:<module>\n" 5523 " events/\t\t- Directory containing all trace event subsystems:\n" 5524 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" 5525 " events/<system>/\t- Directory containing all trace events for <system>:\n" 5526 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" 5527 "\t\t\t events\n" 5528 " filter\t\t- If set, only events passing filter are traced\n" 5529 " events/<system>/<event>/\t- Directory containing control files for\n" 5530 "\t\t\t <event>:\n" 5531 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" 5532 " filter\t\t- If set, only events passing filter are traced\n" 5533 " trigger\t\t- If set, a command to perform when event is hit\n" 5534 "\t Format: <trigger>[:count][if <filter>]\n" 5535 "\t trigger: traceon, traceoff\n" 5536 "\t enable_event:<system>:<event>\n" 5537 "\t disable_event:<system>:<event>\n" 5538 #ifdef CONFIG_HIST_TRIGGERS 5539 "\t enable_hist:<system>:<event>\n" 5540 "\t disable_hist:<system>:<event>\n" 5541 #endif 5542 #ifdef CONFIG_STACKTRACE 5543 "\t\t stacktrace\n" 5544 #endif 5545 #ifdef CONFIG_TRACER_SNAPSHOT 5546 "\t\t snapshot\n" 5547 #endif 5548 #ifdef CONFIG_HIST_TRIGGERS 5549 "\t\t hist (see below)\n" 5550 #endif 5551 "\t example: echo traceoff > events/block/block_unplug/trigger\n" 5552 "\t echo traceoff:3 > events/block/block_unplug/trigger\n" 5553 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" 5554 "\t events/block/block_unplug/trigger\n" 5555 "\t The first disables tracing every time block_unplug is hit.\n" 5556 "\t The second disables tracing the first 3 times block_unplug is hit.\n" 5557 "\t The third enables the kmalloc event the first 3 times block_unplug\n" 5558 "\t is hit and has a value greater than 1 for the 'nr_rq' event field.\n" 5559 "\t Like function triggers, the counter is only decremented if it\n" 5560 "\t enabled or disabled tracing.\n" 5561 "\t To remove a trigger without a count:\n" 5562 "\t echo '!<trigger> > <system>/<event>/trigger\n" 5563 "\t To remove a trigger with a count:\n" 5564 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n" 5565 "\t Filters can be ignored when removing a trigger.\n" 5566 #ifdef CONFIG_HIST_TRIGGERS 5567 " hist trigger\t- If set, event hits are aggregated into a hash table\n" 5568 "\t Format: hist:keys=<field1[,field2,...]>\n" 5569 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n" 5570 "\t 
[:values=<field1[,field2,...]>]\n" 5571 "\t [:sort=<field1[,field2,...]>]\n" 5572 "\t [:size=#entries]\n" 5573 "\t [:pause][:continue][:clear]\n" 5574 "\t [:name=histname1]\n" 5575 "\t [:nohitcount]\n" 5576 "\t [:<handler>.<action>]\n" 5577 "\t [if <filter>]\n\n" 5578 "\t Note, special fields can be used as well:\n" 5579 "\t common_timestamp - to record current timestamp\n" 5580 "\t common_cpu - to record the CPU the event happened on\n" 5581 "\n" 5582 "\t A hist trigger variable can be:\n" 5583 "\t - a reference to a field e.g. x=current_timestamp,\n" 5584 "\t - a reference to another variable e.g. y=$x,\n" 5585 "\t - a numeric literal: e.g. ms_per_sec=1000,\n" 5586 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n" 5587 "\n" 5588 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n" 5589 "\t multiplication(*) and division(/) operators. An operand can be either a\n" 5590 "\t variable reference, field or numeric literal.\n" 5591 "\n" 5592 "\t When a matching event is hit, an entry is added to a hash\n" 5593 "\t table using the key(s) and value(s) named, and the value of a\n" 5594 "\t sum called 'hitcount' is incremented. Keys and values\n" 5595 "\t correspond to fields in the event's format description. Keys\n" 5596 "\t can be any field, or the special string 'common_stacktrace'.\n" 5597 "\t Compound keys consisting of up to two fields can be specified\n" 5598 "\t by the 'keys' keyword. Values must correspond to numeric\n" 5599 "\t fields. Sort keys consisting of up to two fields can be\n" 5600 "\t specified using the 'sort' keyword. The sort direction can\n" 5601 "\t be modified by appending '.descending' or '.ascending' to a\n" 5602 "\t sort field. The 'size' parameter can be used to specify more\n" 5603 "\t or fewer than the default 2048 entries for the hashtable size.\n" 5604 "\t If a hist trigger is given a name using the 'name' parameter,\n" 5605 "\t its histogram data will be shared with other triggers of the\n" 5606 "\t same name, and trigger hits will update this common data.\n\n" 5607 "\t Reading the 'hist' file for the event will dump the hash\n" 5608 "\t table in its entirety to stdout. If there are multiple hist\n" 5609 "\t triggers attached to an event, there will be a table for each\n" 5610 "\t trigger in the output. The table displayed for a named\n" 5611 "\t trigger will be the same as any other instance having the\n" 5612 "\t same name. The default format used to display a given field\n" 5613 "\t can be modified by appending any of the following modifiers\n" 5614 "\t to the field name, as applicable:\n\n" 5615 "\t .hex display a number as a hex value\n" 5616 "\t .sym display an address as a symbol\n" 5617 "\t .sym-offset display an address as a symbol and offset\n" 5618 "\t .execname display a common_pid as a program name\n" 5619 "\t .syscall display a syscall id as a syscall name\n" 5620 "\t .log2 display log2 value rather than raw number\n" 5621 "\t .buckets=size display values in groups of size rather than raw number\n" 5622 "\t .usecs display a common_timestamp in microseconds\n" 5623 "\t .percent display a number as a percentage value\n" 5624 "\t .graph display a bar-graph of a value\n\n" 5625 "\t The 'pause' parameter can be used to pause an existing hist\n" 5626 "\t trigger or to start a hist trigger but not log any events\n" 5627 "\t until told to do so. 
'continue' can be used to start or\n" 5628 "\t restart a paused hist trigger.\n\n" 5629 "\t The 'clear' parameter will clear the contents of a running\n" 5630 "\t hist trigger and leave its current paused/active state\n" 5631 "\t unchanged.\n\n" 5632 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n" 5633 "\t raw hitcount in the histogram.\n\n" 5634 "\t The enable_hist and disable_hist triggers can be used to\n" 5635 "\t have one event conditionally start and stop another event's\n" 5636 "\t already-attached hist trigger. The syntax is analogous to\n" 5637 "\t the enable_event and disable_event triggers.\n\n" 5638 "\t Hist trigger handlers and actions are executed whenever\n" 5639 "\t a histogram entry is added or updated. They take the form:\n\n" 5640 "\t <handler>.<action>\n\n" 5641 "\t The available handlers are:\n\n" 5642 "\t onmatch(matching.event) - invoke on addition or update\n" 5643 "\t onmax(var) - invoke if var exceeds current max\n" 5644 "\t onchange(var) - invoke action if var changes\n\n" 5645 "\t The available actions are:\n\n" 5646 "\t trace(<synthetic_event>,param list) - generate synthetic event\n" 5647 "\t save(field,...) - save current event fields\n" 5648 #ifdef CONFIG_TRACER_SNAPSHOT 5649 "\t snapshot() - snapshot the trace buffer\n\n" 5650 #endif 5651 #ifdef CONFIG_SYNTH_EVENTS 5652 " events/synthetic_events\t- Create/append/remove/show synthetic events\n" 5653 "\t Write into this file to define/undefine new synthetic events.\n" 5654 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n" 5655 #endif 5656 #endif 5657 ; 5658 5659 static ssize_t 5660 tracing_readme_read(struct file *filp, char __user *ubuf, 5661 size_t cnt, loff_t *ppos) 5662 { 5663 return simple_read_from_buffer(ubuf, cnt, ppos, 5664 readme_msg, strlen(readme_msg)); 5665 } 5666 5667 static const struct file_operations tracing_readme_fops = { 5668 .open = tracing_open_generic, 5669 .read = tracing_readme_read, 5670 .llseek = generic_file_llseek, 5671 }; 5672 5673 #ifdef CONFIG_TRACE_EVAL_MAP_FILE 5674 static union trace_eval_map_item * 5675 update_eval_map(union trace_eval_map_item *ptr) 5676 { 5677 if (!ptr->map.eval_string) { 5678 if (ptr->tail.next) { 5679 ptr = ptr->tail.next; 5680 /* Set ptr to the next real item (skip head) */ 5681 ptr++; 5682 } else 5683 return NULL; 5684 } 5685 return ptr; 5686 } 5687 5688 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) 5689 { 5690 union trace_eval_map_item *ptr = v; 5691 5692 /* 5693 * Paranoid! If ptr points to end, we don't want to increment past it. 5694 * This really should never happen. 
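 * (update_eval_map() returns NULL only when an item has neither an eval_string nor a tail.next, i.e. a malformed list; the WARN_ON_ONCE() below flags exactly that case.)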
5695 */ 5696 (*pos)++; 5697 ptr = update_eval_map(ptr); 5698 if (WARN_ON_ONCE(!ptr)) 5699 return NULL; 5700 5701 ptr++; 5702 ptr = update_eval_map(ptr); 5703 5704 return ptr; 5705 } 5706 5707 static void *eval_map_start(struct seq_file *m, loff_t *pos) 5708 { 5709 union trace_eval_map_item *v; 5710 loff_t l = 0; 5711 5712 mutex_lock(&trace_eval_mutex); 5713 5714 v = trace_eval_maps; 5715 if (v) 5716 v++; 5717 5718 while (v && l < *pos) { 5719 v = eval_map_next(m, v, &l); 5720 } 5721 5722 return v; 5723 } 5724 5725 static void eval_map_stop(struct seq_file *m, void *v) 5726 { 5727 mutex_unlock(&trace_eval_mutex); 5728 } 5729 5730 static int eval_map_show(struct seq_file *m, void *v) 5731 { 5732 union trace_eval_map_item *ptr = v; 5733 5734 seq_printf(m, "%s %ld (%s)\n", 5735 ptr->map.eval_string, ptr->map.eval_value, 5736 ptr->map.system); 5737 5738 return 0; 5739 } 5740 5741 static const struct seq_operations tracing_eval_map_seq_ops = { 5742 .start = eval_map_start, 5743 .next = eval_map_next, 5744 .stop = eval_map_stop, 5745 .show = eval_map_show, 5746 }; 5747 5748 static int tracing_eval_map_open(struct inode *inode, struct file *filp) 5749 { 5750 int ret; 5751 5752 ret = tracing_check_open_get_tr(NULL); 5753 if (ret) 5754 return ret; 5755 5756 return seq_open(filp, &tracing_eval_map_seq_ops); 5757 } 5758 5759 static const struct file_operations tracing_eval_map_fops = { 5760 .open = tracing_eval_map_open, 5761 .read = seq_read, 5762 .llseek = seq_lseek, 5763 .release = seq_release, 5764 }; 5765 5766 static inline union trace_eval_map_item * 5767 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) 5768 { 5769 /* Return tail of array given the head */ 5770 return ptr + ptr->head.length + 1; 5771 } 5772 5773 static void 5774 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, 5775 int len) 5776 { 5777 struct trace_eval_map **stop; 5778 struct trace_eval_map **map; 5779 union trace_eval_map_item *map_array; 5780 union trace_eval_map_item *ptr; 5781 5782 stop = start + len; 5783 5784 /* 5785 * The trace_eval_maps contains the map plus a head and tail item, 5786 * where the head holds the module and length of array, and the 5787 * tail holds a pointer to the next list. 
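 * An illustrative layout of one such saved block:
 *
 *   [ head(mod, length) ][ map 0 ] ... [ map len-1 ][ tail(next) ]
 *
 * which is why trace_eval_jmp_to_tail() below is simply
 * ptr + head.length + 1.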
5788 */ 5789 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); 5790 if (!map_array) { 5791 pr_warn("Unable to allocate trace eval mapping\n"); 5792 return; 5793 } 5794 5795 guard(mutex)(&trace_eval_mutex); 5796 5797 if (!trace_eval_maps) 5798 trace_eval_maps = map_array; 5799 else { 5800 ptr = trace_eval_maps; 5801 for (;;) { 5802 ptr = trace_eval_jmp_to_tail(ptr); 5803 if (!ptr->tail.next) 5804 break; 5805 ptr = ptr->tail.next; 5806 5807 } 5808 ptr->tail.next = map_array; 5809 } 5810 map_array->head.mod = mod; 5811 map_array->head.length = len; 5812 map_array++; 5813 5814 for (map = start; (unsigned long)map < (unsigned long)stop; map++) { 5815 map_array->map = **map; 5816 map_array++; 5817 } 5818 memset(map_array, 0, sizeof(*map_array)); 5819 } 5820 5821 static void trace_create_eval_file(struct dentry *d_tracer) 5822 { 5823 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer, 5824 NULL, &tracing_eval_map_fops); 5825 } 5826 5827 #else /* CONFIG_TRACE_EVAL_MAP_FILE */ 5828 static inline void trace_create_eval_file(struct dentry *d_tracer) { } 5829 static inline void trace_insert_eval_map_file(struct module *mod, 5830 struct trace_eval_map **start, int len) { } 5831 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ 5832 5833 static void trace_insert_eval_map(struct module *mod, 5834 struct trace_eval_map **start, int len) 5835 { 5836 struct trace_eval_map **map; 5837 5838 if (len <= 0) 5839 return; 5840 5841 map = start; 5842 5843 trace_event_eval_update(map, len); 5844 5845 trace_insert_eval_map_file(mod, start, len); 5846 } 5847 5848 static ssize_t 5849 tracing_set_trace_read(struct file *filp, char __user *ubuf, 5850 size_t cnt, loff_t *ppos) 5851 { 5852 struct trace_array *tr = filp->private_data; 5853 char buf[MAX_TRACER_SIZE+2]; 5854 int r; 5855 5856 mutex_lock(&trace_types_lock); 5857 r = sprintf(buf, "%s\n", tr->current_trace->name); 5858 mutex_unlock(&trace_types_lock); 5859 5860 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5861 } 5862 5863 int tracer_init(struct tracer *t, struct trace_array *tr) 5864 { 5865 tracing_reset_online_cpus(&tr->array_buffer); 5866 return t->init(tr); 5867 } 5868 5869 static void set_buffer_entries(struct array_buffer *buf, unsigned long val) 5870 { 5871 int cpu; 5872 5873 for_each_tracing_cpu(cpu) 5874 per_cpu_ptr(buf->data, cpu)->entries = val; 5875 } 5876 5877 static void update_buffer_entries(struct array_buffer *buf, int cpu) 5878 { 5879 if (cpu == RING_BUFFER_ALL_CPUS) { 5880 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0)); 5881 } else { 5882 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu); 5883 } 5884 } 5885 5886 #ifdef CONFIG_TRACER_MAX_TRACE 5887 /* resize @tr's buffer to the size of @size_tr's entries */ 5888 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, 5889 struct array_buffer *size_buf, int cpu_id) 5890 { 5891 int cpu, ret = 0; 5892 5893 if (cpu_id == RING_BUFFER_ALL_CPUS) { 5894 for_each_tracing_cpu(cpu) { 5895 ret = ring_buffer_resize(trace_buf->buffer, 5896 per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 5897 if (ret < 0) 5898 break; 5899 per_cpu_ptr(trace_buf->data, cpu)->entries = 5900 per_cpu_ptr(size_buf->data, cpu)->entries; 5901 } 5902 } else { 5903 ret = ring_buffer_resize(trace_buf->buffer, 5904 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 5905 if (ret == 0) 5906 per_cpu_ptr(trace_buf->data, cpu_id)->entries = 5907 per_cpu_ptr(size_buf->data, cpu_id)->entries; 5908 } 5909 5910 return ret; 5911 } 5912 #endif /* CONFIG_TRACER_MAX_TRACE 
*/
5913
5914 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5915 unsigned long size, int cpu)
5916 {
5917 int ret;
5918
5919 /*
5920 * If kernel or user changes the size of the ring buffer
5921 * we use the size that was given, and we can forget about
5922 * expanding it later.
5923 */
5924 trace_set_ring_buffer_expanded(tr);
5925
5926 /* May be called before buffers are initialized */
5927 if (!tr->array_buffer.buffer)
5928 return 0;
5929
5930 /* Do not allow tracing while resizing ring buffer */
5931 tracing_stop_tr(tr);
5932
5933 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5934 if (ret < 0)
5935 goto out_start;
5936
5937 #ifdef CONFIG_TRACER_MAX_TRACE
5938 if (!tr->allocated_snapshot)
5939 goto out;
5940
5941 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5942 if (ret < 0) {
5943 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5944 &tr->array_buffer, cpu);
5945 if (r < 0) {
5946 /*
5947 * AARGH! We are left with a max buffer of a
5948 * different size!
5949 * The max buffer is our "snapshot" buffer.
5950 * When a tracer needs a snapshot (one of the
5951 * latency tracers), it swaps the max buffer
5952 * with the saved snapshot. We succeeded in
5953 * updating the size of the main buffer, but failed to
5954 * update the size of the max buffer. And when we tried
5955 * to reset the main buffer to the original size, we
5956 * failed there too. This is very unlikely to
5957 * happen, but if it does, warn and kill all
5958 * tracing.
5959 */
5960 WARN_ON(1);
5961 tracing_disabled = 1;
5962 }
5963 goto out_start;
5964 }
5965
5966 update_buffer_entries(&tr->max_buffer, cpu);
5967
5968 out:
5969 #endif /* CONFIG_TRACER_MAX_TRACE */
5970
5971 update_buffer_entries(&tr->array_buffer, cpu);
5972 out_start:
5973 tracing_start_tr(tr);
5974 return ret;
5975 }
5976
5977 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5978 unsigned long size, int cpu_id)
5979 {
5980 int ret;
5981
5982 guard(mutex)(&trace_types_lock);
5983
5984 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5985 /* make sure this cpu is enabled in the mask */
5986 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
5987 return -EINVAL;
5988 }
5989
5990 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5991 if (ret < 0)
5992 ret = -ENOMEM;
5993
5994 return ret;
5995 }
5996
5997 static void update_last_data(struct trace_array *tr)
5998 {
5999 if (!tr->text_delta && !tr->data_delta)
6000 return;
6001
6002 /*
6003 * Need to clear all CPU buffers as there cannot be events
6004 * from the previous boot mixed with events from this boot,
6005 * as that will cause a confusing trace. Need to clear all
6006 * CPU buffers, even for those that may currently be offline.
6007 */
6008 tracing_reset_all_cpus(&tr->array_buffer);
6009
6010 /* Using current data now */
6011 tr->text_delta = 0;
6012 tr->data_delta = 0;
6013 }
6014
6015 /**
6016 * tracing_update_buffers - used by tracing facility to expand ring buffers
6017 * @tr: The tracing instance
6018 *
6019 * To save memory when tracing is never used on a system that has it
6020 * configured in, the ring buffers are set to a minimum size. Once
6021 * a user starts to use the tracing facility, they need to grow
6022 * to their default size.
6023 *
6024 * This function is to be called when a tracer is about to be used.
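*
* Returns 0 on success, or a negative error code if resizing the
* ring buffer fails.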
6025 */
6026 int tracing_update_buffers(struct trace_array *tr)
6027 {
6028 int ret = 0;
6029
6030 mutex_lock(&trace_types_lock);
6031
6032 update_last_data(tr);
6033
6034 if (!tr->ring_buffer_expanded)
6035 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6036 RING_BUFFER_ALL_CPUS);
6037 mutex_unlock(&trace_types_lock);
6038
6039 return ret;
6040 }
6041
6042 struct trace_option_dentry;
6043
6044 static void
6045 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6046
6047 /*
6048 * Used to clear out the tracer before deletion of an instance.
6049 * Must have trace_types_lock held.
6050 */
6051 static void tracing_set_nop(struct trace_array *tr)
6052 {
6053 if (tr->current_trace == &nop_trace)
6054 return;
6055
6056 tr->current_trace->enabled--;
6057
6058 if (tr->current_trace->reset)
6059 tr->current_trace->reset(tr);
6060
6061 tr->current_trace = &nop_trace;
6062 }
6063
6064 static bool tracer_options_updated;
6065
6066 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6067 {
6068 /* Only enable if the directory has been created already. */
6069 if (!tr->dir)
6070 return;
6071
6072 /* Only create trace option files after update_tracer_options finishes */
6073 if (!tracer_options_updated)
6074 return;
6075
6076 create_trace_option_files(tr, t);
6077 }
6078
6079 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6080 {
6081 struct tracer *t;
6082 #ifdef CONFIG_TRACER_MAX_TRACE
6083 bool had_max_tr;
6084 #endif
6085 int ret;
6086
6087 guard(mutex)(&trace_types_lock);
6088
6089 update_last_data(tr);
6090
6091 if (!tr->ring_buffer_expanded) {
6092 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6093 RING_BUFFER_ALL_CPUS);
6094 if (ret < 0)
6095 return ret;
6096 ret = 0;
6097 }
6098
6099 for (t = trace_types; t; t = t->next) {
6100 if (strcmp(t->name, buf) == 0)
6101 break;
6102 }
6103 if (!t)
6104 return -EINVAL;
6105
6106 if (t == tr->current_trace)
6107 return 0;
6108
6109 #ifdef CONFIG_TRACER_SNAPSHOT
6110 if (t->use_max_tr) {
6111 local_irq_disable();
6112 arch_spin_lock(&tr->max_lock);
6113 ret = tr->cond_snapshot ? -EBUSY : 0;
6114 arch_spin_unlock(&tr->max_lock);
6115 local_irq_enable();
6116 if (ret)
6117 return ret;
6118 }
6119 #endif
6120 /* Some tracers won't work on the kernel command line */
6121 if (system_state < SYSTEM_RUNNING && t->noboot) {
6122 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6123 t->name);
6124 return -EINVAL;
6125 }
6126
6127 /* Some tracers are only allowed for the top level buffer */
6128 if (!trace_ok_for_array(t, tr))
6129 return -EINVAL;
6130
6131 /* If trace pipe files are being read, we can't change the tracer */
6132 if (tr->trace_ref)
6133 return -EBUSY;
6134
6135 trace_branch_disable();
6136
6137 tr->current_trace->enabled--;
6138
6139 if (tr->current_trace->reset)
6140 tr->current_trace->reset(tr);
6141
6142 #ifdef CONFIG_TRACER_MAX_TRACE
6143 had_max_tr = tr->current_trace->use_max_tr;
6144
6145 /* Current trace needs to be nop_trace before synchronize_rcu */
6146 tr->current_trace = &nop_trace;
6147
6148 if (had_max_tr && !t->use_max_tr) {
6149 /*
6150 * We need to make sure that the update_max_tr sees that
6151 * current_trace changed to nop_trace to keep it from
6152 * swapping the buffers after we resize it.
6153 * The update_max_tr is called with interrupts disabled,
6154 * so a synchronize_rcu() is sufficient.
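* (With the consolidated RCU flavors, synchronize_rcu() also waits
* for regions that run with interrupts disabled, which is what makes
* it sufficient here.)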
6155 */ 6156 synchronize_rcu(); 6157 free_snapshot(tr); 6158 tracing_disarm_snapshot(tr); 6159 } 6160 6161 if (!had_max_tr && t->use_max_tr) { 6162 ret = tracing_arm_snapshot_locked(tr); 6163 if (ret) 6164 return ret; 6165 } 6166 #else 6167 tr->current_trace = &nop_trace; 6168 #endif 6169 6170 if (t->init) { 6171 ret = tracer_init(t, tr); 6172 if (ret) { 6173 #ifdef CONFIG_TRACER_MAX_TRACE 6174 if (t->use_max_tr) 6175 tracing_disarm_snapshot(tr); 6176 #endif 6177 return ret; 6178 } 6179 } 6180 6181 tr->current_trace = t; 6182 tr->current_trace->enabled++; 6183 trace_branch_enable(tr); 6184 6185 return 0; 6186 } 6187 6188 static ssize_t 6189 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 6190 size_t cnt, loff_t *ppos) 6191 { 6192 struct trace_array *tr = filp->private_data; 6193 char buf[MAX_TRACER_SIZE+1]; 6194 char *name; 6195 size_t ret; 6196 int err; 6197 6198 ret = cnt; 6199 6200 if (cnt > MAX_TRACER_SIZE) 6201 cnt = MAX_TRACER_SIZE; 6202 6203 if (copy_from_user(buf, ubuf, cnt)) 6204 return -EFAULT; 6205 6206 buf[cnt] = 0; 6207 6208 name = strim(buf); 6209 6210 err = tracing_set_tracer(tr, name); 6211 if (err) 6212 return err; 6213 6214 *ppos += ret; 6215 6216 return ret; 6217 } 6218 6219 static ssize_t 6220 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, 6221 size_t cnt, loff_t *ppos) 6222 { 6223 char buf[64]; 6224 int r; 6225 6226 r = snprintf(buf, sizeof(buf), "%ld\n", 6227 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 6228 if (r > sizeof(buf)) 6229 r = sizeof(buf); 6230 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6231 } 6232 6233 static ssize_t 6234 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, 6235 size_t cnt, loff_t *ppos) 6236 { 6237 unsigned long val; 6238 int ret; 6239 6240 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6241 if (ret) 6242 return ret; 6243 6244 *ptr = val * 1000; 6245 6246 return cnt; 6247 } 6248 6249 static ssize_t 6250 tracing_thresh_read(struct file *filp, char __user *ubuf, 6251 size_t cnt, loff_t *ppos) 6252 { 6253 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); 6254 } 6255 6256 static ssize_t 6257 tracing_thresh_write(struct file *filp, const char __user *ubuf, 6258 size_t cnt, loff_t *ppos) 6259 { 6260 struct trace_array *tr = filp->private_data; 6261 int ret; 6262 6263 guard(mutex)(&trace_types_lock); 6264 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); 6265 if (ret < 0) 6266 return ret; 6267 6268 if (tr->current_trace->update_thresh) { 6269 ret = tr->current_trace->update_thresh(tr); 6270 if (ret < 0) 6271 return ret; 6272 } 6273 6274 return cnt; 6275 } 6276 6277 #ifdef CONFIG_TRACER_MAX_TRACE 6278 6279 static ssize_t 6280 tracing_max_lat_read(struct file *filp, char __user *ubuf, 6281 size_t cnt, loff_t *ppos) 6282 { 6283 struct trace_array *tr = filp->private_data; 6284 6285 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos); 6286 } 6287 6288 static ssize_t 6289 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 6290 size_t cnt, loff_t *ppos) 6291 { 6292 struct trace_array *tr = filp->private_data; 6293 6294 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos); 6295 } 6296 6297 #endif 6298 6299 static int open_pipe_on_cpu(struct trace_array *tr, int cpu) 6300 { 6301 if (cpu == RING_BUFFER_ALL_CPUS) { 6302 if (cpumask_empty(tr->pipe_cpumask)) { 6303 cpumask_setall(tr->pipe_cpumask); 6304 return 0; 6305 } 6306 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) { 6307 cpumask_set_cpu(cpu, tr->pipe_cpumask); 6308 return 
0; 6309 } 6310 return -EBUSY; 6311 } 6312 6313 static void close_pipe_on_cpu(struct trace_array *tr, int cpu) 6314 { 6315 if (cpu == RING_BUFFER_ALL_CPUS) { 6316 WARN_ON(!cpumask_full(tr->pipe_cpumask)); 6317 cpumask_clear(tr->pipe_cpumask); 6318 } else { 6319 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask)); 6320 cpumask_clear_cpu(cpu, tr->pipe_cpumask); 6321 } 6322 } 6323 6324 static int tracing_open_pipe(struct inode *inode, struct file *filp) 6325 { 6326 struct trace_array *tr = inode->i_private; 6327 struct trace_iterator *iter; 6328 int cpu; 6329 int ret; 6330 6331 ret = tracing_check_open_get_tr(tr); 6332 if (ret) 6333 return ret; 6334 6335 mutex_lock(&trace_types_lock); 6336 cpu = tracing_get_cpu(inode); 6337 ret = open_pipe_on_cpu(tr, cpu); 6338 if (ret) 6339 goto fail_pipe_on_cpu; 6340 6341 /* create a buffer to store the information to pass to userspace */ 6342 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 6343 if (!iter) { 6344 ret = -ENOMEM; 6345 goto fail_alloc_iter; 6346 } 6347 6348 trace_seq_init(&iter->seq); 6349 iter->trace = tr->current_trace; 6350 6351 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 6352 ret = -ENOMEM; 6353 goto fail; 6354 } 6355 6356 /* trace pipe does not show start of buffer */ 6357 cpumask_setall(iter->started); 6358 6359 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 6360 iter->iter_flags |= TRACE_FILE_LAT_FMT; 6361 6362 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 6363 if (trace_clocks[tr->clock_id].in_ns) 6364 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 6365 6366 iter->tr = tr; 6367 iter->array_buffer = &tr->array_buffer; 6368 iter->cpu_file = cpu; 6369 mutex_init(&iter->mutex); 6370 filp->private_data = iter; 6371 6372 if (iter->trace->pipe_open) 6373 iter->trace->pipe_open(iter); 6374 6375 nonseekable_open(inode, filp); 6376 6377 tr->trace_ref++; 6378 6379 mutex_unlock(&trace_types_lock); 6380 return ret; 6381 6382 fail: 6383 kfree(iter); 6384 fail_alloc_iter: 6385 close_pipe_on_cpu(tr, cpu); 6386 fail_pipe_on_cpu: 6387 __trace_array_put(tr); 6388 mutex_unlock(&trace_types_lock); 6389 return ret; 6390 } 6391 6392 static int tracing_release_pipe(struct inode *inode, struct file *file) 6393 { 6394 struct trace_iterator *iter = file->private_data; 6395 struct trace_array *tr = inode->i_private; 6396 6397 mutex_lock(&trace_types_lock); 6398 6399 tr->trace_ref--; 6400 6401 if (iter->trace->pipe_close) 6402 iter->trace->pipe_close(iter); 6403 close_pipe_on_cpu(tr, iter->cpu_file); 6404 mutex_unlock(&trace_types_lock); 6405 6406 free_trace_iter_content(iter); 6407 kfree(iter); 6408 6409 trace_array_put(tr); 6410 6411 return 0; 6412 } 6413 6414 static __poll_t 6415 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) 6416 { 6417 struct trace_array *tr = iter->tr; 6418 6419 /* Iterators are static, they should be filled or empty */ 6420 if (trace_buffer_iter(iter, iter->cpu_file)) 6421 return EPOLLIN | EPOLLRDNORM; 6422 6423 if (tr->trace_flags & TRACE_ITER_BLOCK) 6424 /* 6425 * Always select as readable when in blocking mode 6426 */ 6427 return EPOLLIN | EPOLLRDNORM; 6428 else 6429 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, 6430 filp, poll_table, iter->tr->buffer_percent); 6431 } 6432 6433 static __poll_t 6434 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 6435 { 6436 struct trace_iterator *iter = filp->private_data; 6437 6438 return trace_poll(iter, filp, poll_table); 6439 } 6440 6441 /* Must be called with iter->mutex held. 
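*
* Returns 1 once data is available (or once tracing has been disabled
* after something was read), -EAGAIN for an O_NONBLOCK reader with an
* empty buffer, or a negative error propagated from wait_on_pipe().
* Note that iter->mutex is dropped while waiting.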
*/
6442 static int tracing_wait_pipe(struct file *filp)
6443 {
6444 struct trace_iterator *iter = filp->private_data;
6445 int ret;
6446
6447 while (trace_empty(iter)) {
6448
6449 if ((filp->f_flags & O_NONBLOCK)) {
6450 return -EAGAIN;
6451 }
6452
6453 /*
6454 * We block until we read something and tracing is disabled.
6455 * We still block if tracing is disabled, but we have never
6456 * read anything. This allows a user to cat this file, and
6457 * then enable tracing. But after we have read something,
6458 * we give an EOF when tracing is disabled again.
6459 *
6460 * iter->pos will be 0 if we haven't read anything.
6461 */
6462 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6463 break;
6464
6465 mutex_unlock(&iter->mutex);
6466
6467 ret = wait_on_pipe(iter, 0);
6468
6469 mutex_lock(&iter->mutex);
6470
6471 if (ret)
6472 return ret;
6473 }
6474
6475 return 1;
6476 }
6477
6478 /*
6479 * Consumer reader.
6480 */
6481 static ssize_t
6482 tracing_read_pipe(struct file *filp, char __user *ubuf,
6483 size_t cnt, loff_t *ppos)
6484 {
6485 struct trace_iterator *iter = filp->private_data;
6486 ssize_t sret;
6487
6488 /*
6489 * Avoid more than one consumer on a single file descriptor.
6490 * This is just a matter of trace coherency; the ring buffer itself
6491 * is protected.
6492 */
6493 guard(mutex)(&iter->mutex);
6494
6495 /* return any leftover data */
6496 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6497 if (sret != -EBUSY)
6498 return sret;
6499
6500 trace_seq_init(&iter->seq);
6501
6502 if (iter->trace->read) {
6503 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6504 if (sret)
6505 return sret;
6506 }
6507
6508 waitagain:
6509 sret = tracing_wait_pipe(filp);
6510 if (sret <= 0)
6511 return sret;
6512
6513 /* stop when tracing is finished */
6514 if (trace_empty(iter))
6515 return 0;
6516
6517 if (cnt >= TRACE_SEQ_BUFFER_SIZE)
6518 cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6519
6520 /* reset all but tr, trace, and overruns */
6521 trace_iterator_reset(iter);
6522 cpumask_clear(iter->started);
6523 trace_seq_init(&iter->seq);
6524
6525 trace_event_read_lock();
6526 trace_access_lock(iter->cpu_file);
6527 while (trace_find_next_entry_inc(iter) != NULL) {
6528 enum print_line_t ret;
6529 int save_len = iter->seq.seq.len;
6530
6531 ret = print_trace_line(iter);
6532 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6533 /*
6534 * If one print_trace_line() fills the entire trace_seq in one shot,
6535 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6536 * In this case, we need to consume it; otherwise, the loop will peek
6537 * at this event again next time, resulting in an infinite loop.
6538 */
6539 if (save_len == 0) {
6540 iter->seq.full = 0;
6541 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6542 trace_consume(iter);
6543 break;
6544 }
6545
6546 /* In other cases, don't print partial lines */
6547 iter->seq.seq.len = save_len;
6548 break;
6549 }
6550 if (ret != TRACE_TYPE_NO_CONSUME)
6551 trace_consume(iter);
6552
6553 if (trace_seq_used(&iter->seq) >= cnt)
6554 break;
6555
6556 /*
6557 * Setting the full flag means we reached the trace_seq buffer size,
6558 * so we should have left via the partial-output condition above.
6559 * If not, one of the trace_seq_* functions was not used properly. */
6560 */ 6561 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 6562 iter->ent->type); 6563 } 6564 trace_access_unlock(iter->cpu_file); 6565 trace_event_read_unlock(); 6566 6567 /* Now copy what we have to the user */ 6568 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 6569 if (iter->seq.readpos >= trace_seq_used(&iter->seq)) 6570 trace_seq_init(&iter->seq); 6571 6572 /* 6573 * If there was nothing to send to user, in spite of consuming trace 6574 * entries, go back to wait for more entries. 6575 */ 6576 if (sret == -EBUSY) 6577 goto waitagain; 6578 6579 return sret; 6580 } 6581 6582 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 6583 unsigned int idx) 6584 { 6585 __free_page(spd->pages[idx]); 6586 } 6587 6588 static size_t 6589 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 6590 { 6591 size_t count; 6592 int save_len; 6593 int ret; 6594 6595 /* Seq buffer is page-sized, exactly what we need. */ 6596 for (;;) { 6597 save_len = iter->seq.seq.len; 6598 ret = print_trace_line(iter); 6599 6600 if (trace_seq_has_overflowed(&iter->seq)) { 6601 iter->seq.seq.len = save_len; 6602 break; 6603 } 6604 6605 /* 6606 * This should not be hit, because it should only 6607 * be set if the iter->seq overflowed. But check it 6608 * anyway to be safe. 6609 */ 6610 if (ret == TRACE_TYPE_PARTIAL_LINE) { 6611 iter->seq.seq.len = save_len; 6612 break; 6613 } 6614 6615 count = trace_seq_used(&iter->seq) - save_len; 6616 if (rem < count) { 6617 rem = 0; 6618 iter->seq.seq.len = save_len; 6619 break; 6620 } 6621 6622 if (ret != TRACE_TYPE_NO_CONSUME) 6623 trace_consume(iter); 6624 rem -= count; 6625 if (!trace_find_next_entry_inc(iter)) { 6626 rem = 0; 6627 iter->ent = NULL; 6628 break; 6629 } 6630 } 6631 6632 return rem; 6633 } 6634 6635 static ssize_t tracing_splice_read_pipe(struct file *filp, 6636 loff_t *ppos, 6637 struct pipe_inode_info *pipe, 6638 size_t len, 6639 unsigned int flags) 6640 { 6641 struct page *pages_def[PIPE_DEF_BUFFERS]; 6642 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 6643 struct trace_iterator *iter = filp->private_data; 6644 struct splice_pipe_desc spd = { 6645 .pages = pages_def, 6646 .partial = partial_def, 6647 .nr_pages = 0, /* This gets updated below. */ 6648 .nr_pages_max = PIPE_DEF_BUFFERS, 6649 .ops = &default_pipe_buf_ops, 6650 .spd_release = tracing_spd_release_pipe, 6651 }; 6652 ssize_t ret; 6653 size_t rem; 6654 unsigned int i; 6655 6656 if (splice_grow_spd(pipe, &spd)) 6657 return -ENOMEM; 6658 6659 mutex_lock(&iter->mutex); 6660 6661 if (iter->trace->splice_read) { 6662 ret = iter->trace->splice_read(iter, filp, 6663 ppos, pipe, len, flags); 6664 if (ret) 6665 goto out_err; 6666 } 6667 6668 ret = tracing_wait_pipe(filp); 6669 if (ret <= 0) 6670 goto out_err; 6671 6672 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 6673 ret = -EFAULT; 6674 goto out_err; 6675 } 6676 6677 trace_event_read_lock(); 6678 trace_access_lock(iter->cpu_file); 6679 6680 /* Fill as many pages as possible. */ 6681 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { 6682 spd.pages[i] = alloc_page(GFP_KERNEL); 6683 if (!spd.pages[i]) 6684 break; 6685 6686 rem = tracing_fill_pipe_page(rem, iter); 6687 6688 /* Copy the data into the page, so we can start over. 
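* The seq buffer is page-sized (see tracing_fill_pipe_page() above),
* so the trace_seq_used() bytes always fit in the freshly allocated
* page.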
*/ 6689 ret = trace_seq_to_buffer(&iter->seq, 6690 page_address(spd.pages[i]), 6691 trace_seq_used(&iter->seq)); 6692 if (ret < 0) { 6693 __free_page(spd.pages[i]); 6694 break; 6695 } 6696 spd.partial[i].offset = 0; 6697 spd.partial[i].len = trace_seq_used(&iter->seq); 6698 6699 trace_seq_init(&iter->seq); 6700 } 6701 6702 trace_access_unlock(iter->cpu_file); 6703 trace_event_read_unlock(); 6704 mutex_unlock(&iter->mutex); 6705 6706 spd.nr_pages = i; 6707 6708 if (i) 6709 ret = splice_to_pipe(pipe, &spd); 6710 else 6711 ret = 0; 6712 out: 6713 splice_shrink_spd(&spd); 6714 return ret; 6715 6716 out_err: 6717 mutex_unlock(&iter->mutex); 6718 goto out; 6719 } 6720 6721 static ssize_t 6722 tracing_entries_read(struct file *filp, char __user *ubuf, 6723 size_t cnt, loff_t *ppos) 6724 { 6725 struct inode *inode = file_inode(filp); 6726 struct trace_array *tr = inode->i_private; 6727 int cpu = tracing_get_cpu(inode); 6728 char buf[64]; 6729 int r = 0; 6730 ssize_t ret; 6731 6732 mutex_lock(&trace_types_lock); 6733 6734 if (cpu == RING_BUFFER_ALL_CPUS) { 6735 int cpu, buf_size_same; 6736 unsigned long size; 6737 6738 size = 0; 6739 buf_size_same = 1; 6740 /* check if all cpu sizes are same */ 6741 for_each_tracing_cpu(cpu) { 6742 /* fill in the size from first enabled cpu */ 6743 if (size == 0) 6744 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; 6745 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { 6746 buf_size_same = 0; 6747 break; 6748 } 6749 } 6750 6751 if (buf_size_same) { 6752 if (!tr->ring_buffer_expanded) 6753 r = sprintf(buf, "%lu (expanded: %lu)\n", 6754 size >> 10, 6755 trace_buf_size >> 10); 6756 else 6757 r = sprintf(buf, "%lu\n", size >> 10); 6758 } else 6759 r = sprintf(buf, "X\n"); 6760 } else 6761 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); 6762 6763 mutex_unlock(&trace_types_lock); 6764 6765 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6766 return ret; 6767 } 6768 6769 static ssize_t 6770 tracing_entries_write(struct file *filp, const char __user *ubuf, 6771 size_t cnt, loff_t *ppos) 6772 { 6773 struct inode *inode = file_inode(filp); 6774 struct trace_array *tr = inode->i_private; 6775 unsigned long val; 6776 int ret; 6777 6778 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6779 if (ret) 6780 return ret; 6781 6782 /* must have at least 1 entry */ 6783 if (!val) 6784 return -EINVAL; 6785 6786 /* value is in KB */ 6787 val <<= 10; 6788 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); 6789 if (ret < 0) 6790 return ret; 6791 6792 *ppos += cnt; 6793 6794 return cnt; 6795 } 6796 6797 static ssize_t 6798 tracing_total_entries_read(struct file *filp, char __user *ubuf, 6799 size_t cnt, loff_t *ppos) 6800 { 6801 struct trace_array *tr = filp->private_data; 6802 char buf[64]; 6803 int r, cpu; 6804 unsigned long size = 0, expanded_size = 0; 6805 6806 mutex_lock(&trace_types_lock); 6807 for_each_tracing_cpu(cpu) { 6808 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; 6809 if (!tr->ring_buffer_expanded) 6810 expanded_size += trace_buf_size >> 10; 6811 } 6812 if (tr->ring_buffer_expanded) 6813 r = sprintf(buf, "%lu\n", size); 6814 else 6815 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 6816 mutex_unlock(&trace_types_lock); 6817 6818 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6819 } 6820 6821 static ssize_t 6822 tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 6823 { 6824 struct trace_array *tr = filp->private_data; 
6825 struct seq_buf seq; 6826 char buf[64]; 6827 6828 seq_buf_init(&seq, buf, 64); 6829 6830 seq_buf_printf(&seq, "text delta:\t%ld\n", tr->text_delta); 6831 seq_buf_printf(&seq, "data delta:\t%ld\n", tr->data_delta); 6832 6833 return simple_read_from_buffer(ubuf, cnt, ppos, buf, seq_buf_used(&seq)); 6834 } 6835 6836 static int tracing_buffer_meta_open(struct inode *inode, struct file *filp) 6837 { 6838 struct trace_array *tr = inode->i_private; 6839 int cpu = tracing_get_cpu(inode); 6840 int ret; 6841 6842 ret = tracing_check_open_get_tr(tr); 6843 if (ret) 6844 return ret; 6845 6846 ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu); 6847 if (ret < 0) 6848 __trace_array_put(tr); 6849 return ret; 6850 } 6851 6852 static ssize_t 6853 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 6854 size_t cnt, loff_t *ppos) 6855 { 6856 /* 6857 * There is no need to read what the user has written, this function 6858 * is just to make sure that there is no error when "echo" is used 6859 */ 6860 6861 *ppos += cnt; 6862 6863 return cnt; 6864 } 6865 6866 static int 6867 tracing_free_buffer_release(struct inode *inode, struct file *filp) 6868 { 6869 struct trace_array *tr = inode->i_private; 6870 6871 /* disable tracing ? */ 6872 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) 6873 tracer_tracing_off(tr); 6874 /* resize the ring buffer to 0 */ 6875 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 6876 6877 trace_array_put(tr); 6878 6879 return 0; 6880 } 6881 6882 #define TRACE_MARKER_MAX_SIZE 4096 6883 6884 static ssize_t 6885 tracing_mark_write(struct file *filp, const char __user *ubuf, 6886 size_t cnt, loff_t *fpos) 6887 { 6888 struct trace_array *tr = filp->private_data; 6889 struct ring_buffer_event *event; 6890 enum event_trigger_type tt = ETT_NONE; 6891 struct trace_buffer *buffer; 6892 struct print_entry *entry; 6893 int meta_size; 6894 ssize_t written; 6895 size_t size; 6896 int len; 6897 6898 /* Used in tracing_mark_raw_write() as well */ 6899 #define FAULTED_STR "<faulted>" 6900 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */ 6901 6902 if (tracing_disabled) 6903 return -EINVAL; 6904 6905 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 6906 return -EINVAL; 6907 6908 if ((ssize_t)cnt < 0) 6909 return -EINVAL; 6910 6911 if (cnt > TRACE_MARKER_MAX_SIZE) 6912 cnt = TRACE_MARKER_MAX_SIZE; 6913 6914 meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */ 6915 again: 6916 size = cnt + meta_size; 6917 6918 /* If less than "<faulted>", then make sure we can still add that */ 6919 if (cnt < FAULTED_SIZE) 6920 size += FAULTED_SIZE - cnt; 6921 6922 buffer = tr->array_buffer.buffer; 6923 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 6924 tracing_gen_ctx()); 6925 if (unlikely(!event)) { 6926 /* 6927 * If the size was greater than what was allowed, then 6928 * make it smaller and try again. 
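* ring_buffer_max_event_size() is the largest payload one ring buffer
* event can carry; cnt is clamped so the retried size is guaranteed
* to fit.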
6929 */
6930 if (size > ring_buffer_max_event_size(buffer)) {
6931 /* A cnt smaller than FAULTED_SIZE can never make size bigger than max */
6932 if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
6933 return -EBADF;
6934 cnt = ring_buffer_max_event_size(buffer) - meta_size;
6935 /* The above should only happen once */
6936 if (WARN_ON_ONCE(cnt + meta_size == size))
6937 return -EBADF;
6938 goto again;
6939 }
6940
6941 /* Ring buffer disabled, return as if not open for write */
6942 return -EBADF;
6943 }
6944
6945 entry = ring_buffer_event_data(event);
6946 entry->ip = _THIS_IP_;
6947
6948 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6949 if (len) {
6950 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6951 cnt = FAULTED_SIZE;
6952 written = -EFAULT;
6953 } else
6954 written = cnt;
6955
6956 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6957 /* do not add \n before testing triggers, but add \0 */
6958 entry->buf[cnt] = '\0';
6959 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
6960 }
6961
6962 if (entry->buf[cnt - 1] != '\n') {
6963 entry->buf[cnt] = '\n';
6964 entry->buf[cnt + 1] = '\0';
6965 } else
6966 entry->buf[cnt] = '\0';
6967
6968 if (static_branch_unlikely(&trace_marker_exports_enabled))
6969 ftrace_exports(event, TRACE_EXPORT_MARKER);
6970 __buffer_unlock_commit(buffer, event);
6971
6972 if (tt)
6973 event_triggers_post_call(tr->trace_marker_file, tt);
6974
6975 return written;
6976 }
6977
6978 static ssize_t
6979 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6980 size_t cnt, loff_t *fpos)
6981 {
6982 struct trace_array *tr = filp->private_data;
6983 struct ring_buffer_event *event;
6984 struct trace_buffer *buffer;
6985 struct raw_data_entry *entry;
6986 ssize_t written;
6987 int size;
6988 int len;
6989
6990 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6991
6992 if (tracing_disabled)
6993 return -EINVAL;
6994
6995 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6996 return -EINVAL;
6997
6998 /* The marker must at least have a tag id */
6999 if (cnt < sizeof(unsigned int))
7000 return -EINVAL;
7001
7002 size = sizeof(*entry) + cnt;
7003 if (cnt < FAULT_SIZE_ID)
7004 size += FAULT_SIZE_ID - cnt;
7005
7006 buffer = tr->array_buffer.buffer;
7007
7008 if (size > ring_buffer_max_event_size(buffer))
7009 return -EINVAL;
7010
7011 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7012 tracing_gen_ctx());
7013 if (!event)
7014 /* Ring buffer disabled, return as if not open for write */
7015 return -EBADF;
7016
7017 entry = ring_buffer_event_data(event);
7018
7019 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7020 if (len) {
7021 entry->id = -1;
7022 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7023 written = -EFAULT;
7024 } else
7025 written = cnt;
7026
7027 __buffer_unlock_commit(buffer, event);
7028
7029 return written;
7030 }
7031
7032 static int tracing_clock_show(struct seq_file *m, void *v)
7033 {
7034 struct trace_array *tr = m->private;
7035 int i;
7036
7037 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7038 seq_printf(m,
7039 "%s%s%s%s", i ? " " : "",
7040 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7041 i == tr->clock_id ?
"]" : ""); 7042 seq_putc(m, '\n'); 7043 7044 return 0; 7045 } 7046 7047 int tracing_set_clock(struct trace_array *tr, const char *clockstr) 7048 { 7049 int i; 7050 7051 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 7052 if (strcmp(trace_clocks[i].name, clockstr) == 0) 7053 break; 7054 } 7055 if (i == ARRAY_SIZE(trace_clocks)) 7056 return -EINVAL; 7057 7058 mutex_lock(&trace_types_lock); 7059 7060 tr->clock_id = i; 7061 7062 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); 7063 7064 /* 7065 * New clock may not be consistent with the previous clock. 7066 * Reset the buffer so that it doesn't have incomparable timestamps. 7067 */ 7068 tracing_reset_online_cpus(&tr->array_buffer); 7069 7070 #ifdef CONFIG_TRACER_MAX_TRACE 7071 if (tr->max_buffer.buffer) 7072 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); 7073 tracing_reset_online_cpus(&tr->max_buffer); 7074 #endif 7075 7076 mutex_unlock(&trace_types_lock); 7077 7078 return 0; 7079 } 7080 7081 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 7082 size_t cnt, loff_t *fpos) 7083 { 7084 struct seq_file *m = filp->private_data; 7085 struct trace_array *tr = m->private; 7086 char buf[64]; 7087 const char *clockstr; 7088 int ret; 7089 7090 if (cnt >= sizeof(buf)) 7091 return -EINVAL; 7092 7093 if (copy_from_user(buf, ubuf, cnt)) 7094 return -EFAULT; 7095 7096 buf[cnt] = 0; 7097 7098 clockstr = strstrip(buf); 7099 7100 ret = tracing_set_clock(tr, clockstr); 7101 if (ret) 7102 return ret; 7103 7104 *fpos += cnt; 7105 7106 return cnt; 7107 } 7108 7109 static int tracing_clock_open(struct inode *inode, struct file *file) 7110 { 7111 struct trace_array *tr = inode->i_private; 7112 int ret; 7113 7114 ret = tracing_check_open_get_tr(tr); 7115 if (ret) 7116 return ret; 7117 7118 ret = single_open(file, tracing_clock_show, inode->i_private); 7119 if (ret < 0) 7120 trace_array_put(tr); 7121 7122 return ret; 7123 } 7124 7125 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) 7126 { 7127 struct trace_array *tr = m->private; 7128 7129 mutex_lock(&trace_types_lock); 7130 7131 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) 7132 seq_puts(m, "delta [absolute]\n"); 7133 else 7134 seq_puts(m, "[delta] absolute\n"); 7135 7136 mutex_unlock(&trace_types_lock); 7137 7138 return 0; 7139 } 7140 7141 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) 7142 { 7143 struct trace_array *tr = inode->i_private; 7144 int ret; 7145 7146 ret = tracing_check_open_get_tr(tr); 7147 if (ret) 7148 return ret; 7149 7150 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); 7151 if (ret < 0) 7152 trace_array_put(tr); 7153 7154 return ret; 7155 } 7156 7157 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe) 7158 { 7159 if (rbe == this_cpu_read(trace_buffered_event)) 7160 return ring_buffer_time_stamp(buffer); 7161 7162 return ring_buffer_event_time_stamp(buffer, rbe); 7163 } 7164 7165 /* 7166 * Set or disable using the per CPU trace_buffer_event when possible. 
7167 */ 7168 int tracing_set_filter_buffering(struct trace_array *tr, bool set) 7169 { 7170 guard(mutex)(&trace_types_lock); 7171 7172 if (set && tr->no_filter_buffering_ref++) 7173 return 0; 7174 7175 if (!set) { 7176 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) 7177 return -EINVAL; 7178 7179 --tr->no_filter_buffering_ref; 7180 } 7181 7182 return 0; 7183 } 7184 7185 struct ftrace_buffer_info { 7186 struct trace_iterator iter; 7187 void *spare; 7188 unsigned int spare_cpu; 7189 unsigned int spare_size; 7190 unsigned int read; 7191 }; 7192 7193 #ifdef CONFIG_TRACER_SNAPSHOT 7194 static int tracing_snapshot_open(struct inode *inode, struct file *file) 7195 { 7196 struct trace_array *tr = inode->i_private; 7197 struct trace_iterator *iter; 7198 struct seq_file *m; 7199 int ret; 7200 7201 ret = tracing_check_open_get_tr(tr); 7202 if (ret) 7203 return ret; 7204 7205 if (file->f_mode & FMODE_READ) { 7206 iter = __tracing_open(inode, file, true); 7207 if (IS_ERR(iter)) 7208 ret = PTR_ERR(iter); 7209 } else { 7210 /* Writes still need the seq_file to hold the private data */ 7211 ret = -ENOMEM; 7212 m = kzalloc(sizeof(*m), GFP_KERNEL); 7213 if (!m) 7214 goto out; 7215 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 7216 if (!iter) { 7217 kfree(m); 7218 goto out; 7219 } 7220 ret = 0; 7221 7222 iter->tr = tr; 7223 iter->array_buffer = &tr->max_buffer; 7224 iter->cpu_file = tracing_get_cpu(inode); 7225 m->private = iter; 7226 file->private_data = m; 7227 } 7228 out: 7229 if (ret < 0) 7230 trace_array_put(tr); 7231 7232 return ret; 7233 } 7234 7235 static void tracing_swap_cpu_buffer(void *tr) 7236 { 7237 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id()); 7238 } 7239 7240 static ssize_t 7241 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 7242 loff_t *ppos) 7243 { 7244 struct seq_file *m = filp->private_data; 7245 struct trace_iterator *iter = m->private; 7246 struct trace_array *tr = iter->tr; 7247 unsigned long val; 7248 int ret; 7249 7250 ret = tracing_update_buffers(tr); 7251 if (ret < 0) 7252 return ret; 7253 7254 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 7255 if (ret) 7256 return ret; 7257 7258 guard(mutex)(&trace_types_lock); 7259 7260 if (tr->current_trace->use_max_tr) 7261 return -EBUSY; 7262 7263 local_irq_disable(); 7264 arch_spin_lock(&tr->max_lock); 7265 if (tr->cond_snapshot) 7266 ret = -EBUSY; 7267 arch_spin_unlock(&tr->max_lock); 7268 local_irq_enable(); 7269 if (ret) 7270 return ret; 7271 7272 switch (val) { 7273 case 0: 7274 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) 7275 return -EINVAL; 7276 if (tr->allocated_snapshot) 7277 free_snapshot(tr); 7278 break; 7279 case 1: 7280 /* Only allow per-cpu swap if the ring buffer supports it */ 7281 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP 7282 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) 7283 return -EINVAL; 7284 #endif 7285 if (tr->allocated_snapshot) 7286 ret = resize_buffer_duplicate_size(&tr->max_buffer, 7287 &tr->array_buffer, iter->cpu_file); 7288 7289 ret = tracing_arm_snapshot_locked(tr); 7290 if (ret) 7291 return ret; 7292 7293 /* Now, we're going to swap */ 7294 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 7295 local_irq_disable(); 7296 update_max_tr(tr, current, smp_processor_id(), NULL); 7297 local_irq_enable(); 7298 } else { 7299 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, 7300 (void *)tr, 1); 7301 } 7302 tracing_disarm_snapshot(tr); 7303 break; 7304 default: 7305 if (tr->allocated_snapshot) { 7306 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 7307 
tracing_reset_online_cpus(&tr->max_buffer); 7308 else 7309 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); 7310 } 7311 break; 7312 } 7313 7314 if (ret >= 0) { 7315 *ppos += cnt; 7316 ret = cnt; 7317 } 7318 7319 return ret; 7320 } 7321 7322 static int tracing_snapshot_release(struct inode *inode, struct file *file) 7323 { 7324 struct seq_file *m = file->private_data; 7325 int ret; 7326 7327 ret = tracing_release(inode, file); 7328 7329 if (file->f_mode & FMODE_READ) 7330 return ret; 7331 7332 /* If write only, the seq_file is just a stub */ 7333 if (m) 7334 kfree(m->private); 7335 kfree(m); 7336 7337 return 0; 7338 } 7339 7340 static int tracing_buffers_open(struct inode *inode, struct file *filp); 7341 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 7342 size_t count, loff_t *ppos); 7343 static int tracing_buffers_release(struct inode *inode, struct file *file); 7344 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 7345 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 7346 7347 static int snapshot_raw_open(struct inode *inode, struct file *filp) 7348 { 7349 struct ftrace_buffer_info *info; 7350 int ret; 7351 7352 /* The following checks for tracefs lockdown */ 7353 ret = tracing_buffers_open(inode, filp); 7354 if (ret < 0) 7355 return ret; 7356 7357 info = filp->private_data; 7358 7359 if (info->iter.trace->use_max_tr) { 7360 tracing_buffers_release(inode, filp); 7361 return -EBUSY; 7362 } 7363 7364 info->iter.snapshot = true; 7365 info->iter.array_buffer = &info->iter.tr->max_buffer; 7366 7367 return ret; 7368 } 7369 7370 #endif /* CONFIG_TRACER_SNAPSHOT */ 7371 7372 7373 static const struct file_operations tracing_thresh_fops = { 7374 .open = tracing_open_generic, 7375 .read = tracing_thresh_read, 7376 .write = tracing_thresh_write, 7377 .llseek = generic_file_llseek, 7378 }; 7379 7380 #ifdef CONFIG_TRACER_MAX_TRACE 7381 static const struct file_operations tracing_max_lat_fops = { 7382 .open = tracing_open_generic_tr, 7383 .read = tracing_max_lat_read, 7384 .write = tracing_max_lat_write, 7385 .llseek = generic_file_llseek, 7386 .release = tracing_release_generic_tr, 7387 }; 7388 #endif 7389 7390 static const struct file_operations set_tracer_fops = { 7391 .open = tracing_open_generic_tr, 7392 .read = tracing_set_trace_read, 7393 .write = tracing_set_trace_write, 7394 .llseek = generic_file_llseek, 7395 .release = tracing_release_generic_tr, 7396 }; 7397 7398 static const struct file_operations tracing_pipe_fops = { 7399 .open = tracing_open_pipe, 7400 .poll = tracing_poll_pipe, 7401 .read = tracing_read_pipe, 7402 .splice_read = tracing_splice_read_pipe, 7403 .release = tracing_release_pipe, 7404 }; 7405 7406 static const struct file_operations tracing_entries_fops = { 7407 .open = tracing_open_generic_tr, 7408 .read = tracing_entries_read, 7409 .write = tracing_entries_write, 7410 .llseek = generic_file_llseek, 7411 .release = tracing_release_generic_tr, 7412 }; 7413 7414 static const struct file_operations tracing_buffer_meta_fops = { 7415 .open = tracing_buffer_meta_open, 7416 .read = seq_read, 7417 .llseek = seq_lseek, 7418 .release = tracing_seq_release, 7419 }; 7420 7421 static const struct file_operations tracing_total_entries_fops = { 7422 .open = tracing_open_generic_tr, 7423 .read = tracing_total_entries_read, 7424 .llseek = generic_file_llseek, 7425 .release = tracing_release_generic_tr, 7426 }; 7427 7428 static const struct file_operations tracing_free_buffer_fops = { 7429 .open = tracing_open_generic_tr, 
7430 .write = tracing_free_buffer_write,
7431 .release = tracing_free_buffer_release,
7432 };
7433
7434 static const struct file_operations tracing_mark_fops = {
7435 .open = tracing_mark_open,
7436 .write = tracing_mark_write,
7437 .release = tracing_release_generic_tr,
7438 };
7439
7440 static const struct file_operations tracing_mark_raw_fops = {
7441 .open = tracing_mark_open,
7442 .write = tracing_mark_raw_write,
7443 .release = tracing_release_generic_tr,
7444 };
7445
7446 static const struct file_operations trace_clock_fops = {
7447 .open = tracing_clock_open,
7448 .read = seq_read,
7449 .llseek = seq_lseek,
7450 .release = tracing_single_release_tr,
7451 .write = tracing_clock_write,
7452 };
7453
7454 static const struct file_operations trace_time_stamp_mode_fops = {
7455 .open = tracing_time_stamp_mode_open,
7456 .read = seq_read,
7457 .llseek = seq_lseek,
7458 .release = tracing_single_release_tr,
7459 };
7460
7461 static const struct file_operations last_boot_fops = {
7462 .open = tracing_open_generic_tr,
7463 .read = tracing_last_boot_read,
7464 .llseek = generic_file_llseek,
7465 .release = tracing_release_generic_tr,
7466 };
7467
7468 #ifdef CONFIG_TRACER_SNAPSHOT
7469 static const struct file_operations snapshot_fops = {
7470 .open = tracing_snapshot_open,
7471 .read = seq_read,
7472 .write = tracing_snapshot_write,
7473 .llseek = tracing_lseek,
7474 .release = tracing_snapshot_release,
7475 };
7476
7477 static const struct file_operations snapshot_raw_fops = {
7478 .open = snapshot_raw_open,
7479 .read = tracing_buffers_read,
7480 .release = tracing_buffers_release,
7481 .splice_read = tracing_buffers_splice_read,
7482 };
7483
7484 #endif /* CONFIG_TRACER_SNAPSHOT */
7485
7486 /*
7487 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7488 * @filp: The active open file structure
7489 * @ubuf: The userspace-provided buffer to read the value from
7490 * @cnt: The maximum number of bytes to read
7491 * @ppos: The current "file" position
7492 *
7493 * This function implements the write interface for a struct trace_min_max_param.
7494 * The filp->private_data must point to a trace_min_max_param structure that
7495 * defines where to write the value, the min and the max acceptable values,
7496 * and a lock to protect the write.
7497 */
7498 static ssize_t
7499 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7500 {
7501 struct trace_min_max_param *param = filp->private_data;
7502 u64 val;
7503 int err;
7504
7505 if (!param)
7506 return -EFAULT;
7507
7508 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7509 if (err)
7510 return err;
7511
7512 if (param->lock)
7513 mutex_lock(param->lock);
7514
7515 if (param->min && val < *param->min)
7516 err = -EINVAL;
7517
7518 if (param->max && val > *param->max)
7519 err = -EINVAL;
7520
7521 if (!err)
7522 *param->val = val;
7523
7524 if (param->lock)
7525 mutex_unlock(param->lock);
7526
7527 if (err)
7528 return err;
7529
7530 return cnt;
7531 }
7532
7533 /*
7534 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7535 * @filp: The active open file structure
7536 * @ubuf: The userspace-provided buffer to copy the value into
7537 * @cnt: The maximum number of bytes to read
7538 * @ppos: The current "file" position
7539 *
7540 * This function implements the read interface for a struct trace_min_max_param.
7541 * The filp->private_data must point to a trace_min_max_param struct with valid
7542 * data.
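*
* Returns the number of bytes copied to @ubuf via
* simple_read_from_buffer(), or -EFAULT if no parameter struct is
* attached to the file.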
7543 */ 7544 static ssize_t 7545 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 7546 { 7547 struct trace_min_max_param *param = filp->private_data; 7548 char buf[U64_STR_SIZE]; 7549 int len; 7550 u64 val; 7551 7552 if (!param) 7553 return -EFAULT; 7554 7555 val = *param->val; 7556 7557 if (cnt > sizeof(buf)) 7558 cnt = sizeof(buf); 7559 7560 len = snprintf(buf, sizeof(buf), "%llu\n", val); 7561 7562 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 7563 } 7564 7565 const struct file_operations trace_min_max_fops = { 7566 .open = tracing_open_generic, 7567 .read = trace_min_max_read, 7568 .write = trace_min_max_write, 7569 }; 7570 7571 #define TRACING_LOG_ERRS_MAX 8 7572 #define TRACING_LOG_LOC_MAX 128 7573 7574 #define CMD_PREFIX " Command: " 7575 7576 struct err_info { 7577 const char **errs; /* ptr to loc-specific array of err strings */ 7578 u8 type; /* index into errs -> specific err string */ 7579 u16 pos; /* caret position */ 7580 u64 ts; 7581 }; 7582 7583 struct tracing_log_err { 7584 struct list_head list; 7585 struct err_info info; 7586 char loc[TRACING_LOG_LOC_MAX]; /* err location */ 7587 char *cmd; /* what caused err */ 7588 }; 7589 7590 static DEFINE_MUTEX(tracing_err_log_lock); 7591 7592 static struct tracing_log_err *alloc_tracing_log_err(int len) 7593 { 7594 struct tracing_log_err *err; 7595 7596 err = kzalloc(sizeof(*err), GFP_KERNEL); 7597 if (!err) 7598 return ERR_PTR(-ENOMEM); 7599 7600 err->cmd = kzalloc(len, GFP_KERNEL); 7601 if (!err->cmd) { 7602 kfree(err); 7603 return ERR_PTR(-ENOMEM); 7604 } 7605 7606 return err; 7607 } 7608 7609 static void free_tracing_log_err(struct tracing_log_err *err) 7610 { 7611 kfree(err->cmd); 7612 kfree(err); 7613 } 7614 7615 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr, 7616 int len) 7617 { 7618 struct tracing_log_err *err; 7619 char *cmd; 7620 7621 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { 7622 err = alloc_tracing_log_err(len); 7623 if (PTR_ERR(err) != -ENOMEM) 7624 tr->n_err_log_entries++; 7625 7626 return err; 7627 } 7628 cmd = kzalloc(len, GFP_KERNEL); 7629 if (!cmd) 7630 return ERR_PTR(-ENOMEM); 7631 err = list_first_entry(&tr->err_log, struct tracing_log_err, list); 7632 kfree(err->cmd); 7633 err->cmd = cmd; 7634 list_del(&err->list); 7635 7636 return err; 7637 } 7638 7639 /** 7640 * err_pos - find the position of a string within a command for error careting 7641 * @cmd: The tracing command that caused the error 7642 * @str: The string to position the caret at within @cmd 7643 * 7644 * Finds the position of the first occurrence of @str within @cmd. The 7645 * return value can be passed to tracing_log_err() for caret placement 7646 * within @cmd. 7647 * 7648 * Returns the index within @cmd of the first occurrence of @str or 0 7649 * if @str was not found. 
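*
* Illustrative example: for cmd = "hist:keys=bad" and str = "bad",
* the returned offset places the caret under "bad" when the command
* is shown in the error log.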
7650 */ 7651 unsigned int err_pos(char *cmd, const char *str) 7652 { 7653 char *found; 7654 7655 if (WARN_ON(!strlen(cmd))) 7656 return 0; 7657 7658 found = strstr(cmd, str); 7659 if (found) 7660 return found - cmd; 7661 7662 return 0; 7663 } 7664 7665 /** 7666 * tracing_log_err - write an error to the tracing error log 7667 * @tr: The associated trace array for the error (NULL for top level array) 7668 * @loc: A string describing where the error occurred 7669 * @cmd: The tracing command that caused the error 7670 * @errs: The array of loc-specific static error strings 7671 * @type: The index into errs[], which produces the specific static err string 7672 * @pos: The position the caret should be placed in the cmd 7673 * 7674 * Writes an error into tracing/error_log of the form: 7675 * 7676 * <loc>: error: <text> 7677 * Command: <cmd> 7678 * ^ 7679 * 7680 * tracing/error_log is a small log file containing the last 7681 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated 7682 * unless there has been a tracing error, and the error log can be 7683 * cleared and have its memory freed by writing the empty string in 7684 * truncation mode to it i.e. echo > tracing/error_log. 7685 * 7686 * NOTE: the @errs array along with the @type param are used to 7687 * produce a static error string - this string is not copied and saved 7688 * when the error is logged - only a pointer to it is saved. See 7689 * existing callers for examples of how static strings are typically 7690 * defined for use with tracing_log_err(). 7691 */ 7692 void tracing_log_err(struct trace_array *tr, 7693 const char *loc, const char *cmd, 7694 const char **errs, u8 type, u16 pos) 7695 { 7696 struct tracing_log_err *err; 7697 int len = 0; 7698 7699 if (!tr) 7700 tr = &global_trace; 7701 7702 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1; 7703 7704 guard(mutex)(&tracing_err_log_lock); 7705 7706 err = get_tracing_log_err(tr, len); 7707 if (PTR_ERR(err) == -ENOMEM) 7708 return; 7709 7710 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); 7711 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd); 7712 7713 err->info.errs = errs; 7714 err->info.type = type; 7715 err->info.pos = pos; 7716 err->info.ts = local_clock(); 7717 7718 list_add_tail(&err->list, &tr->err_log); 7719 } 7720 7721 static void clear_tracing_err_log(struct trace_array *tr) 7722 { 7723 struct tracing_log_err *err, *next; 7724 7725 mutex_lock(&tracing_err_log_lock); 7726 list_for_each_entry_safe(err, next, &tr->err_log, list) { 7727 list_del(&err->list); 7728 free_tracing_log_err(err); 7729 } 7730 7731 tr->n_err_log_entries = 0; 7732 mutex_unlock(&tracing_err_log_lock); 7733 } 7734 7735 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) 7736 { 7737 struct trace_array *tr = m->private; 7738 7739 mutex_lock(&tracing_err_log_lock); 7740 7741 return seq_list_start(&tr->err_log, *pos); 7742 } 7743 7744 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) 7745 { 7746 struct trace_array *tr = m->private; 7747 7748 return seq_list_next(v, &tr->err_log, pos); 7749 } 7750 7751 static void tracing_err_log_seq_stop(struct seq_file *m, void *v) 7752 { 7753 mutex_unlock(&tracing_err_log_lock); 7754 } 7755 7756 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos) 7757 { 7758 u16 i; 7759 7760 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) 7761 seq_putc(m, ' '); 7762 for (i = 0; i < pos; i++) 7763 seq_putc(m, ' '); 7764 seq_puts(m, "^\n"); 7765 } 7766 7767 static int 
tracing_err_log_seq_show(struct seq_file *m, void *v) 7768 { 7769 struct tracing_log_err *err = v; 7770 7771 if (err) { 7772 const char *err_text = err->info.errs[err->info.type]; 7773 u64 sec = err->info.ts; 7774 u32 nsec; 7775 7776 nsec = do_div(sec, NSEC_PER_SEC); 7777 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000, 7778 err->loc, err_text); 7779 seq_printf(m, "%s", err->cmd); 7780 tracing_err_log_show_pos(m, err->info.pos); 7781 } 7782 7783 return 0; 7784 } 7785 7786 static const struct seq_operations tracing_err_log_seq_ops = { 7787 .start = tracing_err_log_seq_start, 7788 .next = tracing_err_log_seq_next, 7789 .stop = tracing_err_log_seq_stop, 7790 .show = tracing_err_log_seq_show 7791 }; 7792 7793 static int tracing_err_log_open(struct inode *inode, struct file *file) 7794 { 7795 struct trace_array *tr = inode->i_private; 7796 int ret = 0; 7797 7798 ret = tracing_check_open_get_tr(tr); 7799 if (ret) 7800 return ret; 7801 7802 /* If this file was opened for write, then erase contents */ 7803 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) 7804 clear_tracing_err_log(tr); 7805 7806 if (file->f_mode & FMODE_READ) { 7807 ret = seq_open(file, &tracing_err_log_seq_ops); 7808 if (!ret) { 7809 struct seq_file *m = file->private_data; 7810 m->private = tr; 7811 } else { 7812 trace_array_put(tr); 7813 } 7814 } 7815 return ret; 7816 } 7817 7818 static ssize_t tracing_err_log_write(struct file *file, 7819 const char __user *buffer, 7820 size_t count, loff_t *ppos) 7821 { 7822 return count; 7823 } 7824 7825 static int tracing_err_log_release(struct inode *inode, struct file *file) 7826 { 7827 struct trace_array *tr = inode->i_private; 7828 7829 trace_array_put(tr); 7830 7831 if (file->f_mode & FMODE_READ) 7832 seq_release(inode, file); 7833 7834 return 0; 7835 } 7836 7837 static const struct file_operations tracing_err_log_fops = { 7838 .open = tracing_err_log_open, 7839 .write = tracing_err_log_write, 7840 .read = seq_read, 7841 .llseek = tracing_lseek, 7842 .release = tracing_err_log_release, 7843 }; 7844 7845 static int tracing_buffers_open(struct inode *inode, struct file *filp) 7846 { 7847 struct trace_array *tr = inode->i_private; 7848 struct ftrace_buffer_info *info; 7849 int ret; 7850 7851 ret = tracing_check_open_get_tr(tr); 7852 if (ret) 7853 return ret; 7854 7855 info = kvzalloc(sizeof(*info), GFP_KERNEL); 7856 if (!info) { 7857 trace_array_put(tr); 7858 return -ENOMEM; 7859 } 7860 7861 mutex_lock(&trace_types_lock); 7862 7863 info->iter.tr = tr; 7864 info->iter.cpu_file = tracing_get_cpu(inode); 7865 info->iter.trace = tr->current_trace; 7866 info->iter.array_buffer = &tr->array_buffer; 7867 info->spare = NULL; 7868 /* Force reading ring buffer for first read */ 7869 info->read = (unsigned int)-1; 7870 7871 filp->private_data = info; 7872 7873 tr->trace_ref++; 7874 7875 mutex_unlock(&trace_types_lock); 7876 7877 ret = nonseekable_open(inode, filp); 7878 if (ret < 0) 7879 trace_array_put(tr); 7880 7881 return ret; 7882 } 7883 7884 static __poll_t 7885 tracing_buffers_poll(struct file *filp, poll_table *poll_table) 7886 { 7887 struct ftrace_buffer_info *info = filp->private_data; 7888 struct trace_iterator *iter = &info->iter; 7889 7890 return trace_poll(iter, filp, poll_table); 7891 } 7892 7893 static ssize_t 7894 tracing_buffers_read(struct file *filp, char __user *ubuf, 7895 size_t count, loff_t *ppos) 7896 { 7897 struct ftrace_buffer_info *info = filp->private_data; 7898 struct trace_iterator *iter = &info->iter; 7899 void *trace_data; 7900 int page_size; 7901 
ssize_t ret = 0; 7902 ssize_t size; 7903 7904 if (!count) 7905 return 0; 7906 7907 #ifdef CONFIG_TRACER_MAX_TRACE 7908 if (iter->snapshot && iter->tr->current_trace->use_max_tr) 7909 return -EBUSY; 7910 #endif 7911 7912 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer); 7913 7914 /* Make sure the spare matches the current sub buffer size */ 7915 if (info->spare) { 7916 if (page_size != info->spare_size) { 7917 ring_buffer_free_read_page(iter->array_buffer->buffer, 7918 info->spare_cpu, info->spare); 7919 info->spare = NULL; 7920 } 7921 } 7922 7923 if (!info->spare) { 7924 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, 7925 iter->cpu_file); 7926 if (IS_ERR(info->spare)) { 7927 ret = PTR_ERR(info->spare); 7928 info->spare = NULL; 7929 } else { 7930 info->spare_cpu = iter->cpu_file; 7931 info->spare_size = page_size; 7932 } 7933 } 7934 if (!info->spare) 7935 return ret; 7936 7937 /* Do we have previous read data to read? */ 7938 if (info->read < page_size) 7939 goto read; 7940 7941 again: 7942 trace_access_lock(iter->cpu_file); 7943 ret = ring_buffer_read_page(iter->array_buffer->buffer, 7944 info->spare, 7945 count, 7946 iter->cpu_file, 0); 7947 trace_access_unlock(iter->cpu_file); 7948 7949 if (ret < 0) { 7950 if (trace_empty(iter) && !iter->closed) { 7951 if ((filp->f_flags & O_NONBLOCK)) 7952 return -EAGAIN; 7953 7954 ret = wait_on_pipe(iter, 0); 7955 if (ret) 7956 return ret; 7957 7958 goto again; 7959 } 7960 return 0; 7961 } 7962 7963 info->read = 0; 7964 read: 7965 size = page_size - info->read; 7966 if (size > count) 7967 size = count; 7968 trace_data = ring_buffer_read_page_data(info->spare); 7969 ret = copy_to_user(ubuf, trace_data + info->read, size); 7970 if (ret == size) 7971 return -EFAULT; 7972 7973 size -= ret; 7974 7975 *ppos += size; 7976 info->read += size; 7977 7978 return size; 7979 } 7980 7981 static int tracing_buffers_flush(struct file *file, fl_owner_t id) 7982 { 7983 struct ftrace_buffer_info *info = file->private_data; 7984 struct trace_iterator *iter = &info->iter; 7985 7986 iter->closed = true; 7987 /* Make sure the waiters see the new wait_index */ 7988 (void)atomic_fetch_inc_release(&iter->wait_index); 7989 7990 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); 7991 7992 return 0; 7993 } 7994 7995 static int tracing_buffers_release(struct inode *inode, struct file *file) 7996 { 7997 struct ftrace_buffer_info *info = file->private_data; 7998 struct trace_iterator *iter = &info->iter; 7999 8000 mutex_lock(&trace_types_lock); 8001 8002 iter->tr->trace_ref--; 8003 8004 __trace_array_put(iter->tr); 8005 8006 if (info->spare) 8007 ring_buffer_free_read_page(iter->array_buffer->buffer, 8008 info->spare_cpu, info->spare); 8009 kvfree(info); 8010 8011 mutex_unlock(&trace_types_lock); 8012 8013 return 0; 8014 } 8015 8016 struct buffer_ref { 8017 struct trace_buffer *buffer; 8018 void *page; 8019 int cpu; 8020 refcount_t refcount; 8021 }; 8022 8023 static void buffer_ref_release(struct buffer_ref *ref) 8024 { 8025 if (!refcount_dec_and_test(&ref->refcount)) 8026 return; 8027 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); 8028 kfree(ref); 8029 } 8030 8031 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 8032 struct pipe_buffer *buf) 8033 { 8034 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 8035 8036 buffer_ref_release(ref); 8037 buf->private = 0; 8038 } 8039 8040 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe, 8041 struct pipe_buffer *buf) 8042 { 8043 
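	/*
	 * Refuse to take another reference once the count gets anywhere
	 * near overflow; callers must treat a false return as a failed get.
	 */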
struct buffer_ref *ref = (struct buffer_ref *)buf->private; 8044 8045 if (refcount_read(&ref->refcount) > INT_MAX/2) 8046 return false; 8047 8048 refcount_inc(&ref->refcount); 8049 return true; 8050 } 8051 8052 /* Pipe buffer operations for a buffer. */ 8053 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 8054 .release = buffer_pipe_buf_release, 8055 .get = buffer_pipe_buf_get, 8056 }; 8057 8058 /* 8059 * Callback from splice_to_pipe(), if we need to release some pages 8060 * at the end of the spd in case we error'ed out in filling the pipe. 8061 */ 8062 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) 8063 { 8064 struct buffer_ref *ref = 8065 (struct buffer_ref *)spd->partial[i].private; 8066 8067 buffer_ref_release(ref); 8068 spd->partial[i].private = 0; 8069 } 8070 8071 static ssize_t 8072 tracing_buffers_splice_read(struct file *file, loff_t *ppos, 8073 struct pipe_inode_info *pipe, size_t len, 8074 unsigned int flags) 8075 { 8076 struct ftrace_buffer_info *info = file->private_data; 8077 struct trace_iterator *iter = &info->iter; 8078 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 8079 struct page *pages_def[PIPE_DEF_BUFFERS]; 8080 struct splice_pipe_desc spd = { 8081 .pages = pages_def, 8082 .partial = partial_def, 8083 .nr_pages_max = PIPE_DEF_BUFFERS, 8084 .ops = &buffer_pipe_buf_ops, 8085 .spd_release = buffer_spd_release, 8086 }; 8087 struct buffer_ref *ref; 8088 bool woken = false; 8089 int page_size; 8090 int entries, i; 8091 ssize_t ret = 0; 8092 8093 #ifdef CONFIG_TRACER_MAX_TRACE 8094 if (iter->snapshot && iter->tr->current_trace->use_max_tr) 8095 return -EBUSY; 8096 #endif 8097 8098 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer); 8099 if (*ppos & (page_size - 1)) 8100 return -EINVAL; 8101 8102 if (len & (page_size - 1)) { 8103 if (len < page_size) 8104 return -EINVAL; 8105 len &= (~(page_size - 1)); 8106 } 8107 8108 if (splice_grow_spd(pipe, &spd)) 8109 return -ENOMEM; 8110 8111 again: 8112 trace_access_lock(iter->cpu_file); 8113 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); 8114 8115 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) { 8116 struct page *page; 8117 int r; 8118 8119 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 8120 if (!ref) { 8121 ret = -ENOMEM; 8122 break; 8123 } 8124 8125 refcount_set(&ref->refcount, 1); 8126 ref->buffer = iter->array_buffer->buffer; 8127 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 8128 if (IS_ERR(ref->page)) { 8129 ret = PTR_ERR(ref->page); 8130 ref->page = NULL; 8131 kfree(ref); 8132 break; 8133 } 8134 ref->cpu = iter->cpu_file; 8135 8136 r = ring_buffer_read_page(ref->buffer, ref->page, 8137 len, iter->cpu_file, 1); 8138 if (r < 0) { 8139 ring_buffer_free_read_page(ref->buffer, ref->cpu, 8140 ref->page); 8141 kfree(ref); 8142 break; 8143 } 8144 8145 page = virt_to_page(ring_buffer_read_page_data(ref->page)); 8146 8147 spd.pages[i] = page; 8148 spd.partial[i].len = page_size; 8149 spd.partial[i].offset = 0; 8150 spd.partial[i].private = (unsigned long)ref; 8151 spd.nr_pages++; 8152 *ppos += page_size; 8153 8154 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); 8155 } 8156 8157 trace_access_unlock(iter->cpu_file); 8158 spd.nr_pages = i; 8159 8160 /* did we read anything? 
*/ 8161 if (!spd.nr_pages) { 8162 8163 if (ret) 8164 goto out; 8165 8166 if (woken) 8167 goto out; 8168 8169 ret = -EAGAIN; 8170 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) 8171 goto out; 8172 8173 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent); 8174 if (ret) 8175 goto out; 8176 8177 /* No need to wait after waking up when tracing is off */ 8178 if (!tracer_tracing_is_on(iter->tr)) 8179 goto out; 8180 8181 /* Iterate one more time to collect any new data then exit */ 8182 woken = true; 8183 8184 goto again; 8185 } 8186 8187 ret = splice_to_pipe(pipe, &spd); 8188 out: 8189 splice_shrink_spd(&spd); 8190 8191 return ret; 8192 } 8193 8194 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 8195 { 8196 struct ftrace_buffer_info *info = file->private_data; 8197 struct trace_iterator *iter = &info->iter; 8198 int err; 8199 8200 if (cmd == TRACE_MMAP_IOCTL_GET_READER) { 8201 if (!(file->f_flags & O_NONBLOCK)) { 8202 err = ring_buffer_wait(iter->array_buffer->buffer, 8203 iter->cpu_file, 8204 iter->tr->buffer_percent, 8205 NULL, NULL); 8206 if (err) 8207 return err; 8208 } 8209 8210 return ring_buffer_map_get_reader(iter->array_buffer->buffer, 8211 iter->cpu_file); 8212 } else if (cmd) { 8213 return -ENOTTY; 8214 } 8215 8216 /* 8217 * An ioctl call with cmd 0 to the ring buffer file will wake up all 8218 * waiters 8219 */ 8220 mutex_lock(&trace_types_lock); 8221 8222 /* Make sure the waiters see the new wait_index */ 8223 (void)atomic_fetch_inc_release(&iter->wait_index); 8224 8225 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); 8226 8227 mutex_unlock(&trace_types_lock); 8228 return 0; 8229 } 8230 8231 #ifdef CONFIG_TRACER_MAX_TRACE 8232 static int get_snapshot_map(struct trace_array *tr) 8233 { 8234 int err = 0; 8235 8236 /* 8237 * Called with mmap_lock held. lockdep would be unhappy if we would now 8238 * take trace_types_lock. Instead use the specific 8239 * snapshot_trigger_lock. 
8240 */ 8241 spin_lock(&tr->snapshot_trigger_lock); 8242 8243 if (tr->snapshot || tr->mapped == UINT_MAX) 8244 err = -EBUSY; 8245 else 8246 tr->mapped++; 8247 8248 spin_unlock(&tr->snapshot_trigger_lock); 8249 8250 /* Wait for update_max_tr() to observe iter->tr->mapped */ 8251 if (tr->mapped == 1) 8252 synchronize_rcu(); 8253 8254 return err; 8255 8256 } 8257 static void put_snapshot_map(struct trace_array *tr) 8258 { 8259 spin_lock(&tr->snapshot_trigger_lock); 8260 if (!WARN_ON(!tr->mapped)) 8261 tr->mapped--; 8262 spin_unlock(&tr->snapshot_trigger_lock); 8263 } 8264 #else 8265 static inline int get_snapshot_map(struct trace_array *tr) { return 0; } 8266 static inline void put_snapshot_map(struct trace_array *tr) { } 8267 #endif 8268 8269 static void tracing_buffers_mmap_close(struct vm_area_struct *vma) 8270 { 8271 struct ftrace_buffer_info *info = vma->vm_file->private_data; 8272 struct trace_iterator *iter = &info->iter; 8273 8274 WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file)); 8275 put_snapshot_map(iter->tr); 8276 } 8277 8278 static const struct vm_operations_struct tracing_buffers_vmops = { 8279 .close = tracing_buffers_mmap_close, 8280 }; 8281 8282 static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma) 8283 { 8284 struct ftrace_buffer_info *info = filp->private_data; 8285 struct trace_iterator *iter = &info->iter; 8286 int ret = 0; 8287 8288 ret = get_snapshot_map(iter->tr); 8289 if (ret) 8290 return ret; 8291 8292 ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma); 8293 if (ret) 8294 put_snapshot_map(iter->tr); 8295 8296 vma->vm_ops = &tracing_buffers_vmops; 8297 8298 return ret; 8299 } 8300 8301 static const struct file_operations tracing_buffers_fops = { 8302 .open = tracing_buffers_open, 8303 .read = tracing_buffers_read, 8304 .poll = tracing_buffers_poll, 8305 .release = tracing_buffers_release, 8306 .flush = tracing_buffers_flush, 8307 .splice_read = tracing_buffers_splice_read, 8308 .unlocked_ioctl = tracing_buffers_ioctl, 8309 .mmap = tracing_buffers_mmap, 8310 }; 8311 8312 static ssize_t 8313 tracing_stats_read(struct file *filp, char __user *ubuf, 8314 size_t count, loff_t *ppos) 8315 { 8316 struct inode *inode = file_inode(filp); 8317 struct trace_array *tr = inode->i_private; 8318 struct array_buffer *trace_buf = &tr->array_buffer; 8319 int cpu = tracing_get_cpu(inode); 8320 struct trace_seq *s; 8321 unsigned long cnt; 8322 unsigned long long t; 8323 unsigned long usec_rem; 8324 8325 s = kmalloc(sizeof(*s), GFP_KERNEL); 8326 if (!s) 8327 return -ENOMEM; 8328 8329 trace_seq_init(s); 8330 8331 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); 8332 trace_seq_printf(s, "entries: %ld\n", cnt); 8333 8334 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); 8335 trace_seq_printf(s, "overrun: %ld\n", cnt); 8336 8337 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); 8338 trace_seq_printf(s, "commit overrun: %ld\n", cnt); 8339 8340 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); 8341 trace_seq_printf(s, "bytes: %ld\n", cnt); 8342 8343 if (trace_clocks[tr->clock_id].in_ns) { 8344 /* local or global for trace_clock */ 8345 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 8346 usec_rem = do_div(t, USEC_PER_SEC); 8347 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", 8348 t, usec_rem); 8349 8350 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer)); 8351 usec_rem = do_div(t, USEC_PER_SEC); 8352 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); 8353 } else { 
8354 /* counter or tsc mode for trace_clock */ 8355 trace_seq_printf(s, "oldest event ts: %llu\n", 8356 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 8357 8358 trace_seq_printf(s, "now ts: %llu\n", 8359 ring_buffer_time_stamp(trace_buf->buffer)); 8360 } 8361 8362 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); 8363 trace_seq_printf(s, "dropped events: %ld\n", cnt); 8364 8365 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); 8366 trace_seq_printf(s, "read events: %ld\n", cnt); 8367 8368 count = simple_read_from_buffer(ubuf, count, ppos, 8369 s->buffer, trace_seq_used(s)); 8370 8371 kfree(s); 8372 8373 return count; 8374 } 8375 8376 static const struct file_operations tracing_stats_fops = { 8377 .open = tracing_open_generic_tr, 8378 .read = tracing_stats_read, 8379 .llseek = generic_file_llseek, 8380 .release = tracing_release_generic_tr, 8381 }; 8382 8383 #ifdef CONFIG_DYNAMIC_FTRACE 8384 8385 static ssize_t 8386 tracing_read_dyn_info(struct file *filp, char __user *ubuf, 8387 size_t cnt, loff_t *ppos) 8388 { 8389 ssize_t ret; 8390 char *buf; 8391 int r; 8392 8393 /* 512 should be plenty to hold the amount needed */ 8394 #define DYN_INFO_BUF_SIZE 512 8395 8396 buf = kmalloc(DYN_INFO_BUF_SIZE, GFP_KERNEL); 8397 if (!buf) 8398 return -ENOMEM; 8399 8400 r = scnprintf(buf, DYN_INFO_BUF_SIZE, 8401 "%ld pages:%ld groups: %ld\n" 8402 "ftrace boot update time = %llu (ns)\n" 8403 "ftrace module total update time = %llu (ns)\n", 8404 ftrace_update_tot_cnt, 8405 ftrace_number_of_pages, 8406 ftrace_number_of_groups, 8407 ftrace_update_time, 8408 ftrace_total_mod_time); 8409 8410 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 8411 kfree(buf); 8412 return ret; 8413 } 8414 8415 static const struct file_operations tracing_dyn_info_fops = { 8416 .open = tracing_open_generic, 8417 .read = tracing_read_dyn_info, 8418 .llseek = generic_file_llseek, 8419 }; 8420 #endif /* CONFIG_DYNAMIC_FTRACE */ 8421 8422 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) 8423 static void 8424 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, 8425 struct trace_array *tr, struct ftrace_probe_ops *ops, 8426 void *data) 8427 { 8428 tracing_snapshot_instance(tr); 8429 } 8430 8431 static void 8432 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, 8433 struct trace_array *tr, struct ftrace_probe_ops *ops, 8434 void *data) 8435 { 8436 struct ftrace_func_mapper *mapper = data; 8437 long *count = NULL; 8438 8439 if (mapper) 8440 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 8441 8442 if (count) { 8443 8444 if (*count <= 0) 8445 return; 8446 8447 (*count)--; 8448 } 8449 8450 tracing_snapshot_instance(tr); 8451 } 8452 8453 static int 8454 ftrace_snapshot_print(struct seq_file *m, unsigned long ip, 8455 struct ftrace_probe_ops *ops, void *data) 8456 { 8457 struct ftrace_func_mapper *mapper = data; 8458 long *count = NULL; 8459 8460 seq_printf(m, "%ps:", (void *)ip); 8461 8462 seq_puts(m, "snapshot"); 8463 8464 if (mapper) 8465 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 8466 8467 if (count) 8468 seq_printf(m, ":count=%ld\n", *count); 8469 else 8470 seq_puts(m, ":unlimited\n"); 8471 8472 return 0; 8473 } 8474 8475 static int 8476 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, 8477 unsigned long ip, void *init_data, void **data) 8478 { 8479 struct ftrace_func_mapper *mapper = *data; 8480 8481 if (!mapper) { 8482 mapper = allocate_ftrace_func_mapper(); 8483 if (!mapper) 8484 return -ENOMEM; 8485 *data = mapper; 
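		/* later ips added to this probe will reuse this mapper */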
8486 } 8487 8488 return ftrace_func_mapper_add_ip(mapper, ip, init_data); 8489 } 8490 8491 static void 8492 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, 8493 unsigned long ip, void *data) 8494 { 8495 struct ftrace_func_mapper *mapper = data; 8496 8497 if (!ip) { 8498 if (!mapper) 8499 return; 8500 free_ftrace_func_mapper(mapper, NULL); 8501 return; 8502 } 8503 8504 ftrace_func_mapper_remove_ip(mapper, ip); 8505 } 8506 8507 static struct ftrace_probe_ops snapshot_probe_ops = { 8508 .func = ftrace_snapshot, 8509 .print = ftrace_snapshot_print, 8510 }; 8511 8512 static struct ftrace_probe_ops snapshot_count_probe_ops = { 8513 .func = ftrace_count_snapshot, 8514 .print = ftrace_snapshot_print, 8515 .init = ftrace_snapshot_init, 8516 .free = ftrace_snapshot_free, 8517 }; 8518 8519 static int 8520 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, 8521 char *glob, char *cmd, char *param, int enable) 8522 { 8523 struct ftrace_probe_ops *ops; 8524 void *count = (void *)-1; 8525 char *number; 8526 int ret; 8527 8528 if (!tr) 8529 return -ENODEV; 8530 8531 /* hash funcs only work with set_ftrace_filter */ 8532 if (!enable) 8533 return -EINVAL; 8534 8535 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; 8536 8537 if (glob[0] == '!') { 8538 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops); 8539 if (!ret) 8540 tracing_disarm_snapshot(tr); 8541 8542 return ret; 8543 } 8544 8545 if (!param) 8546 goto out_reg; 8547 8548 number = strsep(¶m, ":"); 8549 8550 if (!strlen(number)) 8551 goto out_reg; 8552 8553 /* 8554 * We use the callback data field (which is a pointer) 8555 * as our counter. 8556 */ 8557 ret = kstrtoul(number, 0, (unsigned long *)&count); 8558 if (ret) 8559 return ret; 8560 8561 out_reg: 8562 ret = tracing_arm_snapshot(tr); 8563 if (ret < 0) 8564 goto out; 8565 8566 ret = register_ftrace_function_probe(glob, tr, ops, count); 8567 if (ret < 0) 8568 tracing_disarm_snapshot(tr); 8569 out: 8570 return ret < 0 ? 
ret : 0; 8571 } 8572 8573 static struct ftrace_func_command ftrace_snapshot_cmd = { 8574 .name = "snapshot", 8575 .func = ftrace_trace_snapshot_callback, 8576 }; 8577 8578 static __init int register_snapshot_cmd(void) 8579 { 8580 return register_ftrace_command(&ftrace_snapshot_cmd); 8581 } 8582 #else 8583 static inline __init int register_snapshot_cmd(void) { return 0; } 8584 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ 8585 8586 static struct dentry *tracing_get_dentry(struct trace_array *tr) 8587 { 8588 if (WARN_ON(!tr->dir)) 8589 return ERR_PTR(-ENODEV); 8590 8591 /* Top directory uses NULL as the parent */ 8592 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 8593 return NULL; 8594 8595 /* All sub buffers have a descriptor */ 8596 return tr->dir; 8597 } 8598 8599 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) 8600 { 8601 struct dentry *d_tracer; 8602 8603 if (tr->percpu_dir) 8604 return tr->percpu_dir; 8605 8606 d_tracer = tracing_get_dentry(tr); 8607 if (IS_ERR(d_tracer)) 8608 return NULL; 8609 8610 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); 8611 8612 MEM_FAIL(!tr->percpu_dir, 8613 "Could not create tracefs directory 'per_cpu/%d'\n", cpu); 8614 8615 return tr->percpu_dir; 8616 } 8617 8618 static struct dentry * 8619 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, 8620 void *data, long cpu, const struct file_operations *fops) 8621 { 8622 struct dentry *ret = trace_create_file(name, mode, parent, data, fops); 8623 8624 if (ret) /* See tracing_get_cpu() */ 8625 d_inode(ret)->i_cdev = (void *)(cpu + 1); 8626 return ret; 8627 } 8628 8629 static void 8630 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) 8631 { 8632 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); 8633 struct dentry *d_cpu; 8634 char cpu_dir[30]; /* 30 characters should be more than enough */ 8635 8636 if (!d_percpu) 8637 return; 8638 8639 snprintf(cpu_dir, 30, "cpu%ld", cpu); 8640 d_cpu = tracefs_create_dir(cpu_dir, d_percpu); 8641 if (!d_cpu) { 8642 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); 8643 return; 8644 } 8645 8646 /* per cpu trace_pipe */ 8647 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu, 8648 tr, cpu, &tracing_pipe_fops); 8649 8650 /* per cpu trace */ 8651 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu, 8652 tr, cpu, &tracing_fops); 8653 8654 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu, 8655 tr, cpu, &tracing_buffers_fops); 8656 8657 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu, 8658 tr, cpu, &tracing_stats_fops); 8659 8660 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu, 8661 tr, cpu, &tracing_entries_fops); 8662 8663 if (tr->range_addr_start) 8664 trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu, 8665 tr, cpu, &tracing_buffer_meta_fops); 8666 #ifdef CONFIG_TRACER_SNAPSHOT 8667 if (!tr->range_addr_start) { 8668 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu, 8669 tr, cpu, &snapshot_fops); 8670 8671 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu, 8672 tr, cpu, &snapshot_raw_fops); 8673 } 8674 #endif 8675 } 8676 8677 #ifdef CONFIG_FTRACE_SELFTEST 8678 /* Let selftest have access to static functions in this file */ 8679 #include "trace_selftest.c" 8680 #endif 8681 8682 static ssize_t 8683 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, 8684 loff_t *ppos) 8685 { 8686 struct trace_option_dentry *topt = filp->private_data; 8687 char *buf; 8688 8689 if 
(topt->flags->val & topt->opt->bit) 8690 buf = "1\n"; 8691 else 8692 buf = "0\n"; 8693 8694 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 8695 } 8696 8697 static ssize_t 8698 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, 8699 loff_t *ppos) 8700 { 8701 struct trace_option_dentry *topt = filp->private_data; 8702 unsigned long val; 8703 int ret; 8704 8705 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 8706 if (ret) 8707 return ret; 8708 8709 if (val != 0 && val != 1) 8710 return -EINVAL; 8711 8712 if (!!(topt->flags->val & topt->opt->bit) != val) { 8713 mutex_lock(&trace_types_lock); 8714 ret = __set_tracer_option(topt->tr, topt->flags, 8715 topt->opt, !val); 8716 mutex_unlock(&trace_types_lock); 8717 if (ret) 8718 return ret; 8719 } 8720 8721 *ppos += cnt; 8722 8723 return cnt; 8724 } 8725 8726 static int tracing_open_options(struct inode *inode, struct file *filp) 8727 { 8728 struct trace_option_dentry *topt = inode->i_private; 8729 int ret; 8730 8731 ret = tracing_check_open_get_tr(topt->tr); 8732 if (ret) 8733 return ret; 8734 8735 filp->private_data = inode->i_private; 8736 return 0; 8737 } 8738 8739 static int tracing_release_options(struct inode *inode, struct file *file) 8740 { 8741 struct trace_option_dentry *topt = file->private_data; 8742 8743 trace_array_put(topt->tr); 8744 return 0; 8745 } 8746 8747 static const struct file_operations trace_options_fops = { 8748 .open = tracing_open_options, 8749 .read = trace_options_read, 8750 .write = trace_options_write, 8751 .llseek = generic_file_llseek, 8752 .release = tracing_release_options, 8753 }; 8754 8755 /* 8756 * In order to pass in both the trace_array descriptor as well as the index 8757 * to the flag that the trace option file represents, the trace_array 8758 * has a character array of trace_flags_index[], which holds the index 8759 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. 8760 * The address of this character array is passed to the flag option file 8761 * read/write callbacks. 8762 * 8763 * In order to extract both the index and the trace_array descriptor, 8764 * get_tr_index() uses the following algorithm. 8765 * 8766 * idx = *ptr; 8767 * 8768 * As the pointer itself contains the address of the index (remember 8769 * index[1] == 1). 8770 * 8771 * Then to get the trace_array descriptor, by subtracting that index 8772 * from the ptr, we get to the start of the index itself. 8773 * 8774 * ptr - idx == &index[0] 8775 * 8776 * Then a simple container_of() from that pointer gets us to the 8777 * trace_array descriptor. 
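 *
 * For example, if data points at trace_flags_index[3], then *data is 3,
 * and data - 3 is &trace_flags_index[0]; container_of() on that address
 * then recovers the enclosing trace_array.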
8778 */ 8779 static void get_tr_index(void *data, struct trace_array **ptr, 8780 unsigned int *pindex) 8781 { 8782 *pindex = *(unsigned char *)data; 8783 8784 *ptr = container_of(data - *pindex, struct trace_array, 8785 trace_flags_index); 8786 } 8787 8788 static ssize_t 8789 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, 8790 loff_t *ppos) 8791 { 8792 void *tr_index = filp->private_data; 8793 struct trace_array *tr; 8794 unsigned int index; 8795 char *buf; 8796 8797 get_tr_index(tr_index, &tr, &index); 8798 8799 if (tr->trace_flags & (1 << index)) 8800 buf = "1\n"; 8801 else 8802 buf = "0\n"; 8803 8804 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 8805 } 8806 8807 static ssize_t 8808 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, 8809 loff_t *ppos) 8810 { 8811 void *tr_index = filp->private_data; 8812 struct trace_array *tr; 8813 unsigned int index; 8814 unsigned long val; 8815 int ret; 8816 8817 get_tr_index(tr_index, &tr, &index); 8818 8819 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 8820 if (ret) 8821 return ret; 8822 8823 if (val != 0 && val != 1) 8824 return -EINVAL; 8825 8826 mutex_lock(&event_mutex); 8827 mutex_lock(&trace_types_lock); 8828 ret = set_tracer_flag(tr, 1 << index, val); 8829 mutex_unlock(&trace_types_lock); 8830 mutex_unlock(&event_mutex); 8831 8832 if (ret < 0) 8833 return ret; 8834 8835 *ppos += cnt; 8836 8837 return cnt; 8838 } 8839 8840 static const struct file_operations trace_options_core_fops = { 8841 .open = tracing_open_generic, 8842 .read = trace_options_core_read, 8843 .write = trace_options_core_write, 8844 .llseek = generic_file_llseek, 8845 }; 8846 8847 struct dentry *trace_create_file(const char *name, 8848 umode_t mode, 8849 struct dentry *parent, 8850 void *data, 8851 const struct file_operations *fops) 8852 { 8853 struct dentry *ret; 8854 8855 ret = tracefs_create_file(name, mode, parent, data, fops); 8856 if (!ret) 8857 pr_warn("Could not create tracefs '%s' entry\n", name); 8858 8859 return ret; 8860 } 8861 8862 8863 static struct dentry *trace_options_init_dentry(struct trace_array *tr) 8864 { 8865 struct dentry *d_tracer; 8866 8867 if (tr->options) 8868 return tr->options; 8869 8870 d_tracer = tracing_get_dentry(tr); 8871 if (IS_ERR(d_tracer)) 8872 return NULL; 8873 8874 tr->options = tracefs_create_dir("options", d_tracer); 8875 if (!tr->options) { 8876 pr_warn("Could not create tracefs directory 'options'\n"); 8877 return NULL; 8878 } 8879 8880 return tr->options; 8881 } 8882 8883 static void 8884 create_trace_option_file(struct trace_array *tr, 8885 struct trace_option_dentry *topt, 8886 struct tracer_flags *flags, 8887 struct tracer_opt *opt) 8888 { 8889 struct dentry *t_options; 8890 8891 t_options = trace_options_init_dentry(tr); 8892 if (!t_options) 8893 return; 8894 8895 topt->flags = flags; 8896 topt->opt = opt; 8897 topt->tr = tr; 8898 8899 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE, 8900 t_options, topt, &trace_options_fops); 8901 8902 } 8903 8904 static void 8905 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) 8906 { 8907 struct trace_option_dentry *topts; 8908 struct trace_options *tr_topts; 8909 struct tracer_flags *flags; 8910 struct tracer_opt *opts; 8911 int cnt; 8912 int i; 8913 8914 if (!tracer) 8915 return; 8916 8917 flags = tracer->flags; 8918 8919 if (!flags || !flags->opts) 8920 return; 8921 8922 /* 8923 * If this is an instance, only create flags for tracers 8924 * the instance may have. 
8925 */ 8926 if (!trace_ok_for_array(tracer, tr)) 8927 return; 8928 8929 for (i = 0; i < tr->nr_topts; i++) { 8930 /* Make sure there's no duplicate flags. */ 8931 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) 8932 return; 8933 } 8934 8935 opts = flags->opts; 8936 8937 for (cnt = 0; opts[cnt].name; cnt++) 8938 ; 8939 8940 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); 8941 if (!topts) 8942 return; 8943 8944 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), 8945 GFP_KERNEL); 8946 if (!tr_topts) { 8947 kfree(topts); 8948 return; 8949 } 8950 8951 tr->topts = tr_topts; 8952 tr->topts[tr->nr_topts].tracer = tracer; 8953 tr->topts[tr->nr_topts].topts = topts; 8954 tr->nr_topts++; 8955 8956 for (cnt = 0; opts[cnt].name; cnt++) { 8957 create_trace_option_file(tr, &topts[cnt], flags, 8958 &opts[cnt]); 8959 MEM_FAIL(topts[cnt].entry == NULL, 8960 "Failed to create trace option: %s", 8961 opts[cnt].name); 8962 } 8963 } 8964 8965 static struct dentry * 8966 create_trace_option_core_file(struct trace_array *tr, 8967 const char *option, long index) 8968 { 8969 struct dentry *t_options; 8970 8971 t_options = trace_options_init_dentry(tr); 8972 if (!t_options) 8973 return NULL; 8974 8975 return trace_create_file(option, TRACE_MODE_WRITE, t_options, 8976 (void *)&tr->trace_flags_index[index], 8977 &trace_options_core_fops); 8978 } 8979 8980 static void create_trace_options_dir(struct trace_array *tr) 8981 { 8982 struct dentry *t_options; 8983 bool top_level = tr == &global_trace; 8984 int i; 8985 8986 t_options = trace_options_init_dentry(tr); 8987 if (!t_options) 8988 return; 8989 8990 for (i = 0; trace_options[i]; i++) { 8991 if (top_level || 8992 !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) 8993 create_trace_option_core_file(tr, trace_options[i], i); 8994 } 8995 } 8996 8997 static ssize_t 8998 rb_simple_read(struct file *filp, char __user *ubuf, 8999 size_t cnt, loff_t *ppos) 9000 { 9001 struct trace_array *tr = filp->private_data; 9002 char buf[64]; 9003 int r; 9004 9005 r = tracer_tracing_is_on(tr); 9006 r = sprintf(buf, "%d\n", r); 9007 9008 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 9009 } 9010 9011 static ssize_t 9012 rb_simple_write(struct file *filp, const char __user *ubuf, 9013 size_t cnt, loff_t *ppos) 9014 { 9015 struct trace_array *tr = filp->private_data; 9016 struct trace_buffer *buffer = tr->array_buffer.buffer; 9017 unsigned long val; 9018 int ret; 9019 9020 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 9021 if (ret) 9022 return ret; 9023 9024 if (buffer) { 9025 mutex_lock(&trace_types_lock); 9026 if (!!val == tracer_tracing_is_on(tr)) { 9027 val = 0; /* do nothing */ 9028 } else if (val) { 9029 tracer_tracing_on(tr); 9030 if (tr->current_trace->start) 9031 tr->current_trace->start(tr); 9032 } else { 9033 tracer_tracing_off(tr); 9034 if (tr->current_trace->stop) 9035 tr->current_trace->stop(tr); 9036 /* Wake up any waiters */ 9037 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS); 9038 } 9039 mutex_unlock(&trace_types_lock); 9040 } 9041 9042 (*ppos)++; 9043 9044 return cnt; 9045 } 9046 9047 static const struct file_operations rb_simple_fops = { 9048 .open = tracing_open_generic_tr, 9049 .read = rb_simple_read, 9050 .write = rb_simple_write, 9051 .release = tracing_release_generic_tr, 9052 .llseek = default_llseek, 9053 }; 9054 9055 static ssize_t 9056 buffer_percent_read(struct file *filp, char __user *ubuf, 9057 size_t cnt, loff_t *ppos) 9058 { 9059 struct trace_array *tr = filp->private_data; 9060 char buf[64]; 9061 int r; 9062 
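	/*
	 * buffer_percent is the fill watermark (0-100) that a blocked
	 * reader waits for; 0 means wake up as soon as there is any data.
	 */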
	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}

static const struct file_operations buffer_percent_fops = {
	.open		= tracing_open_generic_tr,
	.read		= buffer_percent_read,
	.write		= buffer_percent_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

static ssize_t
buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	size_t size;
	char buf[64];
	int order;
	int r;

	order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
	size = (PAGE_SIZE << order) / 1024;

	r = sprintf(buf, "%zd\n", size);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
			 size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int old_order;
	int order;
	int pages;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val *= 1024; /* value passed in is in KB */

	pages = DIV_ROUND_UP(val, PAGE_SIZE);
	/* order = ceil(log2(pages)): e.g. writing 8 (KB) with 4K pages gives pages = 2, order = 1 */
	order = fls(pages - 1);

	/* limit between 1 and 128 system pages */
	if (order < 0 || order > 7)
		return -EINVAL;

	/* Do not allow tracing while changing the order of the ring buffer */
	tracing_stop_tr(tr);

	old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
	if (old_order == order)
		goto out;

	ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
	if (ret)
		goto out;

#ifdef CONFIG_TRACER_MAX_TRACE

	if (!tr->allocated_snapshot)
		goto out_max;

	ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
	if (ret) {
		/* Put back the old order */
		cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
		if (WARN_ON_ONCE(cnt)) {
			/*
			 * AARGH! We are left with different orders!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the order of the main buffer, but
			 * failed to update the order of the max buffer.
			 * And when we tried to reset the main buffer to
			 * the original order, we failed there too. This
			 * is very unlikely to happen, but if it does,
			 * warn and kill all tracing.
9173 */ 9174 tracing_disabled = 1; 9175 } 9176 goto out; 9177 } 9178 out_max: 9179 #endif 9180 (*ppos)++; 9181 out: 9182 if (ret) 9183 cnt = ret; 9184 tracing_start_tr(tr); 9185 return cnt; 9186 } 9187 9188 static const struct file_operations buffer_subbuf_size_fops = { 9189 .open = tracing_open_generic_tr, 9190 .read = buffer_subbuf_size_read, 9191 .write = buffer_subbuf_size_write, 9192 .release = tracing_release_generic_tr, 9193 .llseek = default_llseek, 9194 }; 9195 9196 static struct dentry *trace_instance_dir; 9197 9198 static void 9199 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); 9200 9201 static int 9202 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) 9203 { 9204 enum ring_buffer_flags rb_flags; 9205 9206 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 9207 9208 buf->tr = tr; 9209 9210 if (tr->range_addr_start && tr->range_addr_size) { 9211 buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0, 9212 tr->range_addr_start, 9213 tr->range_addr_size); 9214 9215 ring_buffer_last_boot_delta(buf->buffer, 9216 &tr->text_delta, &tr->data_delta); 9217 /* 9218 * This is basically the same as a mapped buffer, 9219 * with the same restrictions. 9220 */ 9221 tr->mapped++; 9222 } else { 9223 buf->buffer = ring_buffer_alloc(size, rb_flags); 9224 } 9225 if (!buf->buffer) 9226 return -ENOMEM; 9227 9228 buf->data = alloc_percpu(struct trace_array_cpu); 9229 if (!buf->data) { 9230 ring_buffer_free(buf->buffer); 9231 buf->buffer = NULL; 9232 return -ENOMEM; 9233 } 9234 9235 /* Allocate the first page for all buffers */ 9236 set_buffer_entries(&tr->array_buffer, 9237 ring_buffer_size(tr->array_buffer.buffer, 0)); 9238 9239 return 0; 9240 } 9241 9242 static void free_trace_buffer(struct array_buffer *buf) 9243 { 9244 if (buf->buffer) { 9245 ring_buffer_free(buf->buffer); 9246 buf->buffer = NULL; 9247 free_percpu(buf->data); 9248 buf->data = NULL; 9249 } 9250 } 9251 9252 static int allocate_trace_buffers(struct trace_array *tr, int size) 9253 { 9254 int ret; 9255 9256 ret = allocate_trace_buffer(tr, &tr->array_buffer, size); 9257 if (ret) 9258 return ret; 9259 9260 #ifdef CONFIG_TRACER_MAX_TRACE 9261 /* Fix mapped buffer trace arrays do not have snapshot buffers */ 9262 if (tr->range_addr_start) 9263 return 0; 9264 9265 ret = allocate_trace_buffer(tr, &tr->max_buffer, 9266 allocate_snapshot ? 
size : 1); 9267 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) { 9268 free_trace_buffer(&tr->array_buffer); 9269 return -ENOMEM; 9270 } 9271 tr->allocated_snapshot = allocate_snapshot; 9272 9273 allocate_snapshot = false; 9274 #endif 9275 9276 return 0; 9277 } 9278 9279 static void free_trace_buffers(struct trace_array *tr) 9280 { 9281 if (!tr) 9282 return; 9283 9284 free_trace_buffer(&tr->array_buffer); 9285 9286 #ifdef CONFIG_TRACER_MAX_TRACE 9287 free_trace_buffer(&tr->max_buffer); 9288 #endif 9289 } 9290 9291 static void init_trace_flags_index(struct trace_array *tr) 9292 { 9293 int i; 9294 9295 /* Used by the trace options files */ 9296 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) 9297 tr->trace_flags_index[i] = i; 9298 } 9299 9300 static void __update_tracer_options(struct trace_array *tr) 9301 { 9302 struct tracer *t; 9303 9304 for (t = trace_types; t; t = t->next) 9305 add_tracer_options(tr, t); 9306 } 9307 9308 static void update_tracer_options(struct trace_array *tr) 9309 { 9310 mutex_lock(&trace_types_lock); 9311 tracer_options_updated = true; 9312 __update_tracer_options(tr); 9313 mutex_unlock(&trace_types_lock); 9314 } 9315 9316 /* Must have trace_types_lock held */ 9317 struct trace_array *trace_array_find(const char *instance) 9318 { 9319 struct trace_array *tr, *found = NULL; 9320 9321 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9322 if (tr->name && strcmp(tr->name, instance) == 0) { 9323 found = tr; 9324 break; 9325 } 9326 } 9327 9328 return found; 9329 } 9330 9331 struct trace_array *trace_array_find_get(const char *instance) 9332 { 9333 struct trace_array *tr; 9334 9335 mutex_lock(&trace_types_lock); 9336 tr = trace_array_find(instance); 9337 if (tr) 9338 tr->ref++; 9339 mutex_unlock(&trace_types_lock); 9340 9341 return tr; 9342 } 9343 9344 static int trace_array_create_dir(struct trace_array *tr) 9345 { 9346 int ret; 9347 9348 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); 9349 if (!tr->dir) 9350 return -EINVAL; 9351 9352 ret = event_trace_add_tracer(tr->dir, tr); 9353 if (ret) { 9354 tracefs_remove(tr->dir); 9355 return ret; 9356 } 9357 9358 init_tracer_tracefs(tr, tr->dir); 9359 __update_tracer_options(tr); 9360 9361 return ret; 9362 } 9363 9364 static struct trace_array * 9365 trace_array_create_systems(const char *name, const char *systems, 9366 unsigned long range_addr_start, 9367 unsigned long range_addr_size) 9368 { 9369 struct trace_array *tr; 9370 int ret; 9371 9372 ret = -ENOMEM; 9373 tr = kzalloc(sizeof(*tr), GFP_KERNEL); 9374 if (!tr) 9375 return ERR_PTR(ret); 9376 9377 tr->name = kstrdup(name, GFP_KERNEL); 9378 if (!tr->name) 9379 goto out_free_tr; 9380 9381 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) 9382 goto out_free_tr; 9383 9384 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL)) 9385 goto out_free_tr; 9386 9387 if (systems) { 9388 tr->system_names = kstrdup_const(systems, GFP_KERNEL); 9389 if (!tr->system_names) 9390 goto out_free_tr; 9391 } 9392 9393 /* Only for boot up memory mapped ring buffers */ 9394 tr->range_addr_start = range_addr_start; 9395 tr->range_addr_size = range_addr_size; 9396 9397 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; 9398 9399 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); 9400 9401 raw_spin_lock_init(&tr->start_lock); 9402 9403 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 9404 #ifdef CONFIG_TRACER_MAX_TRACE 9405 spin_lock_init(&tr->snapshot_trigger_lock); 9406 #endif 9407 tr->current_trace = &nop_trace; 9408 9409 
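	/*
	 * New instances start out with the nop tracer; a real tracer is
	 * only installed when one is written to current_tracer.
	 */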
	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);
	INIT_LIST_HEAD(&tr->err_log);

#ifdef CONFIG_MODULES
	INIT_LIST_HEAD(&tr->mod_events);
#endif

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	/* The ring buffer is expanded by default */
	trace_set_ring_buffer_expanded(tr);

	if (ftrace_allocate_ftrace_ops(tr) < 0)
		goto out_free_tr;

	ftrace_init_trace_array(tr);

	init_trace_flags_index(tr);

	if (trace_instance_dir) {
		ret = trace_array_create_dir(tr);
		if (ret)
			goto out_free_tr;
	} else
		__trace_early_add_events(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	tr->ref++;

	return tr;

 out_free_tr:
	ftrace_free_ftrace_ops(tr);
	free_trace_buffers(tr);
	free_cpumask_var(tr->pipe_cpumask);
	free_cpumask_var(tr->tracing_cpumask);
	kfree_const(tr->system_names);
	kfree(tr->name);
	kfree(tr);

	return ERR_PTR(ret);
}

static struct trace_array *trace_array_create(const char *name)
{
	return trace_array_create_systems(name, NULL, 0, 0);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	guard(mutex)(&event_mutex);
	guard(mutex)(&trace_types_lock);

	ret = -EEXIST;
	if (trace_array_find(name))
		return -EEXIST;

	tr = trace_array_create(name);

	ret = PTR_ERR_OR_ZERO(tr);

	return ret;
}

static u64 map_pages(u64 start, u64 size)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	unsigned int i;
	void *vaddr;

	page_count = DIV_ROUND_UP(size, PAGE_SIZE);

	page_start = start;
	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return 0;

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
	kfree(pages);

	return (u64)(unsigned long)vaddr;
}

/**
 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 * @name: The name of the trace array to be looked up/created.
 * @systems: A list of systems to create event directories for (NULL for all)
 *
 * Returns pointer to trace array with given name.
 * NULL, if it cannot be created.
 *
 * NOTE: This function increments the reference counter associated with the
 * trace array returned. This makes sure it cannot be freed while in use.
 * Use trace_array_put() once the trace array is no longer needed.
 * If the trace_array is to be freed, trace_array_destroy() needs to
 * be called after the trace_array_put(), or simply let user space delete
 * it from the tracefs instances directory. But until the
 * trace_array_put() is called, user space cannot delete it.
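 *
 * A minimal usage sketch (illustrative only; the instance name and the
 * message are made up, and error handling is elided):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance", NULL);
 *	if (tr) {
 *		trace_array_printk(tr, _THIS_IP_, "hello\n");
 *		trace_array_put(tr);
 *	}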
9520 * 9521 */ 9522 struct trace_array *trace_array_get_by_name(const char *name, const char *systems) 9523 { 9524 struct trace_array *tr; 9525 9526 guard(mutex)(&event_mutex); 9527 guard(mutex)(&trace_types_lock); 9528 9529 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9530 if (tr->name && strcmp(tr->name, name) == 0) { 9531 tr->ref++; 9532 return tr; 9533 } 9534 } 9535 9536 tr = trace_array_create_systems(name, systems, 0, 0); 9537 9538 if (IS_ERR(tr)) 9539 tr = NULL; 9540 else 9541 tr->ref++; 9542 9543 return tr; 9544 } 9545 EXPORT_SYMBOL_GPL(trace_array_get_by_name); 9546 9547 static int __remove_instance(struct trace_array *tr) 9548 { 9549 int i; 9550 9551 /* Reference counter for a newly created trace array = 1. */ 9552 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) 9553 return -EBUSY; 9554 9555 list_del(&tr->list); 9556 9557 /* Disable all the flags that were enabled coming in */ 9558 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { 9559 if ((1 << i) & ZEROED_TRACE_FLAGS) 9560 set_tracer_flag(tr, 1 << i, 0); 9561 } 9562 9563 if (printk_trace == tr) 9564 update_printk_trace(&global_trace); 9565 9566 tracing_set_nop(tr); 9567 clear_ftrace_function_probes(tr); 9568 event_trace_del_tracer(tr); 9569 ftrace_clear_pids(tr); 9570 ftrace_destroy_function_files(tr); 9571 tracefs_remove(tr->dir); 9572 free_percpu(tr->last_func_repeats); 9573 free_trace_buffers(tr); 9574 clear_tracing_err_log(tr); 9575 9576 for (i = 0; i < tr->nr_topts; i++) { 9577 kfree(tr->topts[i].topts); 9578 } 9579 kfree(tr->topts); 9580 9581 free_cpumask_var(tr->pipe_cpumask); 9582 free_cpumask_var(tr->tracing_cpumask); 9583 kfree_const(tr->system_names); 9584 kfree(tr->name); 9585 kfree(tr); 9586 9587 return 0; 9588 } 9589 9590 int trace_array_destroy(struct trace_array *this_tr) 9591 { 9592 struct trace_array *tr; 9593 9594 if (!this_tr) 9595 return -EINVAL; 9596 9597 guard(mutex)(&event_mutex); 9598 guard(mutex)(&trace_types_lock); 9599 9600 9601 /* Making sure trace array exists before destroying it. 
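	 * The caller's pointer is only trusted if it is still on the
	 * ftrace_trace_arrays list, so walk the list under the locks.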
*/ 9602 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9603 if (tr == this_tr) 9604 return __remove_instance(tr); 9605 } 9606 9607 return -ENODEV; 9608 } 9609 EXPORT_SYMBOL_GPL(trace_array_destroy); 9610 9611 static int instance_rmdir(const char *name) 9612 { 9613 struct trace_array *tr; 9614 9615 guard(mutex)(&event_mutex); 9616 guard(mutex)(&trace_types_lock); 9617 9618 tr = trace_array_find(name); 9619 if (!tr) 9620 return -ENODEV; 9621 9622 return __remove_instance(tr); 9623 } 9624 9625 static __init void create_trace_instances(struct dentry *d_tracer) 9626 { 9627 struct trace_array *tr; 9628 9629 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, 9630 instance_mkdir, 9631 instance_rmdir); 9632 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) 9633 return; 9634 9635 guard(mutex)(&event_mutex); 9636 guard(mutex)(&trace_types_lock); 9637 9638 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9639 if (!tr->name) 9640 continue; 9641 if (MEM_FAIL(trace_array_create_dir(tr) < 0, 9642 "Failed to create instance directory\n")) 9643 return; 9644 } 9645 } 9646 9647 static void 9648 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) 9649 { 9650 int cpu; 9651 9652 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer, 9653 tr, &show_traces_fops); 9654 9655 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer, 9656 tr, &set_tracer_fops); 9657 9658 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer, 9659 tr, &tracing_cpumask_fops); 9660 9661 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer, 9662 tr, &tracing_iter_fops); 9663 9664 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer, 9665 tr, &tracing_fops); 9666 9667 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer, 9668 tr, &tracing_pipe_fops); 9669 9670 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer, 9671 tr, &tracing_entries_fops); 9672 9673 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer, 9674 tr, &tracing_total_entries_fops); 9675 9676 trace_create_file("free_buffer", 0200, d_tracer, 9677 tr, &tracing_free_buffer_fops); 9678 9679 trace_create_file("trace_marker", 0220, d_tracer, 9680 tr, &tracing_mark_fops); 9681 9682 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print"); 9683 9684 trace_create_file("trace_marker_raw", 0220, d_tracer, 9685 tr, &tracing_mark_raw_fops); 9686 9687 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr, 9688 &trace_clock_fops); 9689 9690 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer, 9691 tr, &rb_simple_fops); 9692 9693 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr, 9694 &trace_time_stamp_mode_fops); 9695 9696 tr->buffer_percent = 50; 9697 9698 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer, 9699 tr, &buffer_percent_fops); 9700 9701 trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer, 9702 tr, &buffer_subbuf_size_fops); 9703 9704 create_trace_options_dir(tr); 9705 9706 #ifdef CONFIG_TRACER_MAX_TRACE 9707 trace_create_maxlat_file(tr, d_tracer); 9708 #endif 9709 9710 if (ftrace_create_function_files(tr, d_tracer)) 9711 MEM_FAIL(1, "Could not allocate function filter files"); 9712 9713 if (tr->range_addr_start) { 9714 trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer, 9715 tr, &last_boot_fops); 9716 #ifdef CONFIG_TRACER_SNAPSHOT 9717 } else { 9718 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer, 9719 tr, &snapshot_fops); 9720 #endif 9721 } 9722 
	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
			  tr, &tracing_err_log_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code,
 * and returns 0 on success or a negative error code if the top level
 * tracing directory is unavailable.
 */
int tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return 0;

	if (WARN_ON(!tracefs_initialized()))
		return -ENODEV;

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);

	return 0;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static struct workqueue_struct *eval_map_wq __initdata;
static struct work_struct eval_map_work __initdata;
static struct work_struct tracerfs_init_work __initdata;

static void __init eval_map_work_func(struct work_struct *work)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

static int __init trace_eval_init(void)
{
	INIT_WORK(&eval_map_work, eval_map_work_func);

	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
	if (!eval_map_wq) {
		pr_err("Unable to allocate eval_map_wq\n");
		/* Do the work synchronously here instead */
		eval_map_work_func(&eval_map_work);
		return -ENOMEM;
	}

	queue_work(eval_map_wq, &eval_map_work);
	return 0;
}

subsys_initcall(trace_eval_init);

static int __init trace_eval_sync(void)
{
	/* Make sure the eval map updates are finished */
	if (eval_map_wq)
		destroy_workqueue(eval_map_wq);
	return 0;
}

late_initcall_sync(trace_eval_sync);


#ifdef CONFIG_MODULES

bool module_exists(const char *module)
{
	/* All modules have the symbol __this_module */
	static const char this_mod[] = "__this_module";
	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
	unsigned long val;
	int n;

	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);

	if (n > sizeof(modname) - 1)
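		/* name truncated by snprintf(): cannot match any real module */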
return false; 9847 9848 val = module_kallsyms_lookup_name(modname); 9849 return val != 0; 9850 } 9851 9852 static void trace_module_add_evals(struct module *mod) 9853 { 9854 if (!mod->num_trace_evals) 9855 return; 9856 9857 /* 9858 * Modules with bad taint do not have events created, do 9859 * not bother with enums either. 9860 */ 9861 if (trace_module_has_bad_taint(mod)) 9862 return; 9863 9864 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals); 9865 } 9866 9867 #ifdef CONFIG_TRACE_EVAL_MAP_FILE 9868 static void trace_module_remove_evals(struct module *mod) 9869 { 9870 union trace_eval_map_item *map; 9871 union trace_eval_map_item **last = &trace_eval_maps; 9872 9873 if (!mod->num_trace_evals) 9874 return; 9875 9876 guard(mutex)(&trace_eval_mutex); 9877 9878 map = trace_eval_maps; 9879 9880 while (map) { 9881 if (map->head.mod == mod) 9882 break; 9883 map = trace_eval_jmp_to_tail(map); 9884 last = &map->tail.next; 9885 map = map->tail.next; 9886 } 9887 if (!map) 9888 return; 9889 9890 *last = trace_eval_jmp_to_tail(map)->tail.next; 9891 kfree(map); 9892 } 9893 #else 9894 static inline void trace_module_remove_evals(struct module *mod) { } 9895 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ 9896 9897 static int trace_module_notify(struct notifier_block *self, 9898 unsigned long val, void *data) 9899 { 9900 struct module *mod = data; 9901 9902 switch (val) { 9903 case MODULE_STATE_COMING: 9904 trace_module_add_evals(mod); 9905 break; 9906 case MODULE_STATE_GOING: 9907 trace_module_remove_evals(mod); 9908 break; 9909 } 9910 9911 return NOTIFY_OK; 9912 } 9913 9914 static struct notifier_block trace_module_nb = { 9915 .notifier_call = trace_module_notify, 9916 .priority = 0, 9917 }; 9918 #endif /* CONFIG_MODULES */ 9919 9920 static __init void tracer_init_tracefs_work_func(struct work_struct *work) 9921 { 9922 9923 event_trace_init(); 9924 9925 init_tracer_tracefs(&global_trace, NULL); 9926 ftrace_init_tracefs_toplevel(&global_trace, NULL); 9927 9928 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL, 9929 &global_trace, &tracing_thresh_fops); 9930 9931 trace_create_file("README", TRACE_MODE_READ, NULL, 9932 NULL, &tracing_readme_fops); 9933 9934 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL, 9935 NULL, &tracing_saved_cmdlines_fops); 9936 9937 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL, 9938 NULL, &tracing_saved_cmdlines_size_fops); 9939 9940 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL, 9941 NULL, &tracing_saved_tgids_fops); 9942 9943 trace_create_eval_file(NULL); 9944 9945 #ifdef CONFIG_MODULES 9946 register_module_notifier(&trace_module_nb); 9947 #endif 9948 9949 #ifdef CONFIG_DYNAMIC_FTRACE 9950 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL, 9951 NULL, &tracing_dyn_info_fops); 9952 #endif 9953 9954 create_trace_instances(NULL); 9955 9956 update_tracer_options(&global_trace); 9957 } 9958 9959 static __init int tracer_init_tracefs(void) 9960 { 9961 int ret; 9962 9963 trace_access_lock_init(); 9964 9965 ret = tracing_init_dentry(); 9966 if (ret) 9967 return 0; 9968 9969 if (eval_map_wq) { 9970 INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func); 9971 queue_work(eval_map_wq, &tracerfs_init_work); 9972 } else { 9973 tracer_init_tracefs_work_func(NULL); 9974 } 9975 9976 rv_init_interface(); 9977 9978 return 0; 9979 } 9980 9981 fs_initcall(tracer_init_tracefs); 9982 9983 static int trace_die_panic_handler(struct notifier_block *self, 9984 unsigned long ev, void *unused); 9985 9986 static struct notifier_block 
trace_panic_notifier = { 9987 .notifier_call = trace_die_panic_handler, 9988 .priority = INT_MAX - 1, 9989 }; 9990 9991 static struct notifier_block trace_die_notifier = { 9992 .notifier_call = trace_die_panic_handler, 9993 .priority = INT_MAX - 1, 9994 }; 9995 9996 /* 9997 * The idea is to execute the following die/panic callback early, in order 9998 * to avoid showing irrelevant information in the trace (like other panic 9999 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall 10000 * warnings get disabled (to prevent potential log flooding). 10001 */ 10002 static int trace_die_panic_handler(struct notifier_block *self, 10003 unsigned long ev, void *unused) 10004 { 10005 if (!ftrace_dump_on_oops_enabled()) 10006 return NOTIFY_DONE; 10007 10008 /* The die notifier requires DIE_OOPS to trigger */ 10009 if (self == &trace_die_notifier && ev != DIE_OOPS) 10010 return NOTIFY_DONE; 10011 10012 ftrace_dump(DUMP_PARAM); 10013 10014 return NOTIFY_DONE; 10015 } 10016 10017 /* 10018 * printk is set to max of 1024, we really don't need it that big. 10019 * Nothing should be printing 1000 characters anyway. 10020 */ 10021 #define TRACE_MAX_PRINT 1000 10022 10023 /* 10024 * Define here KERN_TRACE so that we have one place to modify 10025 * it if we decide to change what log level the ftrace dump 10026 * should be at. 10027 */ 10028 #define KERN_TRACE KERN_EMERG 10029 10030 void 10031 trace_printk_seq(struct trace_seq *s) 10032 { 10033 /* Probably should print a warning here. */ 10034 if (s->seq.len >= TRACE_MAX_PRINT) 10035 s->seq.len = TRACE_MAX_PRINT; 10036 10037 /* 10038 * More paranoid code. Although the buffer size is set to 10039 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just 10040 * an extra layer of protection. 10041 */ 10042 if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) 10043 s->seq.len = s->seq.size - 1; 10044 10045 /* should be zero ended, but we are paranoid. */ 10046 s->buffer[s->seq.len] = 0; 10047 10048 printk(KERN_TRACE "%s", s->buffer); 10049 10050 trace_seq_init(s); 10051 } 10052 10053 static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr) 10054 { 10055 iter->tr = tr; 10056 iter->trace = iter->tr->current_trace; 10057 iter->cpu_file = RING_BUFFER_ALL_CPUS; 10058 iter->array_buffer = &tr->array_buffer; 10059 10060 if (iter->trace && iter->trace->open) 10061 iter->trace->open(iter); 10062 10063 /* Annotate start of buffers if we had overruns */ 10064 if (ring_buffer_overruns(iter->array_buffer->buffer)) 10065 iter->iter_flags |= TRACE_FILE_ANNOTATE; 10066 10067 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 10068 if (trace_clocks[iter->tr->clock_id].in_ns) 10069 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 10070 10071 /* Can not use kmalloc for iter.temp and iter.fmt */ 10072 iter->temp = static_temp_buf; 10073 iter->temp_size = STATIC_TEMP_BUF_SIZE; 10074 iter->fmt = static_fmt_buf; 10075 iter->fmt_size = STATIC_FMT_BUF_SIZE; 10076 } 10077 10078 void trace_init_global_iter(struct trace_iterator *iter) 10079 { 10080 trace_init_iter(iter, &global_trace); 10081 } 10082 10083 static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode) 10084 { 10085 /* use static because iter can be a bit big for the stack */ 10086 static struct trace_iterator iter; 10087 unsigned int old_userobj; 10088 unsigned long flags; 10089 int cnt = 0, cpu; 10090 10091 /* 10092 * Always turn off tracing when we dump. 10093 * We don't need to show trace output of what happens 10094 * between multiple crashes. 
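	 * The buffers already hold the events leading up to the first
	 * crash; anything traced afterwards would only bury them.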
static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
{
	/* Use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracer_tracing_off(tr);

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_iter(&iter, tr);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* Don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	if (dump_mode == DUMP_ORIG)
		iter.cpu_file = raw_smp_processor_id();
	else
		iter.cpu_file = RING_BUFFER_ALL_CPUS;

	if (tr == &global_trace)
		printk(KERN_TRACE "Dumping ftrace buffer:\n");
	else
		printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We print all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	local_irq_restore(flags);
}

static void ftrace_dump_by_param(void)
{
	bool first_param = true;
	char dump_param[MAX_TRACER_SIZE];
	char *buf, *token, *inst_name;
	struct trace_array *tr;

	strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE);
	buf = dump_param;

	while ((token = strsep(&buf, ",")) != NULL) {
		if (first_param) {
			first_param = false;
			if (!strcmp("0", token))
				continue;
			else if (!strcmp("1", token)) {
				ftrace_dump_one(&global_trace, DUMP_ALL);
				continue;
			}
			else if (!strcmp("2", token) ||
				 !strcmp("orig_cpu", token)) {
				ftrace_dump_one(&global_trace, DUMP_ORIG);
				continue;
			}
		}

		inst_name = strsep(&token, "=");
		tr = trace_array_find(inst_name);
		if (!tr) {
			printk(KERN_TRACE "Instance %s not found\n", inst_name);
			continue;
		}

		if (token && (!strcmp("2", token) ||
			      !strcmp("orig_cpu", token)))
			ftrace_dump_one(tr, DUMP_ORIG);
		else
			ftrace_dump_one(tr, DUMP_ALL);
	}
}
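/*
 * The tokens parsed above come straight from the ftrace_dump_on_oops
 * string. A few example settings and what they do (illustrative;
 * "foo" and "bar" are made-up instance names):
 *
 *	ftrace_dump_on_oops=1			dump the global buffer, all CPUs
 *	ftrace_dump_on_oops=orig_cpu		only the CPU that triggered the oops
 *	ftrace_dump_on_oops=foo,bar=orig_cpu	dump instance "foo" on all CPUs and
 *						instance "bar" on the oopsing CPU
 */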
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	static atomic_t dump_running;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	switch (oops_dump_mode) {
	case DUMP_ALL:
		ftrace_dump_one(&global_trace, DUMP_ALL);
		break;
	case DUMP_ORIG:
		ftrace_dump_one(&global_trace, DUMP_ORIG);
		break;
	case DUMP_PARAM:
		ftrace_dump_by_param();
		break;
	case DUMP_NONE:
		break;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		ftrace_dump_one(&global_trace, DUMP_ALL);
	}

	atomic_dec(&dump_running);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
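/*
 * trace_parse_run_command() is the common write handler behind the
 * dynamic-event tracefs files. A typical caller (a sketch modeled on
 * the kprobe_events write path; "create_fn" is a placeholder for the
 * file's real callback) looks like:
 *
 *	static ssize_t probes_write(struct file *file,
 *				    const char __user *buffer,
 *				    size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buffer, count, ppos,
 *					       create_fn);
 *	}
 *
 * createfn() is then invoked once per newline-terminated line of the
 * write, after '#' comments have been stripped.
 */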
10346 */ 10347 allocate_snapshot = true; 10348 } 10349 #else 10350 static inline void do_allocate_snapshot(const char *name) { } 10351 #endif 10352 10353 __init static void enable_instances(void) 10354 { 10355 struct trace_array *tr; 10356 char *curr_str; 10357 char *name; 10358 char *str; 10359 char *tok; 10360 10361 /* A tab is always appended */ 10362 boot_instance_info[boot_instance_index - 1] = '\0'; 10363 str = boot_instance_info; 10364 10365 while ((curr_str = strsep(&str, "\t"))) { 10366 phys_addr_t start = 0; 10367 phys_addr_t size = 0; 10368 unsigned long addr = 0; 10369 bool traceprintk = false; 10370 bool traceoff = false; 10371 char *flag_delim; 10372 char *addr_delim; 10373 10374 tok = strsep(&curr_str, ","); 10375 10376 flag_delim = strchr(tok, '^'); 10377 addr_delim = strchr(tok, '@'); 10378 10379 if (addr_delim) 10380 *addr_delim++ = '\0'; 10381 10382 if (flag_delim) 10383 *flag_delim++ = '\0'; 10384 10385 name = tok; 10386 10387 if (flag_delim) { 10388 char *flag; 10389 10390 while ((flag = strsep(&flag_delim, "^"))) { 10391 if (strcmp(flag, "traceoff") == 0) { 10392 traceoff = true; 10393 } else if ((strcmp(flag, "printk") == 0) || 10394 (strcmp(flag, "traceprintk") == 0) || 10395 (strcmp(flag, "trace_printk") == 0)) { 10396 traceprintk = true; 10397 } else { 10398 pr_info("Tracing: Invalid instance flag '%s' for %s\n", 10399 flag, name); 10400 } 10401 } 10402 } 10403 10404 tok = addr_delim; 10405 if (tok && isdigit(*tok)) { 10406 start = memparse(tok, &tok); 10407 if (!start) { 10408 pr_warn("Tracing: Invalid boot instance address for %s\n", 10409 name); 10410 continue; 10411 } 10412 if (*tok != ':') { 10413 pr_warn("Tracing: No size specified for instance %s\n", name); 10414 continue; 10415 } 10416 tok++; 10417 size = memparse(tok, &tok); 10418 if (!size) { 10419 pr_warn("Tracing: Invalid boot instance size for %s\n", 10420 name); 10421 continue; 10422 } 10423 } else if (tok) { 10424 if (!reserve_mem_find_by_name(tok, &start, &size)) { 10425 start = 0; 10426 pr_warn("Failed to map boot instance %s to %s\n", name, tok); 10427 continue; 10428 } 10429 } 10430 10431 if (start) { 10432 addr = map_pages(start, size); 10433 if (addr) { 10434 pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n", 10435 name, &start, (unsigned long)size); 10436 } else { 10437 pr_warn("Tracing: Failed to map boot instance %s\n", name); 10438 continue; 10439 } 10440 } else { 10441 /* Only non mapped buffers have snapshot buffers */ 10442 if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE)) 10443 do_allocate_snapshot(name); 10444 } 10445 10446 tr = trace_array_create_systems(name, NULL, addr, size); 10447 if (IS_ERR(tr)) { 10448 pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str); 10449 continue; 10450 } 10451 10452 if (traceoff) 10453 tracer_tracing_off(tr); 10454 10455 if (traceprintk) 10456 update_printk_trace(tr); 10457 10458 /* 10459 * If start is set, then this is a mapped buffer, and 10460 * cannot be deleted by user space, so keep the reference 10461 * to it. 
10462 */ 10463 if (start) { 10464 tr->flags |= TRACE_ARRAY_FL_BOOT; 10465 tr->ref++; 10466 } 10467 10468 while ((tok = strsep(&curr_str, ","))) { 10469 early_enable_events(tr, tok, true); 10470 } 10471 } 10472 } 10473 10474 __init static int tracer_alloc_buffers(void) 10475 { 10476 int ring_buf_size; 10477 int ret = -ENOMEM; 10478 10479 10480 if (security_locked_down(LOCKDOWN_TRACEFS)) { 10481 pr_warn("Tracing disabled due to lockdown\n"); 10482 return -EPERM; 10483 } 10484 10485 /* 10486 * Make sure we don't accidentally add more trace options 10487 * than we have bits for. 10488 */ 10489 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); 10490 10491 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) 10492 goto out; 10493 10494 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) 10495 goto out_free_buffer_mask; 10496 10497 /* Only allocate trace_printk buffers if a trace_printk exists */ 10498 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt) 10499 /* Must be called before global_trace.buffer is allocated */ 10500 trace_printk_init_buffers(); 10501 10502 /* To save memory, keep the ring buffer size to its minimum */ 10503 if (global_trace.ring_buffer_expanded) 10504 ring_buf_size = trace_buf_size; 10505 else 10506 ring_buf_size = 1; 10507 10508 cpumask_copy(tracing_buffer_mask, cpu_possible_mask); 10509 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); 10510 10511 raw_spin_lock_init(&global_trace.start_lock); 10512 10513 /* 10514 * The prepare callbacks allocates some memory for the ring buffer. We 10515 * don't free the buffer if the CPU goes down. If we were to free 10516 * the buffer, then the user would lose any trace that was in the 10517 * buffer. The memory will be removed once the "instance" is removed. 10518 */ 10519 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, 10520 "trace/RB:prepare", trace_rb_cpu_prepare, 10521 NULL); 10522 if (ret < 0) 10523 goto out_free_cpumask; 10524 /* Used for event triggers */ 10525 ret = -ENOMEM; 10526 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); 10527 if (!temp_buffer) 10528 goto out_rm_hp_state; 10529 10530 if (trace_create_savedcmd() < 0) 10531 goto out_free_temp_buffer; 10532 10533 if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL)) 10534 goto out_free_savedcmd; 10535 10536 /* TODO: make the number of buffers hot pluggable with CPUS */ 10537 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { 10538 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n"); 10539 goto out_free_pipe_cpumask; 10540 } 10541 if (global_trace.buffer_disabled) 10542 tracing_off(); 10543 10544 if (trace_boot_clock) { 10545 ret = tracing_set_clock(&global_trace, trace_boot_clock); 10546 if (ret < 0) 10547 pr_warn("Trace clock %s not defined, going back to default\n", 10548 trace_boot_clock); 10549 } 10550 10551 /* 10552 * register_tracer() might reference current_trace, so it 10553 * needs to be set before we register anything. This is 10554 * just a bootstrap of current_trace anyway. 
10555 */ 10556 global_trace.current_trace = &nop_trace; 10557 10558 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 10559 #ifdef CONFIG_TRACER_MAX_TRACE 10560 spin_lock_init(&global_trace.snapshot_trigger_lock); 10561 #endif 10562 ftrace_init_global_array_ops(&global_trace); 10563 10564 #ifdef CONFIG_MODULES 10565 INIT_LIST_HEAD(&global_trace.mod_events); 10566 #endif 10567 10568 init_trace_flags_index(&global_trace); 10569 10570 register_tracer(&nop_trace); 10571 10572 /* Function tracing may start here (via kernel command line) */ 10573 init_function_trace(); 10574 10575 /* All seems OK, enable tracing */ 10576 tracing_disabled = 0; 10577 10578 atomic_notifier_chain_register(&panic_notifier_list, 10579 &trace_panic_notifier); 10580 10581 register_die_notifier(&trace_die_notifier); 10582 10583 global_trace.flags = TRACE_ARRAY_FL_GLOBAL; 10584 10585 INIT_LIST_HEAD(&global_trace.systems); 10586 INIT_LIST_HEAD(&global_trace.events); 10587 INIT_LIST_HEAD(&global_trace.hist_vars); 10588 INIT_LIST_HEAD(&global_trace.err_log); 10589 list_add(&global_trace.list, &ftrace_trace_arrays); 10590 10591 apply_trace_boot_options(); 10592 10593 register_snapshot_cmd(); 10594 10595 return 0; 10596 10597 out_free_pipe_cpumask: 10598 free_cpumask_var(global_trace.pipe_cpumask); 10599 out_free_savedcmd: 10600 trace_free_saved_cmdlines_buffer(); 10601 out_free_temp_buffer: 10602 ring_buffer_free(temp_buffer); 10603 out_rm_hp_state: 10604 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE); 10605 out_free_cpumask: 10606 free_cpumask_var(global_trace.tracing_cpumask); 10607 out_free_buffer_mask: 10608 free_cpumask_var(tracing_buffer_mask); 10609 out: 10610 return ret; 10611 } 10612 10613 #ifdef CONFIG_FUNCTION_TRACER 10614 /* Used to set module cached ftrace filtering at boot up */ 10615 __init struct trace_array *trace_get_global_array(void) 10616 { 10617 return &global_trace; 10618 } 10619 #endif 10620 10621 void __init ftrace_boot_snapshot(void) 10622 { 10623 #ifdef CONFIG_TRACER_MAX_TRACE 10624 struct trace_array *tr; 10625 10626 if (!snapshot_at_boot) 10627 return; 10628 10629 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 10630 if (!tr->allocated_snapshot) 10631 continue; 10632 10633 tracing_snapshot_instance(tr); 10634 trace_array_puts(tr, "** Boot snapshot taken **\n"); 10635 } 10636 #endif 10637 } 10638 10639 void __init early_trace_init(void) 10640 { 10641 if (tracepoint_printk) { 10642 tracepoint_print_iter = 10643 kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); 10644 if (MEM_FAIL(!tracepoint_print_iter, 10645 "Failed to allocate trace iterator\n")) 10646 tracepoint_printk = 0; 10647 else 10648 static_key_enable(&tracepoint_printk_key.key); 10649 } 10650 tracer_alloc_buffers(); 10651 10652 init_events(); 10653 } 10654 10655 void __init trace_init(void) 10656 { 10657 trace_event_init(); 10658 10659 if (boot_instance_index) 10660 enable_instances(); 10661 } 10662 10663 __init static void clear_boot_tracer(void) 10664 { 10665 /* 10666 * The default tracer at boot buffer is an init section. 10667 * This function is called in lateinit. If we did not 10668 * find the boot tracer, then clear it out, to prevent 10669 * later registration from accessing the buffer that is 10670 * about to be freed. 
10671 */ 10672 if (!default_bootup_tracer) 10673 return; 10674 10675 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", 10676 default_bootup_tracer); 10677 default_bootup_tracer = NULL; 10678 } 10679 10680 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 10681 __init static void tracing_set_default_clock(void) 10682 { 10683 /* sched_clock_stable() is determined in late_initcall */ 10684 if (!trace_boot_clock && !sched_clock_stable()) { 10685 if (security_locked_down(LOCKDOWN_TRACEFS)) { 10686 pr_warn("Can not set tracing clock due to lockdown\n"); 10687 return; 10688 } 10689 10690 printk(KERN_WARNING 10691 "Unstable clock detected, switching default tracing clock to \"global\"\n" 10692 "If you want to keep using the local clock, then add:\n" 10693 " \"trace_clock=local\"\n" 10694 "on the kernel command line\n"); 10695 tracing_set_clock(&global_trace, "global"); 10696 } 10697 } 10698 #else 10699 static inline void tracing_set_default_clock(void) { } 10700 #endif 10701 10702 __init static int late_trace_init(void) 10703 { 10704 if (tracepoint_printk && tracepoint_printk_stop_on_boot) { 10705 static_key_disable(&tracepoint_printk_key.key); 10706 tracepoint_printk = 0; 10707 } 10708 10709 tracing_set_default_clock(); 10710 clear_boot_tracer(); 10711 return 0; 10712 } 10713 10714 late_initcall_sync(late_trace_init); 10715