// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/cleanup.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include <asm/setup.h> /* COMMAND_LINE_SIZE */

#include "trace.h"
#include "trace_output.h"

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing (including tracers/events set via the kernel
 * cmdline) is running, we do not want to run the selftests.
 */
bool __read_mostly tracing_selftest_disabled;

void __init disable_tracing_selftest(const char *reason)
{
        if (!tracing_selftest_disabled) {
                tracing_selftest_disabled = true;
                pr_info("Ftrace startup test is disabled due to %s\n", reason);
        }
}
#else
#define tracing_selftest_running        0
#define tracing_selftest_disabled       0
#endif

/* Pipe tracepoints to printk */
static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set it to 1 if you want to dump the buffers of all CPUs
 * Set it to 2 if you want to dump the buffer of the CPU that triggered
 * the oops
 * Set it to an instance name if you want to dump a specific trace instance
 * Dumping multiple instances is also supported; instances are separated
 * by commas.
 */
/* Set to string format zero to disable by default */
char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
        struct module                   *mod;
        unsigned long                   length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
        /*
         * "end" is first and points to NULL as it must be different
         * from "mod" or "eval_string"
         */
        union trace_eval_map_item       *next;
        const char                      *end;   /* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
        struct trace_eval_map           map;
        struct trace_eval_map_head      head;
        struct trace_eval_map_tail      tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
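/*
 * Illustrative layout of a saved eval-map array (a sketch based on the
 * comment above, not code from this file): with N saved maps the array
 * would look roughly like:
 *
 *   item[0]      head  { .mod = owning module or NULL, .length = N }
 *   item[1..N]   map   { .eval_string, .eval_value, ... }
 *   item[N+1]    tail  { .next = next saved array or NULL }
 *
 * Walking all saved maps therefore means skipping the head item,
 * iterating head.length map items, then following tail.next to the
 * next array.
 */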
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
                                   struct trace_buffer *buffer,
                                   unsigned int trace_ctx);

static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;

static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
static int boot_instance_index;

static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
static int boot_snapshot_index;

static int __init set_cmdline_ftrace(char *str)
{
        strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        trace_set_ring_buffer_expanded(NULL);
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

int ftrace_dump_on_oops_enabled(void)
{
        if (!strcmp("0", ftrace_dump_on_oops))
                return 0;
        else
                return 1;
}

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (!*str) {
                strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
                return 1;
        }

        if (*str == ',') {
                strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
                strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
                return 1;
        }

        if (*str++ == '=') {
                strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        char *slot = boot_snapshot_info + boot_snapshot_index;
        int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
        int ret;

        if (str[0] == '=') {
                str++;
                if (strlen(str) >= left)
                        return -1;

                ret = snprintf(slot, left, "%s\t", str);
                boot_snapshot_index += ret;
        } else {
                allocate_snapshot = true;
                /* We also need the main ring buffer expanded */
                trace_set_ring_buffer_expanded(NULL);
        }
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static int __init boot_snapshot(char *str)
{
        snapshot_at_boot = true;
        boot_alloc_snapshot(str);
        return 1;
}
__setup("ftrace_boot_snapshot", boot_snapshot);


static int __init boot_instance(char *str)
{
        char *slot = boot_instance_info + boot_instance_index;
        int left = sizeof(boot_instance_info) - boot_instance_index;
        int ret;

        if (strlen(str) >= left)
                return -1;

        ret = snprintf(slot, left, "%s\t", str);
        boot_instance_index += ret;

        return 1;
}
__setup("trace_instance=", boot_instance);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
        strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        return 1;
}
__setup("trace_options=", set_trace_boot_options);
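/*
 * Example kernel command lines consumed by the __setup() handlers
 * above (illustrative values only):
 *
 *   ftrace=function_graph trace_buf_size=4M
 *   ftrace_dump_on_oops=2 traceoff_on_warning
 *   trace_instance=foo ftrace_boot_snapshot
 *
 * "ftrace_dump_on_oops" with no argument behaves as "=1" (dump all
 * CPUs), "=2" dumps only the CPU that oopsed, and an instance name
 * dumps that instance; multiple instances may be listed separated by
 * commas.
 */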
static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
        /* Ignore the "tp_printk_stop_on_boot" param */
        if (*str == '_')
                return 0;

        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
}
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
{
        tracepoint_printk_stop_on_boot = true;
        return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);

/* Convert nanoseconds to microseconds, rounding to the nearest usec */
unsigned long long ns2usecs(u64 nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

static void
trace_process_export(struct trace_export *export,
                     struct ring_buffer_event *event, int flag)
{
        struct trace_entry *entry;
        unsigned int size = 0;

        if (export->flags & flag) {
                entry = ring_buffer_event_data(event);
                size = ring_buffer_event_length(event);
                export->write(export, entry, size);
        }
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
        if (export->flags & TRACE_EXPORT_FUNCTION)
                static_branch_inc(&trace_function_exports_enabled);

        if (export->flags & TRACE_EXPORT_EVENT)
                static_branch_inc(&trace_event_exports_enabled);

        if (export->flags & TRACE_EXPORT_MARKER)
                static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
        if (export->flags & TRACE_EXPORT_FUNCTION)
                static_branch_dec(&trace_function_exports_enabled);

        if (export->flags & TRACE_EXPORT_EVENT)
                static_branch_dec(&trace_event_exports_enabled);

        if (export->flags & TRACE_EXPORT_MARKER)
                static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
        struct trace_export *export;

        preempt_disable_notrace();

        export = rcu_dereference_raw_check(ftrace_exports_list);
        while (export) {
                trace_process_export(export, event, flag);
                export = rcu_dereference_raw_check(export->next);
        }

        preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
        rcu_assign_pointer(export->next, *list);
        /*
         * We are entering export into the list but another
         * CPU might be walking that list. We need to make sure
         * the export->next pointer is valid before another CPU sees
         * the export pointer included into the list.
         */
        rcu_assign_pointer(*list, export);
}
static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
        struct trace_export **p;

        for (p = list; *p != NULL; p = &(*p)->next)
                if (*p == export)
                        break;

        if (*p != export)
                return -1;

        rcu_assign_pointer(*p, (*p)->next);

        return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
        ftrace_exports_enable(export);

        add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
        int ret;

        ret = rm_trace_export(list, export);
        ftrace_exports_disable(export);

        return ret;
}

int register_ftrace_export(struct trace_export *export)
{
        if (WARN_ON_ONCE(!export->write))
                return -1;

        mutex_lock(&ftrace_export_lock);

        add_ftrace_export(&ftrace_exports_list, export);

        mutex_unlock(&ftrace_export_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
        int ret;

        mutex_lock(&ftrace_export_lock);

        ret = rm_ftrace_export(&ftrace_exports_list, export);

        mutex_unlock(&ftrace_export_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
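/*
 * Minimal usage sketch (hypothetical module code, not part of this
 * file): a subsystem can mirror trace data to its own sink by
 * registering a trace_export with a write() callback.
 *
 *      static void my_export_write(struct trace_export *export,
 *                                  const void *entry, unsigned int size)
 *      {
 *              // forward the raw entry to a device, buffer, etc.
 *      }
 *
 *      static struct trace_export my_export = {
 *              .flags = TRACE_EXPORT_FUNCTION,
 *              .write = my_export_write,
 *      };
 *
 *      register_ftrace_export(&my_export);
 *      ...
 *      unregister_ftrace_export(&my_export);
 */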
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS                                             \
        (FUNCTION_DEFAULT_FLAGS |                                       \
         TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
         TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |                     \
         TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
               TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
        (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
        .trace_flags = TRACE_DEFAULT_FLAGS,
};

static struct trace_array *printk_trace = &global_trace;

static __always_inline bool printk_binsafe(struct trace_array *tr)
{
        /*
         * The binary format of traceprintk can cause a crash if used
         * by a buffer from another boot. Force the use of the
         * non-binary version of trace_printk if the trace_printk
         * buffer is a boot-mapped ring buffer.
         */
        return !(tr->flags & TRACE_ARRAY_FL_BOOT);
}

static void update_printk_trace(struct trace_array *tr)
{
        if (printk_trace == tr)
                return;

        printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
        printk_trace = tr;
        tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
}

void trace_set_ring_buffer_expanded(struct trace_array *tr)
{
        if (!tr)
                tr = &global_trace;
        tr->ring_buffer_expanded = true;
}

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;

        guard(mutex)(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        return 0;
                }
        }

        return -ENODEV;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
        if (!this_tr)
                return;

        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (ret)
                return ret;

        if (tracing_disabled)
                return -ENODEV;

        if (tr && trace_array_get(tr) < 0)
                return -ENODEV;

        return 0;
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
        return trace_pid_list_is_set(filtered_pids, search_pid);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
                       struct trace_pid_list *filtered_no_pids,
                       struct task_struct *task)
{
        /*
         * If filtered_no_pids is not empty, and the task's pid is listed
         * in filtered_no_pids, then return true.
         * Otherwise, if filtered_pids is empty, that means we can
         * trace all tasks. If it has content, then only trace pids
         * within filtered_pids.
         */

        return (filtered_pids &&
                !trace_find_filtered_pid(filtered_pids, task->pid)) ||
                (filtered_no_pids &&
                 trace_find_filtered_pid(filtered_no_pids, task->pid));
}
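/*
 * Worked example for trace_ignore_this_task() (hypothetical values):
 * with filtered_pids = {1, 2} and filtered_no_pids = {3}:
 *
 *   task pid 1 -> traced   (in filtered_pids, not in filtered_no_pids)
 *   task pid 3 -> ignored  (listed in filtered_no_pids)
 *   task pid 4 -> ignored  (filtered_pids exists but pid is not in it)
 *
 * With both lists NULL, every task is traced.
 */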
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task)
{
        if (!pid_list)
                return;

        /* For forks, we only add if the forking task is listed */
        if (self) {
                if (!trace_find_filtered_pid(pid_list, self->pid))
                        return;
        }

        /* "self" is set for forks, and NULL for exits */
        if (self)
                trace_pid_list_set(pid_list, task->pid);
        else
                trace_pid_list_clear(pid_list, task->pid);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
        long pid = (unsigned long)v;
        unsigned int next;

        (*pos)++;

        /* pid already is +1 of the actual previous bit */
        if (trace_pid_list_next(pid_list, pid, &next) < 0)
                return NULL;

        pid = next;

        /* Return pid + 1 to allow zero to be represented */
        return (void *)(pid + 1);
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
        unsigned long pid;
        unsigned int first;
        loff_t l = 0;

        if (trace_pid_list_first(pid_list, &first) < 0)
                return NULL;

        pid = first;

        /* Return pid + 1 so that zero can be the exit value */
        for (pid++; pid && l < *pos;
             pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
                ;
        return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
        unsigned long pid = (unsigned long)v - 1;

        seq_printf(m, "%lu\n", pid);
        return 0;
}
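/*
 * Illustrative walk-through of the pid+1 encoding used above (values
 * hypothetical): for a pid_list containing {0, 5}, trace_pid_start()
 * returns (void *)1 (pid 0 encoded as 1), trace_pid_next() then
 * returns (void *)6 (pid 5), and the following call returns NULL to
 * end the iteration. trace_pid_show() subtracts the 1 back off before
 * printing. Encoding pid 0 as a non-NULL cookie is what keeps the
 * seq_file core from treating it as end-of-sequence.
 */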
/* 128 should be much more than enough */
#define PID_BUF_SIZE            127

int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt)
{
        struct trace_pid_list *pid_list;
        struct trace_parser parser;
        unsigned long val;
        int nr_pids = 0;
        ssize_t read = 0;
        ssize_t ret;
        loff_t pos;
        pid_t pid;

        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
                return -ENOMEM;

        /*
         * Always recreate a new array. The write is an all or nothing
         * operation. Always create a new array when adding new pids by
         * the user. If the operation fails, then the current list is
         * not modified.
         */
        pid_list = trace_pid_list_alloc();
        if (!pid_list) {
                trace_parser_put(&parser);
                return -ENOMEM;
        }

        if (filtered_pids) {
                /* copy the current bits to the new max */
                ret = trace_pid_list_first(filtered_pids, &pid);
                while (!ret) {
                        trace_pid_list_set(pid_list, pid);
                        ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
                        nr_pids++;
                }
        }

        ret = 0;
        while (cnt > 0) {

                pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &pos);
                if (ret < 0)
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                if (!trace_parser_loaded(&parser))
                        break;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;

                pid = (pid_t)val;

                if (trace_pid_list_set(pid_list, pid) < 0) {
                        ret = -1;
                        break;
                }
                nr_pids++;

                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                trace_pid_list_free(pid_list);
                return ret;
        }

        if (!nr_pids) {
                /* Cleared the list of pids */
                trace_pid_list_free(pid_list);
                pid_list = NULL;
        }

        *new_pid_list = pid_list;

        return read;
}
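/*
 * Usage sketch (shell side; paths assume tracefs is mounted at
 * /sys/kernel/tracing): trace_pid_write() is what ultimately parses
 * writes such as:
 *
 *   echo 123 456 > set_event_pid    # filter events to pids 123 and 456
 *   echo 789 >> set_event_pid       # append: old pids are copied first
 *   echo > set_event_pid            # empty write clears the list
 *
 * Each whitespace-separated token is parsed with trace_get_user() and
 * set in a freshly allocated pid list, so a failed write leaves the
 * old list untouched.
 */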
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

u64 ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a link list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish between read-only and read-consume
 * access. Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}
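/*
 * Illustrative usage of the scheme above (not code from this file):
 * a reader of a single cpu buffer takes the rwsem shared plus that
 * cpu's mutex, while a reader of all buffers takes the rwsem
 * exclusively:
 *
 *      trace_access_lock(cpu);         // cpu id or RING_BUFFER_ALL_CPUS
 *      ... consume events for @cpu ...
 *      trace_access_unlock(cpu);
 *
 * Two readers on different cpus can thus proceed in parallel, but
 * neither can overlap with an ALL_CPUS reader.
 */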
static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_array *tr,
                                 struct trace_buffer *buffer,
                                 unsigned int trace_ctx,
                                 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      unsigned int trace_ctx,
                                      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_array *tr,
                                        struct trace_buffer *buffer,
                                        unsigned int trace_ctx,
                                        int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      unsigned long trace_ctx,
                                      int skip, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
                  int type, unsigned int trace_ctx)
{
        struct trace_entry *ent = ring_buffer_event_data(event);

        tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
                            int type,
                            unsigned long len,
                            unsigned int trace_ctx)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL)
                trace_event_setup(event, type, trace_ctx);

        return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->array_buffer.buffer)
                ring_buffer_record_on(tr->array_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
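/*
 * Usage sketch (hypothetical caller): tracing_on()/tracing_off() let
 * kernel code bracket an interesting window without touching tracer
 * configuration, e.g. freezing the buffers the moment a bug is seen:
 *
 *      if (suspicious_condition) {
 *              trace_printk("state: %d\n", state);
 *              tracing_off();  // keep the lead-up for post-mortem
 *      }
 *
 * The buffers can then be read from /sys/kernel/tracing/trace and
 * re-enabled by writing 1 to /sys/kernel/tracing/tracing_on.
 */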
static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
        __this_cpu_write(trace_taskinfo_save, true);

        /* If this is the temp buffer, we need to commit fully */
        if (this_cpu_read(trace_buffered_event) == event) {
                /* Length is in event->array[0] */
                ring_buffer_write(buffer, event->array[0], &event->array[1]);
                /* Release the temp buffer */
                this_cpu_dec(trace_buffered_event_cnt);
                /* ring_buffer_unlock_commit() enables preemption */
                preempt_enable_notrace();
        } else
                ring_buffer_unlock_commit(buffer);
}

int __trace_array_puts(struct trace_array *tr, unsigned long ip,
                       const char *str, int size)
{
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct print_entry *entry;
        unsigned int trace_ctx;
        int alloc;

        if (!(tr->trace_flags & TRACE_ITER_PRINTK))
                return 0;

        if (unlikely(tracing_selftest_running && tr == &global_trace))
                return 0;

        if (unlikely(tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        trace_ctx = tracing_gen_ctx();
        buffer = tr->array_buffer.buffer;
        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                            trace_ctx);
        if (!event) {
                size = 0;
                goto out;
        }

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 out:
        ring_buffer_nest_end(buffer);
        return size;
}
EXPORT_SYMBOL_GPL(__trace_array_puts);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        return __trace_array_puts(printk_trace, ip, str, size);
}
EXPORT_SYMBOL_GPL(__trace_puts);
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct trace_array *tr = READ_ONCE(printk_trace);
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct bputs_entry *entry;
        unsigned int trace_ctx;
        int size = sizeof(struct bputs_entry);
        int ret = 0;

        if (!printk_binsafe(tr))
                return __trace_puts(ip, str, strlen(str));

        if (!(tr->trace_flags & TRACE_ITER_PRINTK))
                return 0;

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        trace_ctx = tracing_gen_ctx();
        buffer = tr->array_buffer.buffer;

        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                            trace_ctx);
        if (!event)
                goto out;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);

        ret = 1;
 out:
        ring_buffer_nest_end(buffer);
        return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
                                           void *cond_data)
{
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                trace_array_puts(tr, "*** snapshot is being ignored ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
                trace_array_puts(tr, "*** stopping trace here! ***\n");
                tracer_tracing_off(tr);
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
                trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
                return;
        }

        if (tr->mapped) {
                trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
                trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id(), cond_data);
        local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
        tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;

        tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr: The tracing instance to snapshot
 * @cond_data: The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
        tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr: The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
        void *cond_data = NULL;

        local_irq_disable();
        arch_spin_lock(&tr->max_lock);

        if (tr->cond_snapshot)
                cond_data = tr->cond_snapshot->cond_data;

        arch_spin_unlock(&tr->max_lock);
        local_irq_enable();

        return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
                                        struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
        int order;
        int ret;

        if (!tr->allocated_snapshot) {

                /* Make the snapshot buffer have the same order as main buffer */
                order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
                ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
                if (ret < 0)
                        return ret;

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer. Instead, resize it because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}
static int tracing_arm_snapshot_locked(struct trace_array *tr)
{
        int ret;

        lockdep_assert_held(&trace_types_lock);

        spin_lock(&tr->snapshot_trigger_lock);
        if (tr->snapshot == UINT_MAX || tr->mapped) {
                spin_unlock(&tr->snapshot_trigger_lock);
                return -EBUSY;
        }

        tr->snapshot++;
        spin_unlock(&tr->snapshot_trigger_lock);

        ret = tracing_alloc_snapshot_instance(tr);
        if (ret) {
                spin_lock(&tr->snapshot_trigger_lock);
                tr->snapshot--;
                spin_unlock(&tr->snapshot_trigger_lock);
        }

        return ret;
}

int tracing_arm_snapshot(struct trace_array *tr)
{
        int ret;

        mutex_lock(&trace_types_lock);
        ret = tracing_arm_snapshot_locked(tr);
        mutex_unlock(&trace_types_lock);

        return ret;
}

void tracing_disarm_snapshot(struct trace_array *tr)
{
        spin_lock(&tr->snapshot_trigger_lock);
        if (!WARN_ON(!tr->snapshot))
                tr->snapshot--;
        spin_unlock(&tr->snapshot_trigger_lock);
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = tracing_alloc_snapshot_instance(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
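/*
 * Usage sketch (hypothetical caller): code that wants to capture the
 * trace leading up to a rare event could, from sleepable context at
 * init time, do:
 *
 *      tracing_alloc_snapshot();       // pre-allocate the spare buffer
 *
 * and then, from the (possibly atomic) detection path:
 *
 *      if (rare_event_detected)
 *              tracing_snapshot();     // swap the live buffer aside
 *
 * The frozen data is then readable from /sys/kernel/tracing/snapshot
 * while tracing continues in the swapped-in buffer.
 */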
/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr: The tracing instance
 * @cond_data: User data to associate with the snapshot
 * @update: Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
                                 cond_update_fn_t update)
{
        struct cond_snapshot *cond_snapshot __free(kfree) =
                kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
        int ret;

        if (!cond_snapshot)
                return -ENOMEM;

        cond_snapshot->cond_data = cond_data;
        cond_snapshot->update = update;

        guard(mutex)(&trace_types_lock);

        if (tr->current_trace->use_max_tr)
                return -EBUSY;

        /*
         * The cond_snapshot can only change to NULL without the
         * trace_types_lock. We don't care if we race with it going
         * to NULL, but we want to make sure that it's not set to
         * something other than NULL when we get here, which we can
         * do safely with only holding the trace_types_lock and not
         * having to take the max_lock.
         */
        if (tr->cond_snapshot)
                return -EBUSY;

        ret = tracing_arm_snapshot_locked(tr);
        if (ret)
                return ret;

        local_irq_disable();
        arch_spin_lock(&tr->max_lock);
        tr->cond_snapshot = no_free_ptr(cond_snapshot);
        arch_spin_unlock(&tr->max_lock);
        local_irq_enable();

        return 0;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);

/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr: The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
        int ret = 0;

        local_irq_disable();
        arch_spin_lock(&tr->max_lock);

        if (!tr->cond_snapshot)
                ret = -EINVAL;
        else {
                kfree(tr->cond_snapshot);
                tr->cond_snapshot = NULL;
        }

        arch_spin_unlock(&tr->max_lock);
        local_irq_enable();

        tracing_disarm_snapshot(tr);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
        return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
        return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#define free_snapshot(tr)       do { } while (0)
#define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
#endif /* CONFIG_TRACER_SNAPSHOT */
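/*
 * Conditional snapshot usage sketch (hypothetical callback; the
 * cond_update_fn_t signature comes from trace.h): the update()
 * callback decides, under tr->max_lock, whether a queued snapshot
 * should actually happen.
 *
 *      static bool my_update(struct trace_array *tr, void *cond_data)
 *      {
 *              struct my_state *s = cond_data;
 *
 *              return s->hits++ >= s->threshold; // snapshot on Nth hit
 *      }
 *
 *      tracing_snapshot_cond_enable(tr, &my_state, my_update);
 *      ...
 *      tracing_snapshot_cond(tr, &my_state);   // swaps only if true
 *      ...
 *      tracing_snapshot_cond_disable(tr);
 */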
void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->array_buffer.buffer)
                ring_buffer_record_off(tr->array_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning) {
                trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
                        "Disabling tracing due to warning\n");
                tracing_off();
        }
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->array_buffer.buffer)
                return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /*
         * nr_entries can not be zero and the startup
         * tests require some buffer space. Therefore
         * ensure we have at least 4096 bytes of buffer.
         */
        trace_buf_size = max(4096UL, buf_size);
        return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        TRACE_FLAGS
        NULL
};
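/*
 * Illustration of the X-macro trick above (entries abridged): with a
 * definition along the lines of
 *
 *      #define TRACE_FLAGS                             \
 *              C(PRINT_PARENT, "print-parent"),        \
 *              C(SYM_OFFSET,   "sym-offset"),          \
 *              ...
 *
 * defining C(a, b) as "b" expands trace_options[] to
 * { "print-parent", "sym-offset", ..., NULL }, while trace.h defines
 * C differently to build the matching TRACE_ITER_* bit enum, keeping
 * the strings and bit positions in sync from a single list.
 */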
static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,            "local",        1 },
        { trace_clock_global,           "global",       1 },
        { trace_clock_counter,          "counter",      0 },
        { trace_clock_jiffies,          "uptime",       0 },
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
        { ktime_get_boot_fast_ns,       "boot",         1 },
        { ktime_get_tai_fast_ns,        "tai",          1 },
        ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
        if (trace_clocks[tr->clock_id].in_ns)
                return true;

        return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
        parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                parser->idx = 0;

                /* only spaces were written */
                if (isspace(ch) || !ch) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }
        }

        /* read the non-space input */
        while (cnt && !isspace(ch) && ch) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch) || !ch) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
                /* Make sure the parsed string always terminates with '\0'. */
                parser->buffer[parser->idx] = 0;
        } else {
                ret = -EINVAL;
                goto out;
        }

        *ppos += read;
        ret = read;

 out:
        return ret;
}
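/*
 * Worked example (hypothetical input; assumes the file position
 * persists across calls, as in the filter-file write paths): user
 * input "123 45" arriving in two writes, "123 4" then "5\n", is
 * handled as:
 *
 *   call 1: consumes "123 ", buffer = "123", cont = false, returns 4
 *   call 2: consumes "4",    buffer = "4",   cont = true,  returns 1
 *   call 3: consumes "5\n",  buffer = "45",  cont = false, returns 2
 *
 * i.e. a token split across writes is stitched back together via
 * parser->cont rather than being treated as two tokens.
 */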
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (trace_seq_used(s) <= s->readpos)
                return -EBUSY;

        len = trace_seq_used(s) - s->readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->readpos, cnt);

        s->readpos += cnt;
        return cnt;
}

unsigned long __read_mostly     tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops;

#ifdef LATENCY_FS_NOTIFY

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
        struct trace_array *tr = container_of(work, struct trace_array,
                                              fsnotify_work);
        fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
        struct trace_array *tr = container_of(iwork, struct trace_array,
                                              fsnotify_irqwork);
        queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
                                     struct dentry *d_tracer)
{
        INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
        init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
        tr->d_max_latency = trace_create_file("tracing_max_latency",
                                              TRACE_MODE_WRITE,
                                              d_tracer, tr,
                                              &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
        fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
                                      WQ_UNBOUND | WQ_HIGHPRI, 0);
        if (!fsnotify_wq) {
                pr_err("Unable to allocate tr_max_lat_wq\n");
                return -ENOMEM;
        }
        return 0;
}

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
{
        if (!fsnotify_wq)
                return;
        /*
         * We cannot call queue_work(&tr->fsnotify_work) from here because it's
         * possible that we are called from __schedule() or do_idle(), which
         * could cause a deadlock.
         */
        irq_work_queue(&tr->fsnotify_irqwork);
}

#else /* !LATENCY_FS_NOTIFY */

#define trace_create_maxlat_file(tr, d_tracer)                          \
        trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,      \
                          d_tracer, tr, &tracing_max_lat_fops)

#endif
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (This way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency.)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct array_buffer *trace_buf = &tr->array_buffer;
        struct array_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        strscpy(max_data->comm, tsk->comm);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
        latency_fsnotify(tr);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
              void *cond_data)
{
        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        /* Inherit the recordable setting from array_buffer */
        if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
                ring_buffer_record_on(tr->max_buffer.buffer);
        else
                ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
        if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
                arch_spin_unlock(&tr->max_lock);
                return;
        }
#endif
        swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

        __update_max_tr(tr, tsk, cpu);

        arch_spin_unlock(&tr->max_lock);

        /* Any waiters on the old snapshot buffer need to wake up */
        ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
}
1989 */ 1990 void 1991 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) 1992 { 1993 int ret; 1994 1995 if (tr->stop_count) 1996 return; 1997 1998 WARN_ON_ONCE(!irqs_disabled()); 1999 if (!tr->allocated_snapshot) { 2000 /* Only the nop tracer should hit this when disabling */ 2001 WARN_ON_ONCE(tr->current_trace != &nop_trace); 2002 return; 2003 } 2004 2005 arch_spin_lock(&tr->max_lock); 2006 2007 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); 2008 2009 if (ret == -EBUSY) { 2010 /* 2011 * We failed to swap the buffer due to a commit taking 2012 * place on this CPU. We fail to record, but we reset 2013 * the max trace buffer (no one writes directly to it) 2014 * and flag that it failed. 2015 * Another reason is resize is in progress. 2016 */ 2017 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, 2018 "Failed to swap buffers due to commit or resize in progress\n"); 2019 } 2020 2021 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 2022 2023 __update_max_tr(tr, tsk, cpu); 2024 arch_spin_unlock(&tr->max_lock); 2025 } 2026 2027 #endif /* CONFIG_TRACER_MAX_TRACE */ 2028 2029 struct pipe_wait { 2030 struct trace_iterator *iter; 2031 int wait_index; 2032 }; 2033 2034 static bool wait_pipe_cond(void *data) 2035 { 2036 struct pipe_wait *pwait = data; 2037 struct trace_iterator *iter = pwait->iter; 2038 2039 if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index) 2040 return true; 2041 2042 return iter->closed; 2043 } 2044 2045 static int wait_on_pipe(struct trace_iterator *iter, int full) 2046 { 2047 struct pipe_wait pwait; 2048 int ret; 2049 2050 /* Iterators are static, they should be filled or empty */ 2051 if (trace_buffer_iter(iter, iter->cpu_file)) 2052 return 0; 2053 2054 pwait.wait_index = atomic_read_acquire(&iter->wait_index); 2055 pwait.iter = iter; 2056 2057 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full, 2058 wait_pipe_cond, &pwait); 2059 2060 #ifdef CONFIG_TRACER_MAX_TRACE 2061 /* 2062 * Make sure this is still the snapshot buffer, as if a snapshot were 2063 * to happen, this would now be the main buffer. 2064 */ 2065 if (iter->snapshot) 2066 iter->array_buffer = &iter->tr->max_buffer; 2067 #endif 2068 return ret; 2069 } 2070 2071 #ifdef CONFIG_FTRACE_STARTUP_TEST 2072 static bool selftests_can_run; 2073 2074 struct trace_selftests { 2075 struct list_head list; 2076 struct tracer *type; 2077 }; 2078 2079 static LIST_HEAD(postponed_selftests); 2080 2081 static int save_selftest(struct tracer *type) 2082 { 2083 struct trace_selftests *selftest; 2084 2085 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL); 2086 if (!selftest) 2087 return -ENOMEM; 2088 2089 selftest->type = type; 2090 list_add(&selftest->list, &postponed_selftests); 2091 return 0; 2092 } 2093 2094 static int run_tracer_selftest(struct tracer *type) 2095 { 2096 struct trace_array *tr = &global_trace; 2097 struct tracer *saved_tracer = tr->current_trace; 2098 int ret; 2099 2100 if (!type->selftest || tracing_selftest_disabled) 2101 return 0; 2102 2103 /* 2104 * If a tracer registers early in boot up (before scheduling is 2105 * initialized and such), then do not run its selftests yet. 2106 * Instead, run it a little later in the boot process. 2107 */ 2108 if (!selftests_can_run) 2109 return save_selftest(type); 2110 2111 if (!tracing_is_on()) { 2112 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n", 2113 type->name); 2114 return 0; 2115 } 2116 2117 /* 2118 * Run a selftest on this tracer. 
2119 * Here we reset the trace buffer, and set the current 2120 * tracer to be this tracer. The tracer can then run some 2121 * internal tracing to verify that everything is in order. 2122 * If we fail, we do not register this tracer. 2123 */ 2124 tracing_reset_online_cpus(&tr->array_buffer); 2125 2126 tr->current_trace = type; 2127 2128 #ifdef CONFIG_TRACER_MAX_TRACE 2129 if (type->use_max_tr) { 2130 /* If we expanded the buffers, make sure the max is expanded too */ 2131 if (tr->ring_buffer_expanded) 2132 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, 2133 RING_BUFFER_ALL_CPUS); 2134 tr->allocated_snapshot = true; 2135 } 2136 #endif 2137 2138 /* the test is responsible for initializing and enabling */ 2139 pr_info("Testing tracer %s: ", type->name); 2140 ret = type->selftest(type, tr); 2141 /* the test is responsible for resetting too */ 2142 tr->current_trace = saved_tracer; 2143 if (ret) { 2144 printk(KERN_CONT "FAILED!\n"); 2145 /* Add the warning after printing 'FAILED' */ 2146 WARN_ON(1); 2147 return -1; 2148 } 2149 /* Only reset on passing, to avoid touching corrupted buffers */ 2150 tracing_reset_online_cpus(&tr->array_buffer); 2151 2152 #ifdef CONFIG_TRACER_MAX_TRACE 2153 if (type->use_max_tr) { 2154 tr->allocated_snapshot = false; 2155 2156 /* Shrink the max buffer again */ 2157 if (tr->ring_buffer_expanded) 2158 ring_buffer_resize(tr->max_buffer.buffer, 1, 2159 RING_BUFFER_ALL_CPUS); 2160 } 2161 #endif 2162 2163 printk(KERN_CONT "PASSED\n"); 2164 return 0; 2165 } 2166 2167 static int do_run_tracer_selftest(struct tracer *type) 2168 { 2169 int ret; 2170 2171 /* 2172 * Tests can take a long time, especially if they are run one after the 2173 * other, as does happen during bootup when all the tracers are 2174 * registered. This could cause the soft lockup watchdog to trigger. 2175 */ 2176 cond_resched(); 2177 2178 tracing_selftest_running = true; 2179 ret = run_tracer_selftest(type); 2180 tracing_selftest_running = false; 2181 2182 return ret; 2183 } 2184 2185 static __init int init_trace_selftests(void) 2186 { 2187 struct trace_selftests *p, *n; 2188 struct tracer *t, **last; 2189 int ret; 2190 2191 selftests_can_run = true; 2192 2193 guard(mutex)(&trace_types_lock); 2194 2195 if (list_empty(&postponed_selftests)) 2196 return 0; 2197 2198 pr_info("Running postponed tracer tests:\n"); 2199 2200 tracing_selftest_running = true; 2201 list_for_each_entry_safe(p, n, &postponed_selftests, list) { 2202 /* This loop can take minutes when sanitizers are enabled, so 2203 * lets make sure we allow RCU processing. 2204 */ 2205 cond_resched(); 2206 ret = run_tracer_selftest(p->type); 2207 /* If the test fails, then warn and remove from available_tracers */ 2208 if (ret < 0) { 2209 WARN(1, "tracer: %s failed selftest, disabling\n", 2210 p->type->name); 2211 last = &trace_types; 2212 for (t = trace_types; t; t = t->next) { 2213 if (t == p->type) { 2214 *last = t->next; 2215 break; 2216 } 2217 last = &t->next; 2218 } 2219 } 2220 list_del(&p->list); 2221 kfree(p); 2222 } 2223 tracing_selftest_running = false; 2224 2225 return 0; 2226 } 2227 core_initcall(init_trace_selftests); 2228 #else 2229 static inline int do_run_tracer_selftest(struct tracer *type) 2230 { 2231 return 0; 2232 } 2233 #endif /* CONFIG_FTRACE_STARTUP_TEST */ 2234 2235 static void add_tracer_options(struct trace_array *tr, struct tracer *t); 2236 2237 static void __init apply_trace_boot_options(void); 2238 2239 /** 2240 * register_tracer - register a tracer with the ftrace system. 
2241 * @type: the plugin for the tracer
2242 *
2243 * Register a new plugin tracer.
2244 */
2245 int __init register_tracer(struct tracer *type)
2246 {
2247 	struct tracer *t;
2248 	int ret = 0;
2249
2250 	if (!type->name) {
2251 		pr_info("Tracer must have a name\n");
2252 		return -1;
2253 	}
2254
2255 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
2256 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2257 		return -1;
2258 	}
2259
2260 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
2261 		pr_warn("Cannot register tracer %s due to lockdown\n",
2262 			type->name);
2263 		return -EPERM;
2264 	}
2265
2266 	mutex_lock(&trace_types_lock);
2267
2268 	for (t = trace_types; t; t = t->next) {
2269 		if (strcmp(type->name, t->name) == 0) {
2270 			/* already found */
2271 			pr_info("Tracer %s already registered\n",
2272 				type->name);
2273 			ret = -1;
2274 			goto out;
2275 		}
2276 	}
2277
2278 	if (!type->set_flag)
2279 		type->set_flag = &dummy_set_flag;
2280 	if (!type->flags) {
2281 		/* allocate a dummy tracer_flags */
2282 		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2283 		if (!type->flags) {
2284 			ret = -ENOMEM;
2285 			goto out;
2286 		}
2287 		type->flags->val = 0;
2288 		type->flags->opts = dummy_tracer_opt;
2289 	} else
2290 		if (!type->flags->opts)
2291 			type->flags->opts = dummy_tracer_opt;
2292
2293 	/* store the tracer for __set_tracer_option */
2294 	type->flags->trace = type;
2295
2296 	ret = do_run_tracer_selftest(type);
2297 	if (ret < 0)
2298 		goto out;
2299
2300 	type->next = trace_types;
2301 	trace_types = type;
2302 	add_tracer_options(&global_trace, type);
2303
2304  out:
2305 	mutex_unlock(&trace_types_lock);
2306
2307 	if (ret || !default_bootup_tracer)
2308 		goto out_unlock;
2309
2310 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2311 		goto out_unlock;
2312
2313 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2314 	/* Do we want this tracer to start on bootup? */
2315 	tracing_set_tracer(&global_trace, type->name);
2316 	default_bootup_tracer = NULL;
2317
2318 	apply_trace_boot_options();
2319
2320 	/* disable other selftests, since this will break them.
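	 * A tracer that starts at boot keeps writing into the very
	 * buffers that any not-yet-run selftest would need to be quiet,
	 * so the remaining tests could no longer be trusted.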
*/ 2321 disable_tracing_selftest("running a tracer"); 2322 2323 out_unlock: 2324 return ret; 2325 } 2326 2327 static void tracing_reset_cpu(struct array_buffer *buf, int cpu) 2328 { 2329 struct trace_buffer *buffer = buf->buffer; 2330 2331 if (!buffer) 2332 return; 2333 2334 ring_buffer_record_disable(buffer); 2335 2336 /* Make sure all commits have finished */ 2337 synchronize_rcu(); 2338 ring_buffer_reset_cpu(buffer, cpu); 2339 2340 ring_buffer_record_enable(buffer); 2341 } 2342 2343 void tracing_reset_online_cpus(struct array_buffer *buf) 2344 { 2345 struct trace_buffer *buffer = buf->buffer; 2346 2347 if (!buffer) 2348 return; 2349 2350 ring_buffer_record_disable(buffer); 2351 2352 /* Make sure all commits have finished */ 2353 synchronize_rcu(); 2354 2355 buf->time_start = buffer_ftrace_now(buf, buf->cpu); 2356 2357 ring_buffer_reset_online_cpus(buffer); 2358 2359 ring_buffer_record_enable(buffer); 2360 } 2361 2362 static void tracing_reset_all_cpus(struct array_buffer *buf) 2363 { 2364 struct trace_buffer *buffer = buf->buffer; 2365 2366 if (!buffer) 2367 return; 2368 2369 ring_buffer_record_disable(buffer); 2370 2371 /* Make sure all commits have finished */ 2372 synchronize_rcu(); 2373 2374 buf->time_start = buffer_ftrace_now(buf, buf->cpu); 2375 2376 ring_buffer_reset(buffer); 2377 2378 ring_buffer_record_enable(buffer); 2379 } 2380 2381 /* Must have trace_types_lock held */ 2382 void tracing_reset_all_online_cpus_unlocked(void) 2383 { 2384 struct trace_array *tr; 2385 2386 lockdep_assert_held(&trace_types_lock); 2387 2388 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 2389 if (!tr->clear_trace) 2390 continue; 2391 tr->clear_trace = false; 2392 tracing_reset_online_cpus(&tr->array_buffer); 2393 #ifdef CONFIG_TRACER_MAX_TRACE 2394 tracing_reset_online_cpus(&tr->max_buffer); 2395 #endif 2396 } 2397 } 2398 2399 void tracing_reset_all_online_cpus(void) 2400 { 2401 mutex_lock(&trace_types_lock); 2402 tracing_reset_all_online_cpus_unlocked(); 2403 mutex_unlock(&trace_types_lock); 2404 } 2405 2406 int is_tracing_stopped(void) 2407 { 2408 return global_trace.stop_count; 2409 } 2410 2411 static void tracing_start_tr(struct trace_array *tr) 2412 { 2413 struct trace_buffer *buffer; 2414 unsigned long flags; 2415 2416 if (tracing_disabled) 2417 return; 2418 2419 raw_spin_lock_irqsave(&tr->start_lock, flags); 2420 if (--tr->stop_count) { 2421 if (WARN_ON_ONCE(tr->stop_count < 0)) { 2422 /* Someone screwed up their debugging */ 2423 tr->stop_count = 0; 2424 } 2425 goto out; 2426 } 2427 2428 /* Prevent the buffers from switching */ 2429 arch_spin_lock(&tr->max_lock); 2430 2431 buffer = tr->array_buffer.buffer; 2432 if (buffer) 2433 ring_buffer_record_enable(buffer); 2434 2435 #ifdef CONFIG_TRACER_MAX_TRACE 2436 buffer = tr->max_buffer.buffer; 2437 if (buffer) 2438 ring_buffer_record_enable(buffer); 2439 #endif 2440 2441 arch_spin_unlock(&tr->max_lock); 2442 2443 out: 2444 raw_spin_unlock_irqrestore(&tr->start_lock, flags); 2445 } 2446 2447 /** 2448 * tracing_start - quick start of the tracer 2449 * 2450 * If tracing is enabled but was stopped by tracing_stop, 2451 * this will start the tracer back up. 
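 *
 * Illustrative pairing (a sketch; dump_device_state() is a stand-in
 * for whatever the caller wants to do while tracing is quiesced):
 *
 *	tracing_stop();
 *	dump_device_state();
 *	tracing_start();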
2452 */
2453 void tracing_start(void)
2455 {
2456 	return tracing_start_tr(&global_trace);
2457 }
2458
2459 static void tracing_stop_tr(struct trace_array *tr)
2460 {
2461 	struct trace_buffer *buffer;
2462 	unsigned long flags;
2463
2464 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2465 	if (tr->stop_count++)
2466 		goto out;
2467
2468 	/* Prevent the buffers from switching */
2469 	arch_spin_lock(&tr->max_lock);
2470
2471 	buffer = tr->array_buffer.buffer;
2472 	if (buffer)
2473 		ring_buffer_record_disable(buffer);
2474
2475 #ifdef CONFIG_TRACER_MAX_TRACE
2476 	buffer = tr->max_buffer.buffer;
2477 	if (buffer)
2478 		ring_buffer_record_disable(buffer);
2479 #endif
2480
2481 	arch_spin_unlock(&tr->max_lock);
2482
2483  out:
2484 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2485 }
2486
2487 /**
2488 * tracing_stop - quick stop of the tracer
2489 *
2490 * Lightweight way to stop tracing. Use in conjunction with
2491 * tracing_start.
2492 */
2493 void tracing_stop(void)
2494 {
2495 	return tracing_stop_tr(&global_trace);
2496 }
2497
2498 /*
2499 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2500 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2501 * simplifies those functions and keeps them in sync.
2502 */
2503 enum print_line_t trace_handle_return(struct trace_seq *s)
2504 {
2505 	return trace_seq_has_overflowed(s) ?
2506 		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2507 }
2508 EXPORT_SYMBOL_GPL(trace_handle_return);
2509
2510 static unsigned short migration_disable_value(void)
2511 {
2512 #if defined(CONFIG_SMP)
2513 	return current->migration_disabled;
2514 #else
2515 	return 0;
2516 #endif
2517 }
2518
2519 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2520 {
2521 	unsigned int trace_flags = irqs_status;
2522 	unsigned int pc;
2523
2524 	pc = preempt_count();
2525
2526 	if (pc & NMI_MASK)
2527 		trace_flags |= TRACE_FLAG_NMI;
2528 	if (pc & HARDIRQ_MASK)
2529 		trace_flags |= TRACE_FLAG_HARDIRQ;
2530 	if (in_serving_softirq())
2531 		trace_flags |= TRACE_FLAG_SOFTIRQ;
2532 	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2533 		trace_flags |= TRACE_FLAG_BH_OFF;
2534
2535 	if (tif_need_resched())
2536 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2537 	if (test_preempt_need_resched())
2538 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2539 	if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY))
2540 		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
2541 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2542 		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2543 }
2544
2545 struct ring_buffer_event *
2546 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2547 			  int type,
2548 			  unsigned long len,
2549 			  unsigned int trace_ctx)
2550 {
2551 	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2552 }
2553
2554 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2555 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2556 static int trace_buffered_event_ref;
2557
2558 /**
2559 * trace_buffered_event_enable - enable buffering events
2560 *
2561 * When events are being filtered, it is quicker to use a temporary
2562 * buffer to write the event data into if there's a likely chance
2563 * that it will not be committed. The discard of the ring buffer
2564 * is not as fast as committing, and is much slower than copying
2565 * a commit.
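 * (Discarding has to undo a reservation in the ring buffer, which
 * takes several atomic operations to get right; copying a small,
 * already-filtered event into the ring buffer is cheaper.)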
2566 * 2567 * When an event is to be filtered, allocate per cpu buffers to 2568 * write the event data into, and if the event is filtered and discarded 2569 * it is simply dropped, otherwise, the entire data is to be committed 2570 * in one shot. 2571 */ 2572 void trace_buffered_event_enable(void) 2573 { 2574 struct ring_buffer_event *event; 2575 struct page *page; 2576 int cpu; 2577 2578 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2579 2580 if (trace_buffered_event_ref++) 2581 return; 2582 2583 for_each_tracing_cpu(cpu) { 2584 page = alloc_pages_node(cpu_to_node(cpu), 2585 GFP_KERNEL | __GFP_NORETRY, 0); 2586 /* This is just an optimization and can handle failures */ 2587 if (!page) { 2588 pr_err("Failed to allocate event buffer\n"); 2589 break; 2590 } 2591 2592 event = page_address(page); 2593 memset(event, 0, sizeof(*event)); 2594 2595 per_cpu(trace_buffered_event, cpu) = event; 2596 2597 preempt_disable(); 2598 if (cpu == smp_processor_id() && 2599 __this_cpu_read(trace_buffered_event) != 2600 per_cpu(trace_buffered_event, cpu)) 2601 WARN_ON_ONCE(1); 2602 preempt_enable(); 2603 } 2604 } 2605 2606 static void enable_trace_buffered_event(void *data) 2607 { 2608 /* Probably not needed, but do it anyway */ 2609 smp_rmb(); 2610 this_cpu_dec(trace_buffered_event_cnt); 2611 } 2612 2613 static void disable_trace_buffered_event(void *data) 2614 { 2615 this_cpu_inc(trace_buffered_event_cnt); 2616 } 2617 2618 /** 2619 * trace_buffered_event_disable - disable buffering events 2620 * 2621 * When a filter is removed, it is faster to not use the buffered 2622 * events, and to commit directly into the ring buffer. Free up 2623 * the temp buffers when there are no more users. This requires 2624 * special synchronization with current events. 2625 */ 2626 void trace_buffered_event_disable(void) 2627 { 2628 int cpu; 2629 2630 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2631 2632 if (WARN_ON_ONCE(!trace_buffered_event_ref)) 2633 return; 2634 2635 if (--trace_buffered_event_ref) 2636 return; 2637 2638 /* For each CPU, set the buffer as used. */ 2639 on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event, 2640 NULL, true); 2641 2642 /* Wait for all current users to finish */ 2643 synchronize_rcu(); 2644 2645 for_each_tracing_cpu(cpu) { 2646 free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); 2647 per_cpu(trace_buffered_event, cpu) = NULL; 2648 } 2649 2650 /* 2651 * Wait for all CPUs that potentially started checking if they can use 2652 * their event buffer only after the previous synchronize_rcu() call and 2653 * they still read a valid pointer from trace_buffered_event. It must be 2654 * ensured they don't see cleared trace_buffered_event_cnt else they 2655 * could wrongly decide to use the pointed-to buffer which is now freed. 
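	 * (Only after this second grace period is it safe to let the
	 * CPUs use the per-CPU buffers again, which is what the
	 * enable_trace_buffered_event() calls below do by decrementing
	 * trace_buffered_event_cnt.)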
2656 	 */
2657 	synchronize_rcu();
2658
2659 	/* For each CPU, relinquish the buffer */
2660 	on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2661 			 true);
2662 }
2663
2664 static struct trace_buffer *temp_buffer;
2665
2666 struct ring_buffer_event *
2667 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2668 			  struct trace_event_file *trace_file,
2669 			  int type, unsigned long len,
2670 			  unsigned int trace_ctx)
2671 {
2672 	struct ring_buffer_event *entry;
2673 	struct trace_array *tr = trace_file->tr;
2674 	int val;
2675
2676 	*current_rb = tr->array_buffer.buffer;
2677
2678 	if (!tr->no_filter_buffering_ref &&
2679 	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2680 		preempt_disable_notrace();
2681 		/*
2682 		 * Filtering is on, so try to use the per cpu buffer first.
2683 		 * This buffer will simulate a ring_buffer_event,
2684 		 * where the type_len is zero and the array[0] will
2685 		 * hold the full length.
2686 		 * (see include/linux/ring_buffer.h for details on
2687 		 * how the ring_buffer_event is structured).
2688 		 *
2689 		 * Using a temp buffer during filtering and copying it
2690 		 * on a matched filter is quicker than writing directly
2691 		 * into the ring buffer and then discarding it when
2692 		 * it doesn't match. That is because the discard
2693 		 * requires several atomic operations to get right.
2694 		 * Copying on match and doing nothing on a failed match
2695 		 * is still quicker than no copy on match, but having
2696 		 * to discard out of the ring buffer on a failed match.
2697 		 */
2698 		if ((entry = __this_cpu_read(trace_buffered_event))) {
2699 			int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2700
2701 			val = this_cpu_inc_return(trace_buffered_event_cnt);
2702
2703 			/*
2704 			 * Preemption is disabled, but interrupts and NMIs
2705 			 * can still come in now. If that happens after
2706 			 * the above increment, then it will have to go
2707 			 * back to the old method of allocating the event
2708 			 * on the ring buffer, and if the filter fails, it
2709 			 * will have to call ring_buffer_discard_commit()
2710 			 * to remove it.
2711 			 *
2712 			 * Need to also check the unlikely case that the
2713 			 * length is bigger than the temp buffer size.
2714 			 * If that happens, then the reserve is pretty much
2715 			 * guaranteed to fail, as the ring buffer currently
2716 			 * only allows events less than a page. But that may
2717 			 * change in the future, so let the ring buffer reserve
2718 			 * handle the failure in that case.
2719 			 */
2720 			if (val == 1 && likely(len <= max_len)) {
2721 				trace_event_setup(entry, type, trace_ctx);
2722 				entry->array[0] = len;
2723 				/* Return with preemption disabled */
2724 				return entry;
2725 			}
2726 			this_cpu_dec(trace_buffered_event_cnt);
2727 		}
2728 		/* __trace_buffer_lock_reserve() disables preemption */
2729 		preempt_enable_notrace();
2730 	}
2731
2732 	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2733 					    trace_ctx);
2734 	/*
2735 	 * If tracing is off, but we have triggers enabled
2736 	 * we still need to look at the event data. Use the temp_buffer
2737 	 * to store the trace event for the trigger to use. It's recursion
2738 	 * safe and will not be recorded anywhere.
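	 * (temp_buffer is a small stand-alone ring buffer set up at
	 * boot; nothing ever reads it back, it only gives the trigger
	 * code a valid event to inspect.)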
2739 */ 2740 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { 2741 *current_rb = temp_buffer; 2742 entry = __trace_buffer_lock_reserve(*current_rb, type, len, 2743 trace_ctx); 2744 } 2745 return entry; 2746 } 2747 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); 2748 2749 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock); 2750 static DEFINE_MUTEX(tracepoint_printk_mutex); 2751 2752 static void output_printk(struct trace_event_buffer *fbuffer) 2753 { 2754 struct trace_event_call *event_call; 2755 struct trace_event_file *file; 2756 struct trace_event *event; 2757 unsigned long flags; 2758 struct trace_iterator *iter = tracepoint_print_iter; 2759 2760 /* We should never get here if iter is NULL */ 2761 if (WARN_ON_ONCE(!iter)) 2762 return; 2763 2764 event_call = fbuffer->trace_file->event_call; 2765 if (!event_call || !event_call->event.funcs || 2766 !event_call->event.funcs->trace) 2767 return; 2768 2769 file = fbuffer->trace_file; 2770 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || 2771 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && 2772 !filter_match_preds(file->filter, fbuffer->entry))) 2773 return; 2774 2775 event = &fbuffer->trace_file->event_call->event; 2776 2777 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags); 2778 trace_seq_init(&iter->seq); 2779 iter->ent = fbuffer->entry; 2780 event_call->event.funcs->trace(iter, 0, event); 2781 trace_seq_putc(&iter->seq, 0); 2782 printk("%s", iter->seq.buffer); 2783 2784 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags); 2785 } 2786 2787 int tracepoint_printk_sysctl(const struct ctl_table *table, int write, 2788 void *buffer, size_t *lenp, 2789 loff_t *ppos) 2790 { 2791 int save_tracepoint_printk; 2792 int ret; 2793 2794 guard(mutex)(&tracepoint_printk_mutex); 2795 save_tracepoint_printk = tracepoint_printk; 2796 2797 ret = proc_dointvec(table, write, buffer, lenp, ppos); 2798 2799 /* 2800 * This will force exiting early, as tracepoint_printk 2801 * is always zero when tracepoint_printk_iter is not allocated 2802 */ 2803 if (!tracepoint_print_iter) 2804 tracepoint_printk = 0; 2805 2806 if (save_tracepoint_printk == tracepoint_printk) 2807 return ret; 2808 2809 if (tracepoint_printk) 2810 static_key_enable(&tracepoint_printk_key.key); 2811 else 2812 static_key_disable(&tracepoint_printk_key.key); 2813 2814 return ret; 2815 } 2816 2817 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) 2818 { 2819 enum event_trigger_type tt = ETT_NONE; 2820 struct trace_event_file *file = fbuffer->trace_file; 2821 2822 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event, 2823 fbuffer->entry, &tt)) 2824 goto discard; 2825 2826 if (static_key_false(&tracepoint_printk_key.key)) 2827 output_printk(fbuffer); 2828 2829 if (static_branch_unlikely(&trace_event_exports_enabled)) 2830 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); 2831 2832 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, 2833 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs); 2834 2835 discard: 2836 if (tt) 2837 event_triggers_post_call(file, tt); 2838 2839 } 2840 EXPORT_SYMBOL_GPL(trace_event_buffer_commit); 2841 2842 /* 2843 * Skip 3: 2844 * 2845 * trace_buffer_unlock_commit_regs() 2846 * trace_event_buffer_commit() 2847 * trace_event_raw_event_xxx() 2848 */ 2849 # define STACK_SKIP 3 2850 2851 void trace_buffer_unlock_commit_regs(struct trace_array *tr, 2852 struct trace_buffer *buffer, 2853 struct ring_buffer_event *event, 2854 unsigned int trace_ctx, 2855 struct pt_regs *regs) 2856 { 2857 
__buffer_unlock_commit(buffer, event); 2858 2859 /* 2860 * If regs is not set, then skip the necessary functions. 2861 * Note, we can still get here via blktrace, wakeup tracer 2862 * and mmiotrace, but that's ok if they lose a function or 2863 * two. They are not that meaningful. 2864 */ 2865 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs); 2866 ftrace_trace_userstack(tr, buffer, trace_ctx); 2867 } 2868 2869 /* 2870 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. 2871 */ 2872 void 2873 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, 2874 struct ring_buffer_event *event) 2875 { 2876 __buffer_unlock_commit(buffer, event); 2877 } 2878 2879 void 2880 trace_function(struct trace_array *tr, unsigned long ip, unsigned long 2881 parent_ip, unsigned int trace_ctx) 2882 { 2883 struct trace_buffer *buffer = tr->array_buffer.buffer; 2884 struct ring_buffer_event *event; 2885 struct ftrace_entry *entry; 2886 2887 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), 2888 trace_ctx); 2889 if (!event) 2890 return; 2891 entry = ring_buffer_event_data(event); 2892 entry->ip = ip; 2893 entry->parent_ip = parent_ip; 2894 2895 if (static_branch_unlikely(&trace_function_exports_enabled)) 2896 ftrace_exports(event, TRACE_EXPORT_FUNCTION); 2897 __buffer_unlock_commit(buffer, event); 2898 } 2899 2900 #ifdef CONFIG_STACKTRACE 2901 2902 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */ 2903 #define FTRACE_KSTACK_NESTING 4 2904 2905 #define FTRACE_KSTACK_ENTRIES (SZ_4K / FTRACE_KSTACK_NESTING) 2906 2907 struct ftrace_stack { 2908 unsigned long calls[FTRACE_KSTACK_ENTRIES]; 2909 }; 2910 2911 2912 struct ftrace_stacks { 2913 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING]; 2914 }; 2915 2916 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); 2917 static DEFINE_PER_CPU(int, ftrace_stack_reserve); 2918 2919 static void __ftrace_trace_stack(struct trace_array *tr, 2920 struct trace_buffer *buffer, 2921 unsigned int trace_ctx, 2922 int skip, struct pt_regs *regs) 2923 { 2924 struct ring_buffer_event *event; 2925 unsigned int size, nr_entries; 2926 struct ftrace_stack *fstack; 2927 struct stack_entry *entry; 2928 int stackidx; 2929 2930 /* 2931 * Add one, for this function and the call to save_stack_trace() 2932 * If regs is set, then these functions will not be in the way. 2933 */ 2934 #ifndef CONFIG_UNWINDER_ORC 2935 if (!regs) 2936 skip++; 2937 #endif 2938 2939 preempt_disable_notrace(); 2940 2941 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; 2942 2943 /* This should never happen. If it does, yell once and skip */ 2944 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) 2945 goto out; 2946 2947 /* 2948 * The above __this_cpu_inc_return() is 'atomic' cpu local. An 2949 * interrupt will either see the value pre increment or post 2950 * increment. If the interrupt happens pre increment it will have 2951 * restored the counter when it returns. We just need a barrier to 2952 * keep gcc from moving things around. 
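	 * (The resulting stackidx selects one of the FTRACE_KSTACK_NESTING
	 * per-CPU stack slots, one per context that can interrupt another:
	 * task, softirq, hardirq and NMI.)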
2953 */ 2954 barrier(); 2955 2956 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx; 2957 size = ARRAY_SIZE(fstack->calls); 2958 2959 if (regs) { 2960 nr_entries = stack_trace_save_regs(regs, fstack->calls, 2961 size, skip); 2962 } else { 2963 nr_entries = stack_trace_save(fstack->calls, size, skip); 2964 } 2965 2966 #ifdef CONFIG_DYNAMIC_FTRACE 2967 /* Mark entry of stack trace as trampoline code */ 2968 if (tr->ops && tr->ops->trampoline) { 2969 unsigned long tramp_start = tr->ops->trampoline; 2970 unsigned long tramp_end = tramp_start + tr->ops->trampoline_size; 2971 unsigned long *calls = fstack->calls; 2972 2973 for (int i = 0; i < nr_entries; i++) { 2974 if (calls[i] >= tramp_start && calls[i] < tramp_end) 2975 calls[i] = FTRACE_TRAMPOLINE_MARKER; 2976 } 2977 } 2978 #endif 2979 2980 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, 2981 struct_size(entry, caller, nr_entries), 2982 trace_ctx); 2983 if (!event) 2984 goto out; 2985 entry = ring_buffer_event_data(event); 2986 2987 entry->size = nr_entries; 2988 memcpy(&entry->caller, fstack->calls, 2989 flex_array_size(entry, caller, nr_entries)); 2990 2991 __buffer_unlock_commit(buffer, event); 2992 2993 out: 2994 /* Again, don't let gcc optimize things here */ 2995 barrier(); 2996 __this_cpu_dec(ftrace_stack_reserve); 2997 preempt_enable_notrace(); 2998 2999 } 3000 3001 static inline void ftrace_trace_stack(struct trace_array *tr, 3002 struct trace_buffer *buffer, 3003 unsigned int trace_ctx, 3004 int skip, struct pt_regs *regs) 3005 { 3006 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) 3007 return; 3008 3009 __ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs); 3010 } 3011 3012 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, 3013 int skip) 3014 { 3015 struct trace_buffer *buffer = tr->array_buffer.buffer; 3016 3017 if (rcu_is_watching()) { 3018 __ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL); 3019 return; 3020 } 3021 3022 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY))) 3023 return; 3024 3025 /* 3026 * When an NMI triggers, RCU is enabled via ct_nmi_enter(), 3027 * but if the above rcu_is_watching() failed, then the NMI 3028 * triggered someplace critical, and ct_irq_enter() should 3029 * not be called from NMI. 3030 */ 3031 if (unlikely(in_nmi())) 3032 return; 3033 3034 ct_irq_enter_irqson(); 3035 __ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL); 3036 ct_irq_exit_irqson(); 3037 } 3038 3039 /** 3040 * trace_dump_stack - record a stack back trace in the trace buffer 3041 * @skip: Number of functions to skip (helper handlers) 3042 */ 3043 void trace_dump_stack(int skip) 3044 { 3045 if (tracing_disabled || tracing_selftest_running) 3046 return; 3047 3048 #ifndef CONFIG_UNWINDER_ORC 3049 /* Skip 1 to skip this function. */ 3050 skip++; 3051 #endif 3052 __ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer, 3053 tracing_gen_ctx(), skip, NULL); 3054 } 3055 EXPORT_SYMBOL_GPL(trace_dump_stack); 3056 3057 #ifdef CONFIG_USER_STACKTRACE_SUPPORT 3058 static DEFINE_PER_CPU(int, user_stack_count); 3059 3060 static void 3061 ftrace_trace_userstack(struct trace_array *tr, 3062 struct trace_buffer *buffer, unsigned int trace_ctx) 3063 { 3064 struct ring_buffer_event *event; 3065 struct userstack_entry *entry; 3066 3067 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) 3068 return; 3069 3070 /* 3071 * NMIs can not handle page faults, even with fix ups. 3072 * The save user stack can (and often does) fault. 
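	 * (That is, stack_trace_save_user() may have to fault in user
	 * pages to walk the stack, and page faults cannot be serviced
	 * from NMI context.)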
3073 */ 3074 if (unlikely(in_nmi())) 3075 return; 3076 3077 /* 3078 * prevent recursion, since the user stack tracing may 3079 * trigger other kernel events. 3080 */ 3081 preempt_disable(); 3082 if (__this_cpu_read(user_stack_count)) 3083 goto out; 3084 3085 __this_cpu_inc(user_stack_count); 3086 3087 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 3088 sizeof(*entry), trace_ctx); 3089 if (!event) 3090 goto out_drop_count; 3091 entry = ring_buffer_event_data(event); 3092 3093 entry->tgid = current->tgid; 3094 memset(&entry->caller, 0, sizeof(entry->caller)); 3095 3096 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); 3097 __buffer_unlock_commit(buffer, event); 3098 3099 out_drop_count: 3100 __this_cpu_dec(user_stack_count); 3101 out: 3102 preempt_enable(); 3103 } 3104 #else /* CONFIG_USER_STACKTRACE_SUPPORT */ 3105 static void ftrace_trace_userstack(struct trace_array *tr, 3106 struct trace_buffer *buffer, 3107 unsigned int trace_ctx) 3108 { 3109 } 3110 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ 3111 3112 #endif /* CONFIG_STACKTRACE */ 3113 3114 static inline void 3115 func_repeats_set_delta_ts(struct func_repeats_entry *entry, 3116 unsigned long long delta) 3117 { 3118 entry->bottom_delta_ts = delta & U32_MAX; 3119 entry->top_delta_ts = (delta >> 32); 3120 } 3121 3122 void trace_last_func_repeats(struct trace_array *tr, 3123 struct trace_func_repeats *last_info, 3124 unsigned int trace_ctx) 3125 { 3126 struct trace_buffer *buffer = tr->array_buffer.buffer; 3127 struct func_repeats_entry *entry; 3128 struct ring_buffer_event *event; 3129 u64 delta; 3130 3131 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS, 3132 sizeof(*entry), trace_ctx); 3133 if (!event) 3134 return; 3135 3136 delta = ring_buffer_event_time_stamp(buffer, event) - 3137 last_info->ts_last_call; 3138 3139 entry = ring_buffer_event_data(event); 3140 entry->ip = last_info->ip; 3141 entry->parent_ip = last_info->parent_ip; 3142 entry->count = last_info->count; 3143 func_repeats_set_delta_ts(entry, delta); 3144 3145 __buffer_unlock_commit(buffer, event); 3146 } 3147 3148 /* created for use with alloc_percpu */ 3149 struct trace_buffer_struct { 3150 int nesting; 3151 char buffer[4][TRACE_BUF_SIZE]; 3152 }; 3153 3154 static struct trace_buffer_struct __percpu *trace_percpu_buffer; 3155 3156 /* 3157 * This allows for lockless recording. If we're nested too deeply, then 3158 * this returns NULL. 
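 *
 * Illustrative call pattern (this mirrors how __trace_array_vprintk()
 * below uses it; preemption must already be disabled):
 *
 *	buf = get_trace_buf();
 *	if (buf) {
 *		len = vscnprintf(buf, TRACE_BUF_SIZE, fmt, args);
 *		...
 *		put_trace_buf();
 *	}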
3159 */ 3160 static char *get_trace_buf(void) 3161 { 3162 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); 3163 3164 if (!trace_percpu_buffer || buffer->nesting >= 4) 3165 return NULL; 3166 3167 buffer->nesting++; 3168 3169 /* Interrupts must see nesting incremented before we use the buffer */ 3170 barrier(); 3171 return &buffer->buffer[buffer->nesting - 1][0]; 3172 } 3173 3174 static void put_trace_buf(void) 3175 { 3176 /* Don't let the decrement of nesting leak before this */ 3177 barrier(); 3178 this_cpu_dec(trace_percpu_buffer->nesting); 3179 } 3180 3181 static int alloc_percpu_trace_buffer(void) 3182 { 3183 struct trace_buffer_struct __percpu *buffers; 3184 3185 if (trace_percpu_buffer) 3186 return 0; 3187 3188 buffers = alloc_percpu(struct trace_buffer_struct); 3189 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) 3190 return -ENOMEM; 3191 3192 trace_percpu_buffer = buffers; 3193 return 0; 3194 } 3195 3196 static int buffers_allocated; 3197 3198 void trace_printk_init_buffers(void) 3199 { 3200 if (buffers_allocated) 3201 return; 3202 3203 if (alloc_percpu_trace_buffer()) 3204 return; 3205 3206 /* trace_printk() is for debug use only. Don't use it in production. */ 3207 3208 pr_warn("\n"); 3209 pr_warn("**********************************************************\n"); 3210 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3211 pr_warn("** **\n"); 3212 pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); 3213 pr_warn("** **\n"); 3214 pr_warn("** This means that this is a DEBUG kernel and it is **\n"); 3215 pr_warn("** unsafe for production use. **\n"); 3216 pr_warn("** **\n"); 3217 pr_warn("** If you see this message and you are not debugging **\n"); 3218 pr_warn("** the kernel, report this immediately to your vendor! **\n"); 3219 pr_warn("** **\n"); 3220 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3221 pr_warn("**********************************************************\n"); 3222 3223 /* Expand the buffers to set size */ 3224 tracing_update_buffers(&global_trace); 3225 3226 buffers_allocated = 1; 3227 3228 /* 3229 * trace_printk_init_buffers() can be called by modules. 3230 * If that happens, then we need to start cmdline recording 3231 * directly here. If the global_trace.buffer is already 3232 * allocated here, then this was called by module code. 
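	 * (Built-in callers run before the buffer is allocated; for
	 * them, cmdline recording is instead started later in boot,
	 * see trace_printk_start_comm() below.)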
3233 */ 3234 if (global_trace.array_buffer.buffer) 3235 tracing_start_cmdline_record(); 3236 } 3237 EXPORT_SYMBOL_GPL(trace_printk_init_buffers); 3238 3239 void trace_printk_start_comm(void) 3240 { 3241 /* Start tracing comms if trace printk is set */ 3242 if (!buffers_allocated) 3243 return; 3244 tracing_start_cmdline_record(); 3245 } 3246 3247 static void trace_printk_start_stop_comm(int enabled) 3248 { 3249 if (!buffers_allocated) 3250 return; 3251 3252 if (enabled) 3253 tracing_start_cmdline_record(); 3254 else 3255 tracing_stop_cmdline_record(); 3256 } 3257 3258 /** 3259 * trace_vbprintk - write binary msg to tracing buffer 3260 * @ip: The address of the caller 3261 * @fmt: The string format to write to the buffer 3262 * @args: Arguments for @fmt 3263 */ 3264 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 3265 { 3266 struct ring_buffer_event *event; 3267 struct trace_buffer *buffer; 3268 struct trace_array *tr = READ_ONCE(printk_trace); 3269 struct bprint_entry *entry; 3270 unsigned int trace_ctx; 3271 char *tbuffer; 3272 int len = 0, size; 3273 3274 if (!printk_binsafe(tr)) 3275 return trace_vprintk(ip, fmt, args); 3276 3277 if (unlikely(tracing_selftest_running || tracing_disabled)) 3278 return 0; 3279 3280 /* Don't pollute graph traces with trace_vprintk internals */ 3281 pause_graph_tracing(); 3282 3283 trace_ctx = tracing_gen_ctx(); 3284 preempt_disable_notrace(); 3285 3286 tbuffer = get_trace_buf(); 3287 if (!tbuffer) { 3288 len = 0; 3289 goto out_nobuffer; 3290 } 3291 3292 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); 3293 3294 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) 3295 goto out_put; 3296 3297 size = sizeof(*entry) + sizeof(u32) * len; 3298 buffer = tr->array_buffer.buffer; 3299 ring_buffer_nest_start(buffer); 3300 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, 3301 trace_ctx); 3302 if (!event) 3303 goto out; 3304 entry = ring_buffer_event_data(event); 3305 entry->ip = ip; 3306 entry->fmt = fmt; 3307 3308 memcpy(entry->buf, tbuffer, sizeof(u32) * len); 3309 __buffer_unlock_commit(buffer, event); 3310 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL); 3311 3312 out: 3313 ring_buffer_nest_end(buffer); 3314 out_put: 3315 put_trace_buf(); 3316 3317 out_nobuffer: 3318 preempt_enable_notrace(); 3319 unpause_graph_tracing(); 3320 3321 return len; 3322 } 3323 EXPORT_SYMBOL_GPL(trace_vbprintk); 3324 3325 __printf(3, 0) 3326 static int 3327 __trace_array_vprintk(struct trace_buffer *buffer, 3328 unsigned long ip, const char *fmt, va_list args) 3329 { 3330 struct ring_buffer_event *event; 3331 int len = 0, size; 3332 struct print_entry *entry; 3333 unsigned int trace_ctx; 3334 char *tbuffer; 3335 3336 if (tracing_disabled) 3337 return 0; 3338 3339 /* Don't pollute graph traces with trace_vprintk internals */ 3340 pause_graph_tracing(); 3341 3342 trace_ctx = tracing_gen_ctx(); 3343 preempt_disable_notrace(); 3344 3345 3346 tbuffer = get_trace_buf(); 3347 if (!tbuffer) { 3348 len = 0; 3349 goto out_nobuffer; 3350 } 3351 3352 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); 3353 3354 size = sizeof(*entry) + len + 1; 3355 ring_buffer_nest_start(buffer); 3356 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 3357 trace_ctx); 3358 if (!event) 3359 goto out; 3360 entry = ring_buffer_event_data(event); 3361 entry->ip = ip; 3362 3363 memcpy(&entry->buf, tbuffer, len + 1); 3364 __buffer_unlock_commit(buffer, event); 3365 ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL); 3366 3367 out: 3368 
	ring_buffer_nest_end(buffer);
3369 	put_trace_buf();
3370
3371 out_nobuffer:
3372 	preempt_enable_notrace();
3373 	unpause_graph_tracing();
3374
3375 	return len;
3376 }
3377
3378 __printf(3, 0)
3379 int trace_array_vprintk(struct trace_array *tr,
3380 			unsigned long ip, const char *fmt, va_list args)
3381 {
3382 	if (tracing_selftest_running && tr == &global_trace)
3383 		return 0;
3384
3385 	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3386 }
3387
3388 /**
3389 * trace_array_printk - Print a message to a specific instance
3390 * @tr: The instance trace_array descriptor
3391 * @ip: The instruction pointer that this is called from.
3392 * @fmt: The format to print (printf format)
3393 *
3394 * If a subsystem sets up its own instance, it may use this function
3395 * to printk strings into its tracing instance buffer. Note, this
3396 * function will not write into the top level buffer (use
3397 * trace_printk() for that), as the top level buffer should only
3398 * contain events that can be individually disabled.
3399 * trace_printk() is only for debugging a kernel, and should never
3400 * be incorporated into normal use.
3401 *
3402 * trace_array_printk() can be used instead, as it will not add noise
3403 * to the top level tracing buffer.
3404 *
3405 * Note, trace_array_init_printk() must be called on @tr before this
3406 * can be used.
3407 */
3408 __printf(3, 0)
3409 int trace_array_printk(struct trace_array *tr,
3410 		       unsigned long ip, const char *fmt, ...)
3411 {
3412 	int ret;
3413 	va_list ap;
3414
3415 	if (!tr)
3416 		return -ENOENT;
3417
3418 	/* This is only allowed for created instances */
3419 	if (tr == &global_trace)
3420 		return 0;
3421
3422 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3423 		return 0;
3424
3425 	va_start(ap, fmt);
3426 	ret = trace_array_vprintk(tr, ip, fmt, ap);
3427 	va_end(ap);
3428 	return ret;
3429 }
3430 EXPORT_SYMBOL_GPL(trace_array_printk);
3431
3432 /**
3433 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3434 * @tr: The trace array to initialize the buffers for
3435 *
3436 * As trace_array_printk() only writes into instances, such calls are
3437 * OK to have in the kernel (unlike trace_printk()). This needs to be
3438 * called before trace_array_printk() can be used on a trace_array.
3439 */
3440 int trace_array_init_printk(struct trace_array *tr)
3441 {
3442 	if (!tr)
3443 		return -ENOENT;
3444
3445 	/* This is only allowed for created instances */
3446 	if (tr == &global_trace)
3447 		return -EINVAL;
3448
3449 	return alloc_percpu_trace_buffer();
3450 }
3451 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3452
3453 __printf(3, 4)
3454 int trace_array_printk_buf(struct trace_buffer *buffer,
3455 			   unsigned long ip, const char *fmt, ...)
3456 { 3457 int ret; 3458 va_list ap; 3459 3460 if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK)) 3461 return 0; 3462 3463 va_start(ap, fmt); 3464 ret = __trace_array_vprintk(buffer, ip, fmt, ap); 3465 va_end(ap); 3466 return ret; 3467 } 3468 3469 __printf(2, 0) 3470 int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 3471 { 3472 return trace_array_vprintk(printk_trace, ip, fmt, args); 3473 } 3474 EXPORT_SYMBOL_GPL(trace_vprintk); 3475 3476 static void trace_iterator_increment(struct trace_iterator *iter) 3477 { 3478 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); 3479 3480 iter->idx++; 3481 if (buf_iter) 3482 ring_buffer_iter_advance(buf_iter); 3483 } 3484 3485 static struct trace_entry * 3486 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, 3487 unsigned long *lost_events) 3488 { 3489 struct ring_buffer_event *event; 3490 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); 3491 3492 if (buf_iter) { 3493 event = ring_buffer_iter_peek(buf_iter, ts); 3494 if (lost_events) 3495 *lost_events = ring_buffer_iter_dropped(buf_iter) ? 3496 (unsigned long)-1 : 0; 3497 } else { 3498 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, 3499 lost_events); 3500 } 3501 3502 if (event) { 3503 iter->ent_size = ring_buffer_event_length(event); 3504 return ring_buffer_event_data(event); 3505 } 3506 iter->ent_size = 0; 3507 return NULL; 3508 } 3509 3510 static struct trace_entry * 3511 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, 3512 unsigned long *missing_events, u64 *ent_ts) 3513 { 3514 struct trace_buffer *buffer = iter->array_buffer->buffer; 3515 struct trace_entry *ent, *next = NULL; 3516 unsigned long lost_events = 0, next_lost = 0; 3517 int cpu_file = iter->cpu_file; 3518 u64 next_ts = 0, ts; 3519 int next_cpu = -1; 3520 int next_size = 0; 3521 int cpu; 3522 3523 /* 3524 * If we are in a per_cpu trace file, don't bother by iterating over 3525 * all cpu and peek directly. 3526 */ 3527 if (cpu_file > RING_BUFFER_ALL_CPUS) { 3528 if (ring_buffer_empty_cpu(buffer, cpu_file)) 3529 return NULL; 3530 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); 3531 if (ent_cpu) 3532 *ent_cpu = cpu_file; 3533 3534 return ent; 3535 } 3536 3537 for_each_tracing_cpu(cpu) { 3538 3539 if (ring_buffer_empty_cpu(buffer, cpu)) 3540 continue; 3541 3542 ent = peek_next_entry(iter, cpu, &ts, &lost_events); 3543 3544 /* 3545 * Pick the entry with the smallest timestamp: 3546 */ 3547 if (ent && (!next || ts < next_ts)) { 3548 next = ent; 3549 next_cpu = cpu; 3550 next_ts = ts; 3551 next_lost = lost_events; 3552 next_size = iter->ent_size; 3553 } 3554 } 3555 3556 iter->ent_size = next_size; 3557 3558 if (ent_cpu) 3559 *ent_cpu = next_cpu; 3560 3561 if (ent_ts) 3562 *ent_ts = next_ts; 3563 3564 if (missing_events) 3565 *missing_events = next_lost; 3566 3567 return next; 3568 } 3569 3570 #define STATIC_FMT_BUF_SIZE 128 3571 static char static_fmt_buf[STATIC_FMT_BUF_SIZE]; 3572 3573 char *trace_iter_expand_format(struct trace_iterator *iter) 3574 { 3575 char *tmp; 3576 3577 /* 3578 * iter->tr is NULL when used with tp_printk, which makes 3579 * this get called where it is not safe to call krealloc(). 
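	 * (In that case iter->fmt stays as is and NULL is returned;
	 * the caller, e.g. trace_event_format(), treats that as
	 * "fall back to the original format string".)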
3580 	 */
3581 	if (!iter->tr || iter->fmt == static_fmt_buf)
3582 		return NULL;
3583
3584 	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3585 		       GFP_KERNEL);
3586 	if (tmp) {
3587 		iter->fmt_size += STATIC_FMT_BUF_SIZE;
3588 		iter->fmt = tmp;
3589 	}
3590
3591 	return tmp;
3592 }
3593
3594 /* Returns true if the string is safe to dereference from an event */
3595 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3596 {
3597 	unsigned long addr = (unsigned long)str;
3598 	struct trace_event *trace_event;
3599 	struct trace_event_call *event;
3600
3601 	/* OK if part of the event data */
3602 	if ((addr >= (unsigned long)iter->ent) &&
3603 	    (addr < (unsigned long)iter->ent + iter->ent_size))
3604 		return true;
3605
3606 	/* OK if part of the temp seq buffer */
3607 	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3608 	    (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3609 		return true;
3610
3611 	/* Core rodata cannot be freed */
3612 	if (is_kernel_rodata(addr))
3613 		return true;
3614
3615 	if (trace_is_tracepoint_string(str))
3616 		return true;
3617
3618 	/*
3619 	 * Now this could be a module event, referencing core module
3620 	 * data, which is OK.
3621 	 */
3622 	if (!iter->ent)
3623 		return false;
3624
3625 	trace_event = ftrace_find_event(iter->ent->type);
3626 	if (!trace_event)
3627 		return false;
3628
3629 	event = container_of(trace_event, struct trace_event_call, event);
3630 	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3631 		return false;
3632
3633 	/* Would rather have rodata, but this will suffice */
3634 	if (within_module_core(addr, event->module))
3635 		return true;
3636
3637 	return false;
3638 }
3639
3640 /**
3641 * ignore_event - Check dereferenced fields while writing to the seq buffer
3642 * @iter: The iterator that holds the seq buffer and the event being printed
3643 *
3644 * At boot up, test_event_printk() will flag any event whose "%s"
3645 * dereferences a string that is not itself stored in the ring buffer.
3646 * It may still be valid, as the string may point to a static string in
3647 * the kernel rodata that never gets freed. But if the string pointer is
3648 * pointing to something that was allocated, there's a chance that it can
3649 * be freed by the time the user reads the trace. This would cause a bad
3650 * memory access by the kernel and possibly crash the system.
3651 *
3652 * This function will check if the event has any fields flagged as needing
3653 * to be checked at runtime and perform those checks.
3654 *
3655 * If it is found that a field is unsafe, it will write into the @iter->seq
3656 * a message stating what was found to be unsafe.
3657 *
3658 * @return: true if the event is unsafe and should be ignored,
3659 * false otherwise.
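 *
 * Illustrative unsafe pattern (a sketch, not an event from this file):
 * an event that records only a pointer and prints it later with "%s":
 *
 *	TP_printk("comm=%s", __entry->comm_ptr)
 *
 * If the pointed-to memory is freed before the trace file is read, the
 * read would access freed memory. The __string()/__get_str() helpers
 * avoid this by copying the string into the event itself.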
3660 */ 3661 bool ignore_event(struct trace_iterator *iter) 3662 { 3663 struct ftrace_event_field *field; 3664 struct trace_event *trace_event; 3665 struct trace_event_call *event; 3666 struct list_head *head; 3667 struct trace_seq *seq; 3668 const void *ptr; 3669 3670 trace_event = ftrace_find_event(iter->ent->type); 3671 3672 seq = &iter->seq; 3673 3674 if (!trace_event) { 3675 trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type); 3676 return true; 3677 } 3678 3679 event = container_of(trace_event, struct trace_event_call, event); 3680 if (!(event->flags & TRACE_EVENT_FL_TEST_STR)) 3681 return false; 3682 3683 head = trace_get_fields(event); 3684 if (!head) { 3685 trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n", 3686 trace_event_name(event)); 3687 return true; 3688 } 3689 3690 /* Offsets are from the iter->ent that points to the raw event */ 3691 ptr = iter->ent; 3692 3693 list_for_each_entry(field, head, link) { 3694 const char *str; 3695 bool good; 3696 3697 if (!field->needs_test) 3698 continue; 3699 3700 str = *(const char **)(ptr + field->offset); 3701 3702 good = trace_safe_str(iter, str); 3703 3704 /* 3705 * If you hit this warning, it is likely that the 3706 * trace event in question used %s on a string that 3707 * was saved at the time of the event, but may not be 3708 * around when the trace is read. Use __string(), 3709 * __assign_str() and __get_str() helpers in the TRACE_EVENT() 3710 * instead. See samples/trace_events/trace-events-sample.h 3711 * for reference. 3712 */ 3713 if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'", 3714 trace_event_name(event), field->name)) { 3715 trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n", 3716 trace_event_name(event), field->name); 3717 return true; 3718 } 3719 } 3720 return false; 3721 } 3722 3723 const char *trace_event_format(struct trace_iterator *iter, const char *fmt) 3724 { 3725 const char *p, *new_fmt; 3726 char *q; 3727 3728 if (WARN_ON_ONCE(!fmt)) 3729 return fmt; 3730 3731 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR) 3732 return fmt; 3733 3734 p = fmt; 3735 new_fmt = q = iter->fmt; 3736 while (*p) { 3737 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) { 3738 if (!trace_iter_expand_format(iter)) 3739 return fmt; 3740 3741 q += iter->fmt - new_fmt; 3742 new_fmt = iter->fmt; 3743 } 3744 3745 *q++ = *p++; 3746 3747 /* Replace %p with %px */ 3748 if (p[-1] == '%') { 3749 if (p[0] == '%') { 3750 *q++ = *p++; 3751 } else if (p[0] == 'p' && !isalnum(p[1])) { 3752 *q++ = *p++; 3753 *q++ = 'x'; 3754 } 3755 } 3756 } 3757 *q = '\0'; 3758 3759 return new_fmt; 3760 } 3761 3762 #define STATIC_TEMP_BUF_SIZE 128 3763 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4); 3764 3765 /* Find the next real entry, without updating the iterator itself */ 3766 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 3767 int *ent_cpu, u64 *ent_ts) 3768 { 3769 /* __find_next_entry will reset ent_size */ 3770 int ent_size = iter->ent_size; 3771 struct trace_entry *entry; 3772 3773 /* 3774 * If called from ftrace_dump(), then the iter->temp buffer 3775 * will be the static_temp_buf and not created from kmalloc. 3776 * If the entry size is greater than the buffer, we can 3777 * not save it. Just return NULL in that case. This is only 3778 * used to add markers when two consecutive events' time 3779 * stamps have a large delta. 
See trace_print_lat_context() 3780 */ 3781 if (iter->temp == static_temp_buf && 3782 STATIC_TEMP_BUF_SIZE < ent_size) 3783 return NULL; 3784 3785 /* 3786 * The __find_next_entry() may call peek_next_entry(), which may 3787 * call ring_buffer_peek() that may make the contents of iter->ent 3788 * undefined. Need to copy iter->ent now. 3789 */ 3790 if (iter->ent && iter->ent != iter->temp) { 3791 if ((!iter->temp || iter->temp_size < iter->ent_size) && 3792 !WARN_ON_ONCE(iter->temp == static_temp_buf)) { 3793 void *temp; 3794 temp = kmalloc(iter->ent_size, GFP_KERNEL); 3795 if (!temp) 3796 return NULL; 3797 kfree(iter->temp); 3798 iter->temp = temp; 3799 iter->temp_size = iter->ent_size; 3800 } 3801 memcpy(iter->temp, iter->ent, iter->ent_size); 3802 iter->ent = iter->temp; 3803 } 3804 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); 3805 /* Put back the original ent_size */ 3806 iter->ent_size = ent_size; 3807 3808 return entry; 3809 } 3810 3811 /* Find the next real entry, and increment the iterator to the next entry */ 3812 void *trace_find_next_entry_inc(struct trace_iterator *iter) 3813 { 3814 iter->ent = __find_next_entry(iter, &iter->cpu, 3815 &iter->lost_events, &iter->ts); 3816 3817 if (iter->ent) 3818 trace_iterator_increment(iter); 3819 3820 return iter->ent ? iter : NULL; 3821 } 3822 3823 static void trace_consume(struct trace_iterator *iter) 3824 { 3825 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, 3826 &iter->lost_events); 3827 } 3828 3829 static void *s_next(struct seq_file *m, void *v, loff_t *pos) 3830 { 3831 struct trace_iterator *iter = m->private; 3832 int i = (int)*pos; 3833 void *ent; 3834 3835 WARN_ON_ONCE(iter->leftover); 3836 3837 (*pos)++; 3838 3839 /* can't go backwards */ 3840 if (iter->idx > i) 3841 return NULL; 3842 3843 if (iter->idx < 0) 3844 ent = trace_find_next_entry_inc(iter); 3845 else 3846 ent = iter; 3847 3848 while (ent && iter->idx < i) 3849 ent = trace_find_next_entry_inc(iter); 3850 3851 iter->pos = *pos; 3852 3853 return ent; 3854 } 3855 3856 void tracing_iter_reset(struct trace_iterator *iter, int cpu) 3857 { 3858 struct ring_buffer_iter *buf_iter; 3859 unsigned long entries = 0; 3860 u64 ts; 3861 3862 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; 3863 3864 buf_iter = trace_buffer_iter(iter, cpu); 3865 if (!buf_iter) 3866 return; 3867 3868 ring_buffer_iter_reset(buf_iter); 3869 3870 /* 3871 * We could have the case with the max latency tracers 3872 * that a reset never took place on a cpu. This is evident 3873 * by the timestamp being before the start of the buffer. 3874 */ 3875 while (ring_buffer_iter_peek(buf_iter, &ts)) { 3876 if (ts >= iter->array_buffer->time_start) 3877 break; 3878 entries++; 3879 ring_buffer_iter_advance(buf_iter); 3880 /* This could be a big loop */ 3881 cond_resched(); 3882 } 3883 3884 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; 3885 } 3886 3887 /* 3888 * The current tracer is copied to avoid a global locking 3889 * all around. 
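 *
 * s_start(), s_next() and s_stop() implement the seq_file iterator
 * contract; a sketch of how they are wired up (the actual
 * seq_operations table for this appears further down in this file):
 *
 *	static const struct seq_operations tracer_seq_ops = {
 *		.start	= s_start,
 *		.next	= s_next,
 *		.stop	= s_stop,
 *		.show	= s_show,
 *	};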
3890 */ 3891 static void *s_start(struct seq_file *m, loff_t *pos) 3892 { 3893 struct trace_iterator *iter = m->private; 3894 struct trace_array *tr = iter->tr; 3895 int cpu_file = iter->cpu_file; 3896 void *p = NULL; 3897 loff_t l = 0; 3898 int cpu; 3899 3900 mutex_lock(&trace_types_lock); 3901 if (unlikely(tr->current_trace != iter->trace)) { 3902 /* Close iter->trace before switching to the new current tracer */ 3903 if (iter->trace->close) 3904 iter->trace->close(iter); 3905 iter->trace = tr->current_trace; 3906 /* Reopen the new current tracer */ 3907 if (iter->trace->open) 3908 iter->trace->open(iter); 3909 } 3910 mutex_unlock(&trace_types_lock); 3911 3912 #ifdef CONFIG_TRACER_MAX_TRACE 3913 if (iter->snapshot && iter->trace->use_max_tr) 3914 return ERR_PTR(-EBUSY); 3915 #endif 3916 3917 if (*pos != iter->pos) { 3918 iter->ent = NULL; 3919 iter->cpu = 0; 3920 iter->idx = -1; 3921 3922 if (cpu_file == RING_BUFFER_ALL_CPUS) { 3923 for_each_tracing_cpu(cpu) 3924 tracing_iter_reset(iter, cpu); 3925 } else 3926 tracing_iter_reset(iter, cpu_file); 3927 3928 iter->leftover = 0; 3929 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 3930 ; 3931 3932 } else { 3933 /* 3934 * If we overflowed the seq_file before, then we want 3935 * to just reuse the trace_seq buffer again. 3936 */ 3937 if (iter->leftover) 3938 p = iter; 3939 else { 3940 l = *pos - 1; 3941 p = s_next(m, p, &l); 3942 } 3943 } 3944 3945 trace_event_read_lock(); 3946 trace_access_lock(cpu_file); 3947 return p; 3948 } 3949 3950 static void s_stop(struct seq_file *m, void *p) 3951 { 3952 struct trace_iterator *iter = m->private; 3953 3954 #ifdef CONFIG_TRACER_MAX_TRACE 3955 if (iter->snapshot && iter->trace->use_max_tr) 3956 return; 3957 #endif 3958 3959 trace_access_unlock(iter->cpu_file); 3960 trace_event_read_unlock(); 3961 } 3962 3963 static void 3964 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total, 3965 unsigned long *entries, int cpu) 3966 { 3967 unsigned long count; 3968 3969 count = ring_buffer_entries_cpu(buf->buffer, cpu); 3970 /* 3971 * If this buffer has skipped entries, then we hold all 3972 * entries for the trace and we need to ignore the 3973 * ones before the time stamp. 
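	 * (Those early entries are the ones counted as skipped_entries
	 * by tracing_iter_reset() above.)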
3974 */ 3975 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 3976 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 3977 /* total is the same as the entries */ 3978 *total = count; 3979 } else 3980 *total = count + 3981 ring_buffer_overrun_cpu(buf->buffer, cpu); 3982 *entries = count; 3983 } 3984 3985 static void 3986 get_total_entries(struct array_buffer *buf, 3987 unsigned long *total, unsigned long *entries) 3988 { 3989 unsigned long t, e; 3990 int cpu; 3991 3992 *total = 0; 3993 *entries = 0; 3994 3995 for_each_tracing_cpu(cpu) { 3996 get_total_entries_cpu(buf, &t, &e, cpu); 3997 *total += t; 3998 *entries += e; 3999 } 4000 } 4001 4002 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) 4003 { 4004 unsigned long total, entries; 4005 4006 if (!tr) 4007 tr = &global_trace; 4008 4009 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); 4010 4011 return entries; 4012 } 4013 4014 unsigned long trace_total_entries(struct trace_array *tr) 4015 { 4016 unsigned long total, entries; 4017 4018 if (!tr) 4019 tr = &global_trace; 4020 4021 get_total_entries(&tr->array_buffer, &total, &entries); 4022 4023 return entries; 4024 } 4025 4026 static void print_lat_help_header(struct seq_file *m) 4027 { 4028 seq_puts(m, "# _------=> CPU# \n" 4029 "# / _-----=> irqs-off/BH-disabled\n" 4030 "# | / _----=> need-resched \n" 4031 "# || / _---=> hardirq/softirq \n" 4032 "# ||| / _--=> preempt-depth \n" 4033 "# |||| / _-=> migrate-disable \n" 4034 "# ||||| / delay \n" 4035 "# cmd pid |||||| time | caller \n" 4036 "# \\ / |||||| \\ | / \n"); 4037 } 4038 4039 static void print_event_info(struct array_buffer *buf, struct seq_file *m) 4040 { 4041 unsigned long total; 4042 unsigned long entries; 4043 4044 get_total_entries(buf, &total, &entries); 4045 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", 4046 entries, total, num_online_cpus()); 4047 seq_puts(m, "#\n"); 4048 } 4049 4050 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, 4051 unsigned int flags) 4052 { 4053 bool tgid = flags & TRACE_ITER_RECORD_TGID; 4054 4055 print_event_info(buf, m); 4056 4057 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); 4058 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); 4059 } 4060 4061 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, 4062 unsigned int flags) 4063 { 4064 bool tgid = flags & TRACE_ITER_RECORD_TGID; 4065 static const char space[] = " "; 4066 int prec = tgid ? 
12 : 2; 4067 4068 print_event_info(buf, m); 4069 4070 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space); 4071 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); 4072 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); 4073 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); 4074 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); 4075 seq_printf(m, "# %.*s|||| / delay\n", prec, space); 4076 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID "); 4077 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | "); 4078 } 4079 4080 void 4081 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 4082 { 4083 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); 4084 struct array_buffer *buf = iter->array_buffer; 4085 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); 4086 struct tracer *type = iter->trace; 4087 unsigned long entries; 4088 unsigned long total; 4089 const char *name = type->name; 4090 4091 get_total_entries(buf, &total, &entries); 4092 4093 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 4094 name, init_utsname()->release); 4095 seq_puts(m, "# -----------------------------------" 4096 "---------------------------------\n"); 4097 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" 4098 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 4099 nsecs_to_usecs(data->saved_latency), 4100 entries, 4101 total, 4102 buf->cpu, 4103 preempt_model_none() ? "server" : 4104 preempt_model_voluntary() ? "desktop" : 4105 preempt_model_full() ? "preempt" : 4106 preempt_model_lazy() ? "lazy" : 4107 preempt_model_rt() ? "preempt_rt" : 4108 "unknown", 4109 /* These are reserved for later use */ 4110 0, 0, 0, 0); 4111 #ifdef CONFIG_SMP 4112 seq_printf(m, " #P:%d)\n", num_online_cpus()); 4113 #else 4114 seq_puts(m, ")\n"); 4115 #endif 4116 seq_puts(m, "# -----------------\n"); 4117 seq_printf(m, "# | task: %.16s-%d " 4118 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 4119 data->comm, data->pid, 4120 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 4121 data->policy, data->rt_priority); 4122 seq_puts(m, "# -----------------\n"); 4123 4124 if (data->critical_start) { 4125 seq_puts(m, "# => started at: "); 4126 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 4127 trace_print_seq(m, &iter->seq); 4128 seq_puts(m, "\n# => ended at: "); 4129 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 4130 trace_print_seq(m, &iter->seq); 4131 seq_puts(m, "\n#\n"); 4132 } 4133 4134 seq_puts(m, "#\n"); 4135 } 4136 4137 static void test_cpu_buff_start(struct trace_iterator *iter) 4138 { 4139 struct trace_seq *s = &iter->seq; 4140 struct trace_array *tr = iter->tr; 4141 4142 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) 4143 return; 4144 4145 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 4146 return; 4147 4148 if (cpumask_available(iter->started) && 4149 cpumask_test_cpu(iter->cpu, iter->started)) 4150 return; 4151 4152 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) 4153 return; 4154 4155 if (cpumask_available(iter->started)) 4156 cpumask_set_cpu(iter->cpu, iter->started); 4157 4158 /* Don't print started cpu buffer for the first entry of the trace */ 4159 if (iter->idx > 1) 4160 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 4161 iter->cpu); 4162 } 4163 4164 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 4165 { 4166 struct trace_array *tr = iter->tr; 4167 struct trace_seq *s = &iter->seq; 4168 unsigned 
long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); 4169 struct trace_entry *entry; 4170 struct trace_event *event; 4171 4172 entry = iter->ent; 4173 4174 test_cpu_buff_start(iter); 4175 4176 event = ftrace_find_event(entry->type); 4177 4178 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4179 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 4180 trace_print_lat_context(iter); 4181 else 4182 trace_print_context(iter); 4183 } 4184 4185 if (trace_seq_has_overflowed(s)) 4186 return TRACE_TYPE_PARTIAL_LINE; 4187 4188 if (event) { 4189 if (tr->trace_flags & TRACE_ITER_FIELDS) 4190 return print_event_fields(iter, event); 4191 /* 4192 * For TRACE_EVENT() events, the print_fmt is not 4193 * safe to use if the array has delta offsets 4194 * Force printing via the fields. 4195 */ 4196 if ((tr->text_delta || tr->data_delta) && 4197 event->type > __TRACE_LAST_TYPE) 4198 return print_event_fields(iter, event); 4199 4200 return event->funcs->trace(iter, sym_flags, event); 4201 } 4202 4203 trace_seq_printf(s, "Unknown type %d\n", entry->type); 4204 4205 return trace_handle_return(s); 4206 } 4207 4208 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 4209 { 4210 struct trace_array *tr = iter->tr; 4211 struct trace_seq *s = &iter->seq; 4212 struct trace_entry *entry; 4213 struct trace_event *event; 4214 4215 entry = iter->ent; 4216 4217 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) 4218 trace_seq_printf(s, "%d %d %llu ", 4219 entry->pid, iter->cpu, iter->ts); 4220 4221 if (trace_seq_has_overflowed(s)) 4222 return TRACE_TYPE_PARTIAL_LINE; 4223 4224 event = ftrace_find_event(entry->type); 4225 if (event) 4226 return event->funcs->raw(iter, 0, event); 4227 4228 trace_seq_printf(s, "%d ?\n", entry->type); 4229 4230 return trace_handle_return(s); 4231 } 4232 4233 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 4234 { 4235 struct trace_array *tr = iter->tr; 4236 struct trace_seq *s = &iter->seq; 4237 unsigned char newline = '\n'; 4238 struct trace_entry *entry; 4239 struct trace_event *event; 4240 4241 entry = iter->ent; 4242 4243 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4244 SEQ_PUT_HEX_FIELD(s, entry->pid); 4245 SEQ_PUT_HEX_FIELD(s, iter->cpu); 4246 SEQ_PUT_HEX_FIELD(s, iter->ts); 4247 if (trace_seq_has_overflowed(s)) 4248 return TRACE_TYPE_PARTIAL_LINE; 4249 } 4250 4251 event = ftrace_find_event(entry->type); 4252 if (event) { 4253 enum print_line_t ret = event->funcs->hex(iter, 0, event); 4254 if (ret != TRACE_TYPE_HANDLED) 4255 return ret; 4256 } 4257 4258 SEQ_PUT_FIELD(s, newline); 4259 4260 return trace_handle_return(s); 4261 } 4262 4263 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 4264 { 4265 struct trace_array *tr = iter->tr; 4266 struct trace_seq *s = &iter->seq; 4267 struct trace_entry *entry; 4268 struct trace_event *event; 4269 4270 entry = iter->ent; 4271 4272 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4273 SEQ_PUT_FIELD(s, entry->pid); 4274 SEQ_PUT_FIELD(s, iter->cpu); 4275 SEQ_PUT_FIELD(s, iter->ts); 4276 if (trace_seq_has_overflowed(s)) 4277 return TRACE_TYPE_PARTIAL_LINE; 4278 } 4279 4280 event = ftrace_find_event(entry->type); 4281 return event ? 
event->funcs->binary(iter, 0, event) : 4282 TRACE_TYPE_HANDLED; 4283 } 4284 4285 int trace_empty(struct trace_iterator *iter) 4286 { 4287 struct ring_buffer_iter *buf_iter; 4288 int cpu; 4289 4290 /* If we are looking at one CPU buffer, only check that one */ 4291 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4292 cpu = iter->cpu_file; 4293 buf_iter = trace_buffer_iter(iter, cpu); 4294 if (buf_iter) { 4295 if (!ring_buffer_iter_empty(buf_iter)) 4296 return 0; 4297 } else { 4298 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) 4299 return 0; 4300 } 4301 return 1; 4302 } 4303 4304 for_each_tracing_cpu(cpu) { 4305 buf_iter = trace_buffer_iter(iter, cpu); 4306 if (buf_iter) { 4307 if (!ring_buffer_iter_empty(buf_iter)) 4308 return 0; 4309 } else { 4310 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) 4311 return 0; 4312 } 4313 } 4314 4315 return 1; 4316 } 4317 4318 /* Called with trace_event_read_lock() held. */ 4319 enum print_line_t print_trace_line(struct trace_iterator *iter) 4320 { 4321 struct trace_array *tr = iter->tr; 4322 unsigned long trace_flags = tr->trace_flags; 4323 enum print_line_t ret; 4324 4325 if (iter->lost_events) { 4326 if (iter->lost_events == (unsigned long)-1) 4327 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", 4328 iter->cpu); 4329 else 4330 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", 4331 iter->cpu, iter->lost_events); 4332 if (trace_seq_has_overflowed(&iter->seq)) 4333 return TRACE_TYPE_PARTIAL_LINE; 4334 } 4335 4336 if (iter->trace && iter->trace->print_line) { 4337 ret = iter->trace->print_line(iter); 4338 if (ret != TRACE_TYPE_UNHANDLED) 4339 return ret; 4340 } 4341 4342 if (iter->ent->type == TRACE_BPUTS && 4343 trace_flags & TRACE_ITER_PRINTK && 4344 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4345 return trace_print_bputs_msg_only(iter); 4346 4347 if (iter->ent->type == TRACE_BPRINT && 4348 trace_flags & TRACE_ITER_PRINTK && 4349 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4350 return trace_print_bprintk_msg_only(iter); 4351 4352 if (iter->ent->type == TRACE_PRINT && 4353 trace_flags & TRACE_ITER_PRINTK && 4354 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4355 return trace_print_printk_msg_only(iter); 4356 4357 if (trace_flags & TRACE_ITER_BIN) 4358 return print_bin_fmt(iter); 4359 4360 if (trace_flags & TRACE_ITER_HEX) 4361 return print_hex_fmt(iter); 4362 4363 if (trace_flags & TRACE_ITER_RAW) 4364 return print_raw_fmt(iter); 4365 4366 return print_trace_fmt(iter); 4367 } 4368 4369 void trace_latency_header(struct seq_file *m) 4370 { 4371 struct trace_iterator *iter = m->private; 4372 struct trace_array *tr = iter->tr; 4373 4374 /* print nothing if the buffers are empty */ 4375 if (trace_empty(iter)) 4376 return; 4377 4378 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 4379 print_trace_header(m, iter); 4380 4381 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) 4382 print_lat_help_header(m); 4383 } 4384 4385 void trace_default_header(struct seq_file *m) 4386 { 4387 struct trace_iterator *iter = m->private; 4388 struct trace_array *tr = iter->tr; 4389 unsigned long trace_flags = tr->trace_flags; 4390 4391 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 4392 return; 4393 4394 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 4395 /* print nothing if the buffers are empty */ 4396 if (trace_empty(iter)) 4397 return; 4398 print_trace_header(m, iter); 4399 if (!(trace_flags & TRACE_ITER_VERBOSE)) 4400 print_lat_help_header(m); 4401 } else { 4402 if (!(trace_flags & TRACE_ITER_VERBOSE)) { 4403 if (trace_flags & TRACE_ITER_IRQ_INFO) 4404 
print_func_help_header_irq(iter->array_buffer,
4405 m, trace_flags);
4406 else
4407 print_func_help_header(iter->array_buffer, m,
4408 trace_flags);
4409 }
4410 }
4411 }
4412
4413 static void test_ftrace_alive(struct seq_file *m)
4414 {
4415 if (!ftrace_is_dead())
4416 return;
4417 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4418 "# MAY BE MISSING FUNCTION EVENTS\n");
4419 }
4420
4421 #ifdef CONFIG_TRACER_MAX_TRACE
4422 static void show_snapshot_main_help(struct seq_file *m)
4423 {
4424 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4425 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4426 "# Takes a snapshot of the main buffer.\n"
4427 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4428 "# (Doesn't have to be '2'; works with any number that\n"
4429 "# is not a '0' or '1')\n");
4430 }
4431
4432 static void show_snapshot_percpu_help(struct seq_file *m)
4433 {
4434 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4435 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4436 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4437 "# Takes a snapshot of the main buffer for this cpu.\n");
4438 #else
4439 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4440 "# Must use main snapshot file to allocate.\n");
4441 #endif
4442 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4443 "# (Doesn't have to be '2'; works with any number that\n"
4444 "# is not a '0' or '1')\n");
4445 }
4446
4447 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4448 {
4449 if (iter->tr->allocated_snapshot)
4450 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4451 else
4452 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4453
4454 seq_puts(m, "# Snapshot commands:\n");
4455 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4456 show_snapshot_main_help(m);
4457 else
4458 show_snapshot_percpu_help(m);
4459 }
4460 #else
4461 /* Should never be called */
4462 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4463 #endif
4464
4465 static int s_show(struct seq_file *m, void *v)
4466 {
4467 struct trace_iterator *iter = v;
4468 int ret;
4469
4470 if (iter->ent == NULL) {
4471 if (iter->tr) {
4472 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4473 seq_puts(m, "#\n");
4474 test_ftrace_alive(m);
4475 }
4476 if (iter->snapshot && trace_empty(iter))
4477 print_snapshot_help(m, iter);
4478 else if (iter->trace && iter->trace->print_header)
4479 iter->trace->print_header(m);
4480 else
4481 trace_default_header(m);
4482
4483 } else if (iter->leftover) {
4484 /*
4485 * If we filled the seq_file buffer earlier, we
4486 * want to just show it now.
4487 */
4488 ret = trace_print_seq(m, &iter->seq);
4489
4490 /* ret should this time be zero, but you never know */
4491 iter->leftover = ret;
4492
4493 } else {
4494 ret = print_trace_line(iter);
4495 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4496 iter->seq.full = 0;
4497 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4498 }
4499 ret = trace_print_seq(m, &iter->seq);
4500 /*
4501 * If we overflow the seq_file buffer, then it will
4502 * ask us for this data again at start up.
4503 * Use that instead.
4504 * ret is 0 if seq_file write succeeded.
4505 * -1 otherwise.
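 * A non-zero leftover makes the next s_start() hand back the iterator
 * itself, so the buffered trace_seq is flushed before a new entry is
 * read (see the "If we overflowed the seq_file before" case above).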
4506 */
4507 iter->leftover = ret;
4508 }
4509
4510 return 0;
4511 }
4512
4513 /*
4514 * Should be used after trace_array_get(), trace_types_lock
4515 * ensures that i_cdev was already initialized.
4516 */
4517 static inline int tracing_get_cpu(struct inode *inode)
4518 {
4519 if (inode->i_cdev) /* See trace_create_cpu_file() */
4520 return (long)inode->i_cdev - 1;
4521 return RING_BUFFER_ALL_CPUS;
4522 }
4523
4524 static const struct seq_operations tracer_seq_ops = {
4525 .start = s_start,
4526 .next = s_next,
4527 .stop = s_stop,
4528 .show = s_show,
4529 };
4530
4531 /*
4532 * Note, as iter itself can be allocated and freed in different
4533 * ways, this function is only used to free its content, and not
4534 * the iterator itself. The only requirement for all the allocations
4535 * is that it must zero all fields (kzalloc), as freeing works with
4536 * either allocated content or NULL.
4537 */
4538 static void free_trace_iter_content(struct trace_iterator *iter)
4539 {
4540 /* The fmt is either NULL, allocated or points to static_fmt_buf */
4541 if (iter->fmt != static_fmt_buf)
4542 kfree(iter->fmt);
4543
4544 kfree(iter->temp);
4545 kfree(iter->buffer_iter);
4546 mutex_destroy(&iter->mutex);
4547 free_cpumask_var(iter->started);
4548 }
4549
4550 static struct trace_iterator *
4551 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4552 {
4553 struct trace_array *tr = inode->i_private;
4554 struct trace_iterator *iter;
4555 int cpu;
4556
4557 if (tracing_disabled)
4558 return ERR_PTR(-ENODEV);
4559
4560 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4561 if (!iter)
4562 return ERR_PTR(-ENOMEM);
4563
4564 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4565 GFP_KERNEL);
4566 if (!iter->buffer_iter)
4567 goto release;
4568
4569 /*
4570 * trace_find_next_entry() may need to save off iter->ent.
4571 * It will place it into the iter->temp buffer. As most
4572 * events are less than 128 bytes, allocate a buffer of that size.
4573 * If one is greater, then trace_find_next_entry() will
4574 * allocate a new buffer to adjust for the bigger iter->ent.
4575 * It's not critical if it fails to get allocated here.
4576 */
4577 iter->temp = kmalloc(128, GFP_KERNEL);
4578 if (iter->temp)
4579 iter->temp_size = 128;
4580
4581 /*
4582 * trace_event_printf() may need to modify the given format
4583 * string to replace %p with %px so that it shows real address
4584 * instead of hash value. However, that is only for the event
4585 * tracing; other tracers may not need it. Defer the allocation
4586 * until it is needed.
4587 */
4588 iter->fmt = NULL;
4589 iter->fmt_size = 0;
4590
4591 mutex_lock(&trace_types_lock);
4592 iter->trace = tr->current_trace;
4593
4594 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4595 goto fail;
4596
4597 iter->tr = tr;
4598
4599 #ifdef CONFIG_TRACER_MAX_TRACE
4600 /* Currently only the top directory has a snapshot */
4601 if (tr->current_trace->print_max || snapshot)
4602 iter->array_buffer = &tr->max_buffer;
4603 else
4604 #endif
4605 iter->array_buffer = &tr->array_buffer;
4606 iter->snapshot = snapshot;
4607 iter->pos = -1;
4608 iter->cpu_file = tracing_get_cpu(inode);
4609 mutex_init(&iter->mutex);
4610
4611 /* Notify the tracer early; before we stop tracing.
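 * A tracer's open() callback may set up per-iterator state that the
 * output path depends on, so it has to run before the buffers are
 * quiesced below.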
*/ 4612 if (iter->trace->open) 4613 iter->trace->open(iter); 4614 4615 /* Annotate start of buffers if we had overruns */ 4616 if (ring_buffer_overruns(iter->array_buffer->buffer)) 4617 iter->iter_flags |= TRACE_FILE_ANNOTATE; 4618 4619 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 4620 if (trace_clocks[tr->clock_id].in_ns) 4621 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 4622 4623 /* 4624 * If pause-on-trace is enabled, then stop the trace while 4625 * dumping, unless this is the "snapshot" file 4626 */ 4627 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) 4628 tracing_stop_tr(tr); 4629 4630 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 4631 for_each_tracing_cpu(cpu) { 4632 iter->buffer_iter[cpu] = 4633 ring_buffer_read_prepare(iter->array_buffer->buffer, 4634 cpu, GFP_KERNEL); 4635 } 4636 ring_buffer_read_prepare_sync(); 4637 for_each_tracing_cpu(cpu) { 4638 ring_buffer_read_start(iter->buffer_iter[cpu]); 4639 tracing_iter_reset(iter, cpu); 4640 } 4641 } else { 4642 cpu = iter->cpu_file; 4643 iter->buffer_iter[cpu] = 4644 ring_buffer_read_prepare(iter->array_buffer->buffer, 4645 cpu, GFP_KERNEL); 4646 ring_buffer_read_prepare_sync(); 4647 ring_buffer_read_start(iter->buffer_iter[cpu]); 4648 tracing_iter_reset(iter, cpu); 4649 } 4650 4651 mutex_unlock(&trace_types_lock); 4652 4653 return iter; 4654 4655 fail: 4656 mutex_unlock(&trace_types_lock); 4657 free_trace_iter_content(iter); 4658 release: 4659 seq_release_private(inode, file); 4660 return ERR_PTR(-ENOMEM); 4661 } 4662 4663 int tracing_open_generic(struct inode *inode, struct file *filp) 4664 { 4665 int ret; 4666 4667 ret = tracing_check_open_get_tr(NULL); 4668 if (ret) 4669 return ret; 4670 4671 filp->private_data = inode->i_private; 4672 return 0; 4673 } 4674 4675 bool tracing_is_disabled(void) 4676 { 4677 return (tracing_disabled) ? true: false; 4678 } 4679 4680 /* 4681 * Open and update trace_array ref count. 4682 * Must have the current trace_array passed to it. 4683 */ 4684 int tracing_open_generic_tr(struct inode *inode, struct file *filp) 4685 { 4686 struct trace_array *tr = inode->i_private; 4687 int ret; 4688 4689 ret = tracing_check_open_get_tr(tr); 4690 if (ret) 4691 return ret; 4692 4693 filp->private_data = inode->i_private; 4694 4695 return 0; 4696 } 4697 4698 /* 4699 * The private pointer of the inode is the trace_event_file. 4700 * Update the tr ref count associated to it. 
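 * Under event_mutex a reference is also taken on the file itself,
 * and files already marked EVENT_FILE_FL_FREED are rejected, so an
 * open cannot race with the event being removed.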
4701 */ 4702 int tracing_open_file_tr(struct inode *inode, struct file *filp) 4703 { 4704 struct trace_event_file *file = inode->i_private; 4705 int ret; 4706 4707 ret = tracing_check_open_get_tr(file->tr); 4708 if (ret) 4709 return ret; 4710 4711 mutex_lock(&event_mutex); 4712 4713 /* Fail if the file is marked for removal */ 4714 if (file->flags & EVENT_FILE_FL_FREED) { 4715 trace_array_put(file->tr); 4716 ret = -ENODEV; 4717 } else { 4718 event_file_get(file); 4719 } 4720 4721 mutex_unlock(&event_mutex); 4722 if (ret) 4723 return ret; 4724 4725 filp->private_data = inode->i_private; 4726 4727 return 0; 4728 } 4729 4730 int tracing_release_file_tr(struct inode *inode, struct file *filp) 4731 { 4732 struct trace_event_file *file = inode->i_private; 4733 4734 trace_array_put(file->tr); 4735 event_file_put(file); 4736 4737 return 0; 4738 } 4739 4740 int tracing_single_release_file_tr(struct inode *inode, struct file *filp) 4741 { 4742 tracing_release_file_tr(inode, filp); 4743 return single_release(inode, filp); 4744 } 4745 4746 static int tracing_mark_open(struct inode *inode, struct file *filp) 4747 { 4748 stream_open(inode, filp); 4749 return tracing_open_generic_tr(inode, filp); 4750 } 4751 4752 static int tracing_release(struct inode *inode, struct file *file) 4753 { 4754 struct trace_array *tr = inode->i_private; 4755 struct seq_file *m = file->private_data; 4756 struct trace_iterator *iter; 4757 int cpu; 4758 4759 if (!(file->f_mode & FMODE_READ)) { 4760 trace_array_put(tr); 4761 return 0; 4762 } 4763 4764 /* Writes do not use seq_file */ 4765 iter = m->private; 4766 mutex_lock(&trace_types_lock); 4767 4768 for_each_tracing_cpu(cpu) { 4769 if (iter->buffer_iter[cpu]) 4770 ring_buffer_read_finish(iter->buffer_iter[cpu]); 4771 } 4772 4773 if (iter->trace && iter->trace->close) 4774 iter->trace->close(iter); 4775 4776 if (!iter->snapshot && tr->stop_count) 4777 /* reenable tracing if it was previously enabled */ 4778 tracing_start_tr(tr); 4779 4780 __trace_array_put(tr); 4781 4782 mutex_unlock(&trace_types_lock); 4783 4784 free_trace_iter_content(iter); 4785 seq_release_private(inode, file); 4786 4787 return 0; 4788 } 4789 4790 int tracing_release_generic_tr(struct inode *inode, struct file *file) 4791 { 4792 struct trace_array *tr = inode->i_private; 4793 4794 trace_array_put(tr); 4795 return 0; 4796 } 4797 4798 static int tracing_single_release_tr(struct inode *inode, struct file *file) 4799 { 4800 struct trace_array *tr = inode->i_private; 4801 4802 trace_array_put(tr); 4803 4804 return single_release(inode, file); 4805 } 4806 4807 static int tracing_open(struct inode *inode, struct file *file) 4808 { 4809 struct trace_array *tr = inode->i_private; 4810 struct trace_iterator *iter; 4811 int ret; 4812 4813 ret = tracing_check_open_get_tr(tr); 4814 if (ret) 4815 return ret; 4816 4817 /* If this file was open for write, then erase contents */ 4818 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 4819 int cpu = tracing_get_cpu(inode); 4820 struct array_buffer *trace_buf = &tr->array_buffer; 4821 4822 #ifdef CONFIG_TRACER_MAX_TRACE 4823 if (tr->current_trace->print_max) 4824 trace_buf = &tr->max_buffer; 4825 #endif 4826 4827 if (cpu == RING_BUFFER_ALL_CPUS) 4828 tracing_reset_online_cpus(trace_buf); 4829 else 4830 tracing_reset_cpu(trace_buf, cpu); 4831 } 4832 4833 if (file->f_mode & FMODE_READ) { 4834 iter = __tracing_open(inode, file, false); 4835 if (IS_ERR(iter)) 4836 ret = PTR_ERR(iter); 4837 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 4838 iter->iter_flags |= 
TRACE_FILE_LAT_FMT; 4839 } 4840 4841 if (ret < 0) 4842 trace_array_put(tr); 4843 4844 return ret; 4845 } 4846 4847 /* 4848 * Some tracers are not suitable for instance buffers. 4849 * A tracer is always available for the global array (toplevel) 4850 * or if it explicitly states that it is. 4851 */ 4852 static bool 4853 trace_ok_for_array(struct tracer *t, struct trace_array *tr) 4854 { 4855 #ifdef CONFIG_TRACER_SNAPSHOT 4856 /* arrays with mapped buffer range do not have snapshots */ 4857 if (tr->range_addr_start && t->use_max_tr) 4858 return false; 4859 #endif 4860 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; 4861 } 4862 4863 /* Find the next tracer that this trace array may use */ 4864 static struct tracer * 4865 get_tracer_for_array(struct trace_array *tr, struct tracer *t) 4866 { 4867 while (t && !trace_ok_for_array(t, tr)) 4868 t = t->next; 4869 4870 return t; 4871 } 4872 4873 static void * 4874 t_next(struct seq_file *m, void *v, loff_t *pos) 4875 { 4876 struct trace_array *tr = m->private; 4877 struct tracer *t = v; 4878 4879 (*pos)++; 4880 4881 if (t) 4882 t = get_tracer_for_array(tr, t->next); 4883 4884 return t; 4885 } 4886 4887 static void *t_start(struct seq_file *m, loff_t *pos) 4888 { 4889 struct trace_array *tr = m->private; 4890 struct tracer *t; 4891 loff_t l = 0; 4892 4893 mutex_lock(&trace_types_lock); 4894 4895 t = get_tracer_for_array(tr, trace_types); 4896 for (; t && l < *pos; t = t_next(m, t, &l)) 4897 ; 4898 4899 return t; 4900 } 4901 4902 static void t_stop(struct seq_file *m, void *p) 4903 { 4904 mutex_unlock(&trace_types_lock); 4905 } 4906 4907 static int t_show(struct seq_file *m, void *v) 4908 { 4909 struct tracer *t = v; 4910 4911 if (!t) 4912 return 0; 4913 4914 seq_puts(m, t->name); 4915 if (t->next) 4916 seq_putc(m, ' '); 4917 else 4918 seq_putc(m, '\n'); 4919 4920 return 0; 4921 } 4922 4923 static const struct seq_operations show_traces_seq_ops = { 4924 .start = t_start, 4925 .next = t_next, 4926 .stop = t_stop, 4927 .show = t_show, 4928 }; 4929 4930 static int show_traces_open(struct inode *inode, struct file *file) 4931 { 4932 struct trace_array *tr = inode->i_private; 4933 struct seq_file *m; 4934 int ret; 4935 4936 ret = tracing_check_open_get_tr(tr); 4937 if (ret) 4938 return ret; 4939 4940 ret = seq_open(file, &show_traces_seq_ops); 4941 if (ret) { 4942 trace_array_put(tr); 4943 return ret; 4944 } 4945 4946 m = file->private_data; 4947 m->private = tr; 4948 4949 return 0; 4950 } 4951 4952 static int tracing_seq_release(struct inode *inode, struct file *file) 4953 { 4954 struct trace_array *tr = inode->i_private; 4955 4956 trace_array_put(tr); 4957 return seq_release(inode, file); 4958 } 4959 4960 static ssize_t 4961 tracing_write_stub(struct file *filp, const char __user *ubuf, 4962 size_t count, loff_t *ppos) 4963 { 4964 return count; 4965 } 4966 4967 loff_t tracing_lseek(struct file *file, loff_t offset, int whence) 4968 { 4969 int ret; 4970 4971 if (file->f_mode & FMODE_READ) 4972 ret = seq_lseek(file, offset, whence); 4973 else 4974 file->f_pos = ret = 0; 4975 4976 return ret; 4977 } 4978 4979 static const struct file_operations tracing_fops = { 4980 .open = tracing_open, 4981 .read = seq_read, 4982 .read_iter = seq_read_iter, 4983 .splice_read = copy_splice_read, 4984 .write = tracing_write_stub, 4985 .llseek = tracing_lseek, 4986 .release = tracing_release, 4987 }; 4988 4989 static const struct file_operations show_traces_fops = { 4990 .open = show_traces_open, 4991 .read = seq_read, 4992 .llseek = seq_lseek, 4993 .release 
= tracing_seq_release, 4994 }; 4995 4996 static ssize_t 4997 tracing_cpumask_read(struct file *filp, char __user *ubuf, 4998 size_t count, loff_t *ppos) 4999 { 5000 struct trace_array *tr = file_inode(filp)->i_private; 5001 char *mask_str; 5002 int len; 5003 5004 len = snprintf(NULL, 0, "%*pb\n", 5005 cpumask_pr_args(tr->tracing_cpumask)) + 1; 5006 mask_str = kmalloc(len, GFP_KERNEL); 5007 if (!mask_str) 5008 return -ENOMEM; 5009 5010 len = snprintf(mask_str, len, "%*pb\n", 5011 cpumask_pr_args(tr->tracing_cpumask)); 5012 if (len >= count) { 5013 count = -EINVAL; 5014 goto out_err; 5015 } 5016 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); 5017 5018 out_err: 5019 kfree(mask_str); 5020 5021 return count; 5022 } 5023 5024 int tracing_set_cpumask(struct trace_array *tr, 5025 cpumask_var_t tracing_cpumask_new) 5026 { 5027 int cpu; 5028 5029 if (!tr) 5030 return -EINVAL; 5031 5032 local_irq_disable(); 5033 arch_spin_lock(&tr->max_lock); 5034 for_each_tracing_cpu(cpu) { 5035 /* 5036 * Increase/decrease the disabled counter if we are 5037 * about to flip a bit in the cpumask: 5038 */ 5039 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && 5040 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 5041 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); 5042 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); 5043 #ifdef CONFIG_TRACER_MAX_TRACE 5044 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); 5045 #endif 5046 } 5047 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && 5048 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 5049 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); 5050 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); 5051 #ifdef CONFIG_TRACER_MAX_TRACE 5052 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); 5053 #endif 5054 } 5055 } 5056 arch_spin_unlock(&tr->max_lock); 5057 local_irq_enable(); 5058 5059 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 5060 5061 return 0; 5062 } 5063 5064 static ssize_t 5065 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 5066 size_t count, loff_t *ppos) 5067 { 5068 struct trace_array *tr = file_inode(filp)->i_private; 5069 cpumask_var_t tracing_cpumask_new; 5070 int err; 5071 5072 if (count == 0 || count > KMALLOC_MAX_SIZE) 5073 return -EINVAL; 5074 5075 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 5076 return -ENOMEM; 5077 5078 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 5079 if (err) 5080 goto err_free; 5081 5082 err = tracing_set_cpumask(tr, tracing_cpumask_new); 5083 if (err) 5084 goto err_free; 5085 5086 free_cpumask_var(tracing_cpumask_new); 5087 5088 return count; 5089 5090 err_free: 5091 free_cpumask_var(tracing_cpumask_new); 5092 5093 return err; 5094 } 5095 5096 static const struct file_operations tracing_cpumask_fops = { 5097 .open = tracing_open_generic_tr, 5098 .read = tracing_cpumask_read, 5099 .write = tracing_cpumask_write, 5100 .release = tracing_release_generic_tr, 5101 .llseek = generic_file_llseek, 5102 }; 5103 5104 static int tracing_trace_options_show(struct seq_file *m, void *v) 5105 { 5106 struct tracer_opt *trace_opts; 5107 struct trace_array *tr = m->private; 5108 u32 tracer_flags; 5109 int i; 5110 5111 guard(mutex)(&trace_types_lock); 5112 5113 tracer_flags = tr->current_trace->flags->val; 5114 trace_opts = tr->current_trace->flags->opts; 5115 5116 for (i = 0; trace_options[i]; i++) { 5117 if (tr->trace_flags & (1 << i)) 5118 seq_printf(m, "%s\n", trace_options[i]); 5119 else 5120 seq_printf(m, 
"no%s\n", trace_options[i]); 5121 } 5122 5123 for (i = 0; trace_opts[i].name; i++) { 5124 if (tracer_flags & trace_opts[i].bit) 5125 seq_printf(m, "%s\n", trace_opts[i].name); 5126 else 5127 seq_printf(m, "no%s\n", trace_opts[i].name); 5128 } 5129 5130 return 0; 5131 } 5132 5133 static int __set_tracer_option(struct trace_array *tr, 5134 struct tracer_flags *tracer_flags, 5135 struct tracer_opt *opts, int neg) 5136 { 5137 struct tracer *trace = tracer_flags->trace; 5138 int ret; 5139 5140 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); 5141 if (ret) 5142 return ret; 5143 5144 if (neg) 5145 tracer_flags->val &= ~opts->bit; 5146 else 5147 tracer_flags->val |= opts->bit; 5148 return 0; 5149 } 5150 5151 /* Try to assign a tracer specific option */ 5152 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) 5153 { 5154 struct tracer *trace = tr->current_trace; 5155 struct tracer_flags *tracer_flags = trace->flags; 5156 struct tracer_opt *opts = NULL; 5157 int i; 5158 5159 for (i = 0; tracer_flags->opts[i].name; i++) { 5160 opts = &tracer_flags->opts[i]; 5161 5162 if (strcmp(cmp, opts->name) == 0) 5163 return __set_tracer_option(tr, trace->flags, opts, neg); 5164 } 5165 5166 return -EINVAL; 5167 } 5168 5169 /* Some tracers require overwrite to stay enabled */ 5170 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) 5171 { 5172 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) 5173 return -1; 5174 5175 return 0; 5176 } 5177 5178 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) 5179 { 5180 if ((mask == TRACE_ITER_RECORD_TGID) || 5181 (mask == TRACE_ITER_RECORD_CMD) || 5182 (mask == TRACE_ITER_TRACE_PRINTK)) 5183 lockdep_assert_held(&event_mutex); 5184 5185 /* do nothing if flag is already set */ 5186 if (!!(tr->trace_flags & mask) == !!enabled) 5187 return 0; 5188 5189 /* Give the tracer a chance to approve the change */ 5190 if (tr->current_trace->flag_changed) 5191 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) 5192 return -EINVAL; 5193 5194 if (mask == TRACE_ITER_TRACE_PRINTK) { 5195 if (enabled) { 5196 update_printk_trace(tr); 5197 } else { 5198 /* 5199 * The global_trace cannot clear this. 5200 * It's flag only gets cleared if another instance sets it. 5201 */ 5202 if (printk_trace == &global_trace) 5203 return -EINVAL; 5204 /* 5205 * An instance must always have it set. 5206 * by default, that's the global_trace instane. 
5207 */ 5208 if (printk_trace == tr) 5209 update_printk_trace(&global_trace); 5210 } 5211 } 5212 5213 if (enabled) 5214 tr->trace_flags |= mask; 5215 else 5216 tr->trace_flags &= ~mask; 5217 5218 if (mask == TRACE_ITER_RECORD_CMD) 5219 trace_event_enable_cmd_record(enabled); 5220 5221 if (mask == TRACE_ITER_RECORD_TGID) { 5222 5223 if (trace_alloc_tgid_map() < 0) { 5224 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; 5225 return -ENOMEM; 5226 } 5227 5228 trace_event_enable_tgid_record(enabled); 5229 } 5230 5231 if (mask == TRACE_ITER_EVENT_FORK) 5232 trace_event_follow_fork(tr, enabled); 5233 5234 if (mask == TRACE_ITER_FUNC_FORK) 5235 ftrace_pid_follow_fork(tr, enabled); 5236 5237 if (mask == TRACE_ITER_OVERWRITE) { 5238 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); 5239 #ifdef CONFIG_TRACER_MAX_TRACE 5240 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); 5241 #endif 5242 } 5243 5244 if (mask == TRACE_ITER_PRINTK) { 5245 trace_printk_start_stop_comm(enabled); 5246 trace_printk_control(enabled); 5247 } 5248 5249 return 0; 5250 } 5251 5252 int trace_set_options(struct trace_array *tr, char *option) 5253 { 5254 char *cmp; 5255 int neg = 0; 5256 int ret; 5257 size_t orig_len = strlen(option); 5258 int len; 5259 5260 cmp = strstrip(option); 5261 5262 len = str_has_prefix(cmp, "no"); 5263 if (len) 5264 neg = 1; 5265 5266 cmp += len; 5267 5268 mutex_lock(&event_mutex); 5269 mutex_lock(&trace_types_lock); 5270 5271 ret = match_string(trace_options, -1, cmp); 5272 /* If no option could be set, test the specific tracer options */ 5273 if (ret < 0) 5274 ret = set_tracer_option(tr, cmp, neg); 5275 else 5276 ret = set_tracer_flag(tr, 1 << ret, !neg); 5277 5278 mutex_unlock(&trace_types_lock); 5279 mutex_unlock(&event_mutex); 5280 5281 /* 5282 * If the first trailing whitespace is replaced with '\0' by strstrip, 5283 * turn it back into a space. 
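 * This keeps the caller's buffer intact for re-parsing, just as
 * apply_trace_boot_options() below puts the ',' back after strsep().
 * A write such as (for example):
 *   echo "nooverwrite" > trace_options
 * arrives here with a trailing newline that strstrip() removes.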
5284 */ 5285 if (orig_len > strlen(option)) 5286 option[strlen(option)] = ' '; 5287 5288 return ret; 5289 } 5290 5291 static void __init apply_trace_boot_options(void) 5292 { 5293 char *buf = trace_boot_options_buf; 5294 char *option; 5295 5296 while (true) { 5297 option = strsep(&buf, ","); 5298 5299 if (!option) 5300 break; 5301 5302 if (*option) 5303 trace_set_options(&global_trace, option); 5304 5305 /* Put back the comma to allow this to be called again */ 5306 if (buf) 5307 *(buf - 1) = ','; 5308 } 5309 } 5310 5311 static ssize_t 5312 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 5313 size_t cnt, loff_t *ppos) 5314 { 5315 struct seq_file *m = filp->private_data; 5316 struct trace_array *tr = m->private; 5317 char buf[64]; 5318 int ret; 5319 5320 if (cnt >= sizeof(buf)) 5321 return -EINVAL; 5322 5323 if (copy_from_user(buf, ubuf, cnt)) 5324 return -EFAULT; 5325 5326 buf[cnt] = 0; 5327 5328 ret = trace_set_options(tr, buf); 5329 if (ret < 0) 5330 return ret; 5331 5332 *ppos += cnt; 5333 5334 return cnt; 5335 } 5336 5337 static int tracing_trace_options_open(struct inode *inode, struct file *file) 5338 { 5339 struct trace_array *tr = inode->i_private; 5340 int ret; 5341 5342 ret = tracing_check_open_get_tr(tr); 5343 if (ret) 5344 return ret; 5345 5346 ret = single_open(file, tracing_trace_options_show, inode->i_private); 5347 if (ret < 0) 5348 trace_array_put(tr); 5349 5350 return ret; 5351 } 5352 5353 static const struct file_operations tracing_iter_fops = { 5354 .open = tracing_trace_options_open, 5355 .read = seq_read, 5356 .llseek = seq_lseek, 5357 .release = tracing_single_release_tr, 5358 .write = tracing_trace_options_write, 5359 }; 5360 5361 static const char readme_msg[] = 5362 "tracing mini-HOWTO:\n\n" 5363 "By default tracefs removes all OTH file permission bits.\n" 5364 "When mounting tracefs an optional group id can be specified\n" 5365 "which adds the group to every directory and file in tracefs:\n\n" 5366 "\t e.g. 
mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n"
5367 "# echo 0 > tracing_on : quick way to disable tracing\n"
5368 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5369 " Important files:\n"
5370 " trace\t\t\t- The static contents of the buffer\n"
5371 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5372 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5373 " current_tracer\t- function and latency tracers\n"
5374 " available_tracers\t- list of configured tracers for current_tracer\n"
5375 " error_log\t- error log for failed commands (that support it)\n"
5376 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5377 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5378 " trace_clock\t\t- change the clock used to order events\n"
5379 " local: Per cpu clock but may not be synced across CPUs\n"
5380 " global: Synced across CPUs but slows tracing down.\n"
5381 " counter: Not a clock, but just an increment\n"
5382 " uptime: Jiffy counter from time of boot\n"
5383 " perf: Same clock that perf events use\n"
5384 #ifdef CONFIG_X86_64
5385 " x86-tsc: TSC cycle counter\n"
5386 #endif
5387 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5388 " delta: Delta difference against a buffer-wide timestamp\n"
5389 " absolute: Absolute (standalone) timestamp\n"
5390 "\n trace_marker\t\t- Writes into this file write into the kernel buffer\n"
5391 "\n trace_marker_raw\t\t- Writes into this file write binary data into the kernel buffer\n"
5392 " tracing_cpumask\t- Limit which CPUs to trace\n"
5393 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5394 "\t\t\t Remove sub-buffer with rmdir\n"
5395 " trace_options\t\t- Set format or modify how tracing happens\n"
5396 "\t\t\t Disable an option by prefixing 'no' to the\n"
5397 "\t\t\t option name\n"
5398 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5399 #ifdef CONFIG_DYNAMIC_FTRACE
5400 "\n available_filter_functions - list of functions that can be filtered on\n"
5401 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5402 "\t\t\t functions\n"
5403 "\t accepts: func_full_name or glob-matching-pattern\n"
5404 "\t modules: Can select a group via module\n"
5405 "\t Format: :mod:<module-name>\n"
5406 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5407 "\t triggers: a command to perform when function is hit\n"
5408 "\t Format: <function>:<trigger>[:count]\n"
5409 "\t trigger: traceon, traceoff\n"
5410 "\t\t enable_event:<system>:<event>\n"
5411 "\t\t disable_event:<system>:<event>\n"
5412 #ifdef CONFIG_STACKTRACE
5413 "\t\t stacktrace\n"
5414 #endif
5415 #ifdef CONFIG_TRACER_SNAPSHOT
5416 "\t\t snapshot\n"
5417 #endif
5418 "\t\t dump\n"
5419 "\t\t cpudump\n"
5420 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5421 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5422 "\t The first one will disable tracing every time do_fault is hit\n"
5423 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5424 "\t The first time do_trap is hit and it disables tracing, the\n"
5425 "\t counter will decrement to 2. If tracing is already disabled,\n"
5426 "\t the counter will not decrement.
It only decrements when the\n" 5427 "\t trigger did work\n" 5428 "\t To remove trigger without count:\n" 5429 "\t echo '!<function>:<trigger> > set_ftrace_filter\n" 5430 "\t To remove trigger with a count:\n" 5431 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" 5432 " set_ftrace_notrace\t- echo function name in here to never trace.\n" 5433 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" 5434 "\t modules: Can select a group via module command :mod:\n" 5435 "\t Does not accept triggers\n" 5436 #endif /* CONFIG_DYNAMIC_FTRACE */ 5437 #ifdef CONFIG_FUNCTION_TRACER 5438 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" 5439 "\t\t (function)\n" 5440 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n" 5441 "\t\t (function)\n" 5442 #endif 5443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5444 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" 5445 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" 5446 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" 5447 #endif 5448 #ifdef CONFIG_TRACER_SNAPSHOT 5449 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" 5450 "\t\t\t snapshot buffer. Read the contents for more\n" 5451 "\t\t\t information\n" 5452 #endif 5453 #ifdef CONFIG_STACK_TRACER 5454 " stack_trace\t\t- Shows the max stack trace when active\n" 5455 " stack_max_size\t- Shows current max stack size that was traced\n" 5456 "\t\t\t Write into this file to reset the max size (trigger a\n" 5457 "\t\t\t new trace)\n" 5458 #ifdef CONFIG_DYNAMIC_FTRACE 5459 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" 5460 "\t\t\t traces\n" 5461 #endif 5462 #endif /* CONFIG_STACK_TRACER */ 5463 #ifdef CONFIG_DYNAMIC_EVENTS 5464 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n" 5465 "\t\t\t Write into this file to define/undefine new trace events.\n" 5466 #endif 5467 #ifdef CONFIG_KPROBE_EVENTS 5468 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n" 5469 "\t\t\t Write into this file to define/undefine new trace events.\n" 5470 #endif 5471 #ifdef CONFIG_UPROBE_EVENTS 5472 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n" 5473 "\t\t\t Write into this file to define/undefine new trace events.\n" 5474 #endif 5475 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \ 5476 defined(CONFIG_FPROBE_EVENTS) 5477 "\t accepts: event-definitions (one definition per line)\n" 5478 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) 5479 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n" 5480 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n" 5481 #endif 5482 #ifdef CONFIG_FPROBE_EVENTS 5483 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n" 5484 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n" 5485 #endif 5486 #ifdef CONFIG_HIST_TRIGGERS 5487 "\t s:[synthetic/]<event> <field> [<field>]\n" 5488 #endif 5489 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n" 5490 "\t -:[<group>/][<event>]\n" 5491 #ifdef CONFIG_KPROBE_EVENTS 5492 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" 5493 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n" 5494 #endif 5495 #ifdef CONFIG_UPROBE_EVENTS 5496 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n" 5497 #endif 5498 "\t args: <name>=fetcharg[:type]\n" 5499 "\t fetcharg: 
(%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n" 5500 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API 5501 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n" 5502 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS 5503 "\t <argname>[->field[->field|.field...]],\n" 5504 #endif 5505 #else 5506 "\t $stack<index>, $stack, $retval, $comm,\n" 5507 #endif 5508 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n" 5509 "\t kernel return probes support: $retval, $arg<N>, $comm\n" 5510 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n" 5511 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n" 5512 "\t symstr, %pd/%pD, <type>\\[<array-size>\\]\n" 5513 #ifdef CONFIG_HIST_TRIGGERS 5514 "\t field: <stype> <name>;\n" 5515 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" 5516 "\t [unsigned] char/int/long\n" 5517 #endif 5518 "\t efield: For event probes ('e' types), the field is on of the fields\n" 5519 "\t of the <attached-group>/<attached-event>.\n" 5520 #endif 5521 " set_event\t\t- Enables events by name written into it\n" 5522 "\t\t\t Can enable module events via: :mod:<module>\n" 5523 " events/\t\t- Directory containing all trace event subsystems:\n" 5524 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" 5525 " events/<system>/\t- Directory containing all trace events for <system>:\n" 5526 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" 5527 "\t\t\t events\n" 5528 " filter\t\t- If set, only events passing filter are traced\n" 5529 " events/<system>/<event>/\t- Directory containing control files for\n" 5530 "\t\t\t <event>:\n" 5531 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" 5532 " filter\t\t- If set, only events passing filter are traced\n" 5533 " trigger\t\t- If set, a command to perform when event is hit\n" 5534 "\t Format: <trigger>[:count][if <filter>]\n" 5535 "\t trigger: traceon, traceoff\n" 5536 "\t enable_event:<system>:<event>\n" 5537 "\t disable_event:<system>:<event>\n" 5538 #ifdef CONFIG_HIST_TRIGGERS 5539 "\t enable_hist:<system>:<event>\n" 5540 "\t disable_hist:<system>:<event>\n" 5541 #endif 5542 #ifdef CONFIG_STACKTRACE 5543 "\t\t stacktrace\n" 5544 #endif 5545 #ifdef CONFIG_TRACER_SNAPSHOT 5546 "\t\t snapshot\n" 5547 #endif 5548 #ifdef CONFIG_HIST_TRIGGERS 5549 "\t\t hist (see below)\n" 5550 #endif 5551 "\t example: echo traceoff > events/block/block_unplug/trigger\n" 5552 "\t echo traceoff:3 > events/block/block_unplug/trigger\n" 5553 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" 5554 "\t events/block/block_unplug/trigger\n" 5555 "\t The first disables tracing every time block_unplug is hit.\n" 5556 "\t The second disables tracing the first 3 times block_unplug is hit.\n" 5557 "\t The third enables the kmalloc event the first 3 times block_unplug\n" 5558 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" 5559 "\t Like function triggers, the counter is only decremented if it\n" 5560 "\t enabled or disabled tracing.\n" 5561 "\t To remove a trigger without a count:\n" 5562 "\t echo '!<trigger> > <system>/<event>/trigger\n" 5563 "\t To remove a trigger with a count:\n" 5564 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n" 5565 "\t Filters can be ignored when removing a trigger.\n" 5566 #ifdef CONFIG_HIST_TRIGGERS 5567 " hist trigger\t- If set, event hits are aggregated into a hash table\n" 5568 "\t Format: hist:keys=<field1[,field2,...]>\n" 5569 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n" 5570 "\t 
[:values=<field1[,field2,...]>]\n" 5571 "\t [:sort=<field1[,field2,...]>]\n" 5572 "\t [:size=#entries]\n" 5573 "\t [:pause][:continue][:clear]\n" 5574 "\t [:name=histname1]\n" 5575 "\t [:nohitcount]\n" 5576 "\t [:<handler>.<action>]\n" 5577 "\t [if <filter>]\n\n" 5578 "\t Note, special fields can be used as well:\n" 5579 "\t common_timestamp - to record current timestamp\n" 5580 "\t common_cpu - to record the CPU the event happened on\n" 5581 "\n" 5582 "\t A hist trigger variable can be:\n" 5583 "\t - a reference to a field e.g. x=current_timestamp,\n" 5584 "\t - a reference to another variable e.g. y=$x,\n" 5585 "\t - a numeric literal: e.g. ms_per_sec=1000,\n" 5586 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n" 5587 "\n" 5588 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n" 5589 "\t multiplication(*) and division(/) operators. An operand can be either a\n" 5590 "\t variable reference, field or numeric literal.\n" 5591 "\n" 5592 "\t When a matching event is hit, an entry is added to a hash\n" 5593 "\t table using the key(s) and value(s) named, and the value of a\n" 5594 "\t sum called 'hitcount' is incremented. Keys and values\n" 5595 "\t correspond to fields in the event's format description. Keys\n" 5596 "\t can be any field, or the special string 'common_stacktrace'.\n" 5597 "\t Compound keys consisting of up to two fields can be specified\n" 5598 "\t by the 'keys' keyword. Values must correspond to numeric\n" 5599 "\t fields. Sort keys consisting of up to two fields can be\n" 5600 "\t specified using the 'sort' keyword. The sort direction can\n" 5601 "\t be modified by appending '.descending' or '.ascending' to a\n" 5602 "\t sort field. The 'size' parameter can be used to specify more\n" 5603 "\t or fewer than the default 2048 entries for the hashtable size.\n" 5604 "\t If a hist trigger is given a name using the 'name' parameter,\n" 5605 "\t its histogram data will be shared with other triggers of the\n" 5606 "\t same name, and trigger hits will update this common data.\n\n" 5607 "\t Reading the 'hist' file for the event will dump the hash\n" 5608 "\t table in its entirety to stdout. If there are multiple hist\n" 5609 "\t triggers attached to an event, there will be a table for each\n" 5610 "\t trigger in the output. The table displayed for a named\n" 5611 "\t trigger will be the same as any other instance having the\n" 5612 "\t same name. The default format used to display a given field\n" 5613 "\t can be modified by appending any of the following modifiers\n" 5614 "\t to the field name, as applicable:\n\n" 5615 "\t .hex display a number as a hex value\n" 5616 "\t .sym display an address as a symbol\n" 5617 "\t .sym-offset display an address as a symbol and offset\n" 5618 "\t .execname display a common_pid as a program name\n" 5619 "\t .syscall display a syscall id as a syscall name\n" 5620 "\t .log2 display log2 value rather than raw number\n" 5621 "\t .buckets=size display values in groups of size rather than raw number\n" 5622 "\t .usecs display a common_timestamp in microseconds\n" 5623 "\t .percent display a number of percentage value\n" 5624 "\t .graph display a bar-graph of a value\n\n" 5625 "\t The 'pause' parameter can be used to pause an existing hist\n" 5626 "\t trigger or to start a hist trigger but not log any events\n" 5627 "\t until told to do so. 
'continue' can be used to start or\n" 5628 "\t restart a paused hist trigger.\n\n" 5629 "\t The 'clear' parameter will clear the contents of a running\n" 5630 "\t hist trigger and leave its current paused/active state\n" 5631 "\t unchanged.\n\n" 5632 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n" 5633 "\t raw hitcount in the histogram.\n\n" 5634 "\t The enable_hist and disable_hist triggers can be used to\n" 5635 "\t have one event conditionally start and stop another event's\n" 5636 "\t already-attached hist trigger. The syntax is analogous to\n" 5637 "\t the enable_event and disable_event triggers.\n\n" 5638 "\t Hist trigger handlers and actions are executed whenever a\n" 5639 "\t a histogram entry is added or updated. They take the form:\n\n" 5640 "\t <handler>.<action>\n\n" 5641 "\t The available handlers are:\n\n" 5642 "\t onmatch(matching.event) - invoke on addition or update\n" 5643 "\t onmax(var) - invoke if var exceeds current max\n" 5644 "\t onchange(var) - invoke action if var changes\n\n" 5645 "\t The available actions are:\n\n" 5646 "\t trace(<synthetic_event>,param list) - generate synthetic event\n" 5647 "\t save(field,...) - save current event fields\n" 5648 #ifdef CONFIG_TRACER_SNAPSHOT 5649 "\t snapshot() - snapshot the trace buffer\n\n" 5650 #endif 5651 #ifdef CONFIG_SYNTH_EVENTS 5652 " events/synthetic_events\t- Create/append/remove/show synthetic events\n" 5653 "\t Write into this file to define/undefine new synthetic events.\n" 5654 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n" 5655 #endif 5656 #endif 5657 ; 5658 5659 static ssize_t 5660 tracing_readme_read(struct file *filp, char __user *ubuf, 5661 size_t cnt, loff_t *ppos) 5662 { 5663 return simple_read_from_buffer(ubuf, cnt, ppos, 5664 readme_msg, strlen(readme_msg)); 5665 } 5666 5667 static const struct file_operations tracing_readme_fops = { 5668 .open = tracing_open_generic, 5669 .read = tracing_readme_read, 5670 .llseek = generic_file_llseek, 5671 }; 5672 5673 #ifdef CONFIG_TRACE_EVAL_MAP_FILE 5674 static union trace_eval_map_item * 5675 update_eval_map(union trace_eval_map_item *ptr) 5676 { 5677 if (!ptr->map.eval_string) { 5678 if (ptr->tail.next) { 5679 ptr = ptr->tail.next; 5680 /* Set ptr to the next real item (skip head) */ 5681 ptr++; 5682 } else 5683 return NULL; 5684 } 5685 return ptr; 5686 } 5687 5688 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) 5689 { 5690 union trace_eval_map_item *ptr = v; 5691 5692 /* 5693 * Paranoid! If ptr points to end, we don't want to increment past it. 5694 * This really should never happen. 
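 * Each saved array is laid out as { head, map[0..len-1], tail }; the
 * tail is recognized by update_eval_map() because the member that
 * overlays eval_string is NULL there, and the head of a chained
 * array is skipped with the extra ptr++.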
5695 */ 5696 (*pos)++; 5697 ptr = update_eval_map(ptr); 5698 if (WARN_ON_ONCE(!ptr)) 5699 return NULL; 5700 5701 ptr++; 5702 ptr = update_eval_map(ptr); 5703 5704 return ptr; 5705 } 5706 5707 static void *eval_map_start(struct seq_file *m, loff_t *pos) 5708 { 5709 union trace_eval_map_item *v; 5710 loff_t l = 0; 5711 5712 mutex_lock(&trace_eval_mutex); 5713 5714 v = trace_eval_maps; 5715 if (v) 5716 v++; 5717 5718 while (v && l < *pos) { 5719 v = eval_map_next(m, v, &l); 5720 } 5721 5722 return v; 5723 } 5724 5725 static void eval_map_stop(struct seq_file *m, void *v) 5726 { 5727 mutex_unlock(&trace_eval_mutex); 5728 } 5729 5730 static int eval_map_show(struct seq_file *m, void *v) 5731 { 5732 union trace_eval_map_item *ptr = v; 5733 5734 seq_printf(m, "%s %ld (%s)\n", 5735 ptr->map.eval_string, ptr->map.eval_value, 5736 ptr->map.system); 5737 5738 return 0; 5739 } 5740 5741 static const struct seq_operations tracing_eval_map_seq_ops = { 5742 .start = eval_map_start, 5743 .next = eval_map_next, 5744 .stop = eval_map_stop, 5745 .show = eval_map_show, 5746 }; 5747 5748 static int tracing_eval_map_open(struct inode *inode, struct file *filp) 5749 { 5750 int ret; 5751 5752 ret = tracing_check_open_get_tr(NULL); 5753 if (ret) 5754 return ret; 5755 5756 return seq_open(filp, &tracing_eval_map_seq_ops); 5757 } 5758 5759 static const struct file_operations tracing_eval_map_fops = { 5760 .open = tracing_eval_map_open, 5761 .read = seq_read, 5762 .llseek = seq_lseek, 5763 .release = seq_release, 5764 }; 5765 5766 static inline union trace_eval_map_item * 5767 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) 5768 { 5769 /* Return tail of array given the head */ 5770 return ptr + ptr->head.length + 1; 5771 } 5772 5773 static void 5774 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, 5775 int len) 5776 { 5777 struct trace_eval_map **stop; 5778 struct trace_eval_map **map; 5779 union trace_eval_map_item *map_array; 5780 union trace_eval_map_item *ptr; 5781 5782 stop = start + len; 5783 5784 /* 5785 * The trace_eval_maps contains the map plus a head and tail item, 5786 * where the head holds the module and length of array, and the 5787 * tail holds a pointer to the next list. 
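 *
 *   [ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next ]
 *
 * The memset() of the final slot below doubles as the NULL ->next
 * terminating the chain.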
5788 */ 5789 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); 5790 if (!map_array) { 5791 pr_warn("Unable to allocate trace eval mapping\n"); 5792 return; 5793 } 5794 5795 guard(mutex)(&trace_eval_mutex); 5796 5797 if (!trace_eval_maps) 5798 trace_eval_maps = map_array; 5799 else { 5800 ptr = trace_eval_maps; 5801 for (;;) { 5802 ptr = trace_eval_jmp_to_tail(ptr); 5803 if (!ptr->tail.next) 5804 break; 5805 ptr = ptr->tail.next; 5806 5807 } 5808 ptr->tail.next = map_array; 5809 } 5810 map_array->head.mod = mod; 5811 map_array->head.length = len; 5812 map_array++; 5813 5814 for (map = start; (unsigned long)map < (unsigned long)stop; map++) { 5815 map_array->map = **map; 5816 map_array++; 5817 } 5818 memset(map_array, 0, sizeof(*map_array)); 5819 } 5820 5821 static void trace_create_eval_file(struct dentry *d_tracer) 5822 { 5823 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer, 5824 NULL, &tracing_eval_map_fops); 5825 } 5826 5827 #else /* CONFIG_TRACE_EVAL_MAP_FILE */ 5828 static inline void trace_create_eval_file(struct dentry *d_tracer) { } 5829 static inline void trace_insert_eval_map_file(struct module *mod, 5830 struct trace_eval_map **start, int len) { } 5831 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ 5832 5833 static void trace_insert_eval_map(struct module *mod, 5834 struct trace_eval_map **start, int len) 5835 { 5836 struct trace_eval_map **map; 5837 5838 if (len <= 0) 5839 return; 5840 5841 map = start; 5842 5843 trace_event_eval_update(map, len); 5844 5845 trace_insert_eval_map_file(mod, start, len); 5846 } 5847 5848 static ssize_t 5849 tracing_set_trace_read(struct file *filp, char __user *ubuf, 5850 size_t cnt, loff_t *ppos) 5851 { 5852 struct trace_array *tr = filp->private_data; 5853 char buf[MAX_TRACER_SIZE+2]; 5854 int r; 5855 5856 mutex_lock(&trace_types_lock); 5857 r = sprintf(buf, "%s\n", tr->current_trace->name); 5858 mutex_unlock(&trace_types_lock); 5859 5860 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5861 } 5862 5863 int tracer_init(struct tracer *t, struct trace_array *tr) 5864 { 5865 tracing_reset_online_cpus(&tr->array_buffer); 5866 return t->init(tr); 5867 } 5868 5869 static void set_buffer_entries(struct array_buffer *buf, unsigned long val) 5870 { 5871 int cpu; 5872 5873 for_each_tracing_cpu(cpu) 5874 per_cpu_ptr(buf->data, cpu)->entries = val; 5875 } 5876 5877 static void update_buffer_entries(struct array_buffer *buf, int cpu) 5878 { 5879 if (cpu == RING_BUFFER_ALL_CPUS) { 5880 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0)); 5881 } else { 5882 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu); 5883 } 5884 } 5885 5886 #ifdef CONFIG_TRACER_MAX_TRACE 5887 /* resize @tr's buffer to the size of @size_tr's entries */ 5888 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, 5889 struct array_buffer *size_buf, int cpu_id) 5890 { 5891 int cpu, ret = 0; 5892 5893 if (cpu_id == RING_BUFFER_ALL_CPUS) { 5894 for_each_tracing_cpu(cpu) { 5895 ret = ring_buffer_resize(trace_buf->buffer, 5896 per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 5897 if (ret < 0) 5898 break; 5899 per_cpu_ptr(trace_buf->data, cpu)->entries = 5900 per_cpu_ptr(size_buf->data, cpu)->entries; 5901 } 5902 } else { 5903 ret = ring_buffer_resize(trace_buf->buffer, 5904 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 5905 if (ret == 0) 5906 per_cpu_ptr(trace_buf->data, cpu_id)->entries = 5907 per_cpu_ptr(size_buf->data, cpu_id)->entries; 5908 } 5909 5910 return ret; 5911 } 5912 #endif /* CONFIG_TRACER_MAX_TRACE 
*/ 5913 5914 static int __tracing_resize_ring_buffer(struct trace_array *tr, 5915 unsigned long size, int cpu) 5916 { 5917 int ret; 5918 5919 /* 5920 * If kernel or user changes the size of the ring buffer 5921 * we use the size that was given, and we can forget about 5922 * expanding it later. 5923 */ 5924 trace_set_ring_buffer_expanded(tr); 5925 5926 /* May be called before buffers are initialized */ 5927 if (!tr->array_buffer.buffer) 5928 return 0; 5929 5930 /* Do not allow tracing while resizing ring buffer */ 5931 tracing_stop_tr(tr); 5932 5933 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); 5934 if (ret < 0) 5935 goto out_start; 5936 5937 #ifdef CONFIG_TRACER_MAX_TRACE 5938 if (!tr->allocated_snapshot) 5939 goto out; 5940 5941 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); 5942 if (ret < 0) { 5943 int r = resize_buffer_duplicate_size(&tr->array_buffer, 5944 &tr->array_buffer, cpu); 5945 if (r < 0) { 5946 /* 5947 * AARGH! We are left with different 5948 * size max buffer!!!! 5949 * The max buffer is our "snapshot" buffer. 5950 * When a tracer needs a snapshot (one of the 5951 * latency tracers), it swaps the max buffer 5952 * with the saved snapshot. We succeeded in 5953 * updating the size of the main buffer, but failed to 5954 * update the size of the max buffer. But when we tried 5955 * to reset the main buffer to the original size, we 5956 * failed there too. This is very unlikely to 5957 * happen, but if it does, warn and kill all 5958 * tracing. 5959 */ 5960 WARN_ON(1); 5961 tracing_disabled = 1; 5962 } 5963 goto out_start; 5964 } 5965 5966 update_buffer_entries(&tr->max_buffer, cpu); 5967 5968 out: 5969 #endif /* CONFIG_TRACER_MAX_TRACE */ 5970 5971 update_buffer_entries(&tr->array_buffer, cpu); 5972 out_start: 5973 tracing_start_tr(tr); 5974 return ret; 5975 } 5976 5977 ssize_t tracing_resize_ring_buffer(struct trace_array *tr, 5978 unsigned long size, int cpu_id) 5979 { 5980 guard(mutex)(&trace_types_lock); 5981 5982 if (cpu_id != RING_BUFFER_ALL_CPUS) { 5983 /* make sure this CPU is enabled in the mask */ 5984 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) 5985 return -EINVAL; 5986 } 5987 5988 return __tracing_resize_ring_buffer(tr, size, cpu_id); 5989 } 5990 5991 static void update_last_data(struct trace_array *tr) 5992 { 5993 if (!tr->text_delta && !tr->data_delta) 5994 return; 5995 5996 /* 5997 * Need to clear all CPU buffers as there cannot be events 5998 * from the previous boot mixed with events from this boot, 5999 * as that would cause a confusing trace. Need to clear all 6000 * CPU buffers, even for those that may currently be offline. 6001 */ 6002 tracing_reset_all_cpus(&tr->array_buffer); 6003 6004 /* Using current data now */ 6005 tr->text_delta = 0; 6006 tr->data_delta = 0; 6007 } 6008 6009 /** 6010 * tracing_update_buffers - used by tracing facility to expand ring buffers 6011 * @tr: The tracing instance 6012 * 6013 * To save memory when tracing is never used on a system that has it 6014 * configured in, the ring buffers are initially set to a minimum size. 6015 * Once a user starts to use the tracing facility, they are expanded 6016 * to their default size. 6017 * 6018 * This function is to be called when a tracer is about to be used.
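 *
 * Returns 0 on success, or a negative errno if resizing the ring
 * buffer fails.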
6019 */ 6020 int tracing_update_buffers(struct trace_array *tr) 6021 { 6022 int ret = 0; 6023 6024 mutex_lock(&trace_types_lock); 6025 6026 update_last_data(tr); 6027 6028 if (!tr->ring_buffer_expanded) 6029 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, 6030 RING_BUFFER_ALL_CPUS); 6031 mutex_unlock(&trace_types_lock); 6032 6033 return ret; 6034 } 6035 6036 struct trace_option_dentry; 6037 6038 static void 6039 create_trace_option_files(struct trace_array *tr, struct tracer *tracer); 6040 6041 /* 6042 * Used to clear out the tracer before deletion of an instance. 6043 * Must have trace_types_lock held. 6044 */ 6045 static void tracing_set_nop(struct trace_array *tr) 6046 { 6047 if (tr->current_trace == &nop_trace) 6048 return; 6049 6050 tr->current_trace->enabled--; 6051 6052 if (tr->current_trace->reset) 6053 tr->current_trace->reset(tr); 6054 6055 tr->current_trace = &nop_trace; 6056 } 6057 6058 static bool tracer_options_updated; 6059 6060 static void add_tracer_options(struct trace_array *tr, struct tracer *t) 6061 { 6062 /* Only enable if the directory has been created already. */ 6063 if (!tr->dir) 6064 return; 6065 6066 /* Only create trace option files after update_tracer_options finish */ 6067 if (!tracer_options_updated) 6068 return; 6069 6070 create_trace_option_files(tr, t); 6071 } 6072 6073 int tracing_set_tracer(struct trace_array *tr, const char *buf) 6074 { 6075 struct tracer *t; 6076 #ifdef CONFIG_TRACER_MAX_TRACE 6077 bool had_max_tr; 6078 #endif 6079 int ret; 6080 6081 guard(mutex)(&trace_types_lock); 6082 6083 update_last_data(tr); 6084 6085 if (!tr->ring_buffer_expanded) { 6086 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, 6087 RING_BUFFER_ALL_CPUS); 6088 if (ret < 0) 6089 return ret; 6090 ret = 0; 6091 } 6092 6093 for (t = trace_types; t; t = t->next) { 6094 if (strcmp(t->name, buf) == 0) 6095 break; 6096 } 6097 if (!t) 6098 return -EINVAL; 6099 6100 if (t == tr->current_trace) 6101 return 0; 6102 6103 #ifdef CONFIG_TRACER_SNAPSHOT 6104 if (t->use_max_tr) { 6105 local_irq_disable(); 6106 arch_spin_lock(&tr->max_lock); 6107 ret = tr->cond_snapshot ? -EBUSY : 0; 6108 arch_spin_unlock(&tr->max_lock); 6109 local_irq_enable(); 6110 if (ret) 6111 return ret; 6112 } 6113 #endif 6114 /* Some tracers won't work on kernel command line */ 6115 if (system_state < SYSTEM_RUNNING && t->noboot) { 6116 pr_warn("Tracer '%s' is not allowed on command line, ignored\n", 6117 t->name); 6118 return -EINVAL; 6119 } 6120 6121 /* Some tracers are only allowed for the top level buffer */ 6122 if (!trace_ok_for_array(t, tr)) 6123 return -EINVAL; 6124 6125 /* If trace pipe files are being read, we can't change the tracer */ 6126 if (tr->trace_ref) 6127 return -EBUSY; 6128 6129 trace_branch_disable(); 6130 6131 tr->current_trace->enabled--; 6132 6133 if (tr->current_trace->reset) 6134 tr->current_trace->reset(tr); 6135 6136 #ifdef CONFIG_TRACER_MAX_TRACE 6137 had_max_tr = tr->current_trace->use_max_tr; 6138 6139 /* Current trace needs to be nop_trace before synchronize_rcu */ 6140 tr->current_trace = &nop_trace; 6141 6142 if (had_max_tr && !t->use_max_tr) { 6143 /* 6144 * We need to make sure that the update_max_tr sees that 6145 * current_trace changed to nop_trace to keep it from 6146 * swapping the buffers after we resize it. 6147 * The update_max_tr is called from interrupts disabled 6148 * so a synchronized_sched() is sufficient. 
6149 */ 6150 synchronize_rcu(); 6151 free_snapshot(tr); 6152 tracing_disarm_snapshot(tr); 6153 } 6154 6155 if (!had_max_tr && t->use_max_tr) { 6156 ret = tracing_arm_snapshot_locked(tr); 6157 if (ret) 6158 return ret; 6159 } 6160 #else 6161 tr->current_trace = &nop_trace; 6162 #endif 6163 6164 if (t->init) { 6165 ret = tracer_init(t, tr); 6166 if (ret) { 6167 #ifdef CONFIG_TRACER_MAX_TRACE 6168 if (t->use_max_tr) 6169 tracing_disarm_snapshot(tr); 6170 #endif 6171 return ret; 6172 } 6173 } 6174 6175 tr->current_trace = t; 6176 tr->current_trace->enabled++; 6177 trace_branch_enable(tr); 6178 6179 return 0; 6180 } 6181 6182 static ssize_t 6183 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 6184 size_t cnt, loff_t *ppos) 6185 { 6186 struct trace_array *tr = filp->private_data; 6187 char buf[MAX_TRACER_SIZE+1]; 6188 char *name; 6189 size_t ret; 6190 int err; 6191 6192 ret = cnt; 6193 6194 if (cnt > MAX_TRACER_SIZE) 6195 cnt = MAX_TRACER_SIZE; 6196 6197 if (copy_from_user(buf, ubuf, cnt)) 6198 return -EFAULT; 6199 6200 buf[cnt] = 0; 6201 6202 name = strim(buf); 6203 6204 err = tracing_set_tracer(tr, name); 6205 if (err) 6206 return err; 6207 6208 *ppos += ret; 6209 6210 return ret; 6211 } 6212 6213 static ssize_t 6214 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, 6215 size_t cnt, loff_t *ppos) 6216 { 6217 char buf[64]; 6218 int r; 6219 6220 r = snprintf(buf, sizeof(buf), "%ld\n", 6221 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 6222 if (r > sizeof(buf)) 6223 r = sizeof(buf); 6224 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6225 } 6226 6227 static ssize_t 6228 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, 6229 size_t cnt, loff_t *ppos) 6230 { 6231 unsigned long val; 6232 int ret; 6233 6234 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6235 if (ret) 6236 return ret; 6237 6238 *ptr = val * 1000; 6239 6240 return cnt; 6241 } 6242 6243 static ssize_t 6244 tracing_thresh_read(struct file *filp, char __user *ubuf, 6245 size_t cnt, loff_t *ppos) 6246 { 6247 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); 6248 } 6249 6250 static ssize_t 6251 tracing_thresh_write(struct file *filp, const char __user *ubuf, 6252 size_t cnt, loff_t *ppos) 6253 { 6254 struct trace_array *tr = filp->private_data; 6255 int ret; 6256 6257 guard(mutex)(&trace_types_lock); 6258 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); 6259 if (ret < 0) 6260 return ret; 6261 6262 if (tr->current_trace->update_thresh) { 6263 ret = tr->current_trace->update_thresh(tr); 6264 if (ret < 0) 6265 return ret; 6266 } 6267 6268 return cnt; 6269 } 6270 6271 #ifdef CONFIG_TRACER_MAX_TRACE 6272 6273 static ssize_t 6274 tracing_max_lat_read(struct file *filp, char __user *ubuf, 6275 size_t cnt, loff_t *ppos) 6276 { 6277 struct trace_array *tr = filp->private_data; 6278 6279 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos); 6280 } 6281 6282 static ssize_t 6283 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 6284 size_t cnt, loff_t *ppos) 6285 { 6286 struct trace_array *tr = filp->private_data; 6287 6288 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos); 6289 } 6290 6291 #endif 6292 6293 static int open_pipe_on_cpu(struct trace_array *tr, int cpu) 6294 { 6295 if (cpu == RING_BUFFER_ALL_CPUS) { 6296 if (cpumask_empty(tr->pipe_cpumask)) { 6297 cpumask_setall(tr->pipe_cpumask); 6298 return 0; 6299 } 6300 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) { 6301 cpumask_set_cpu(cpu, tr->pipe_cpumask); 6302 return 
0; 6303 } 6304 return -EBUSY; 6305 } 6306 6307 static void close_pipe_on_cpu(struct trace_array *tr, int cpu) 6308 { 6309 if (cpu == RING_BUFFER_ALL_CPUS) { 6310 WARN_ON(!cpumask_full(tr->pipe_cpumask)); 6311 cpumask_clear(tr->pipe_cpumask); 6312 } else { 6313 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask)); 6314 cpumask_clear_cpu(cpu, tr->pipe_cpumask); 6315 } 6316 } 6317 6318 static int tracing_open_pipe(struct inode *inode, struct file *filp) 6319 { 6320 struct trace_array *tr = inode->i_private; 6321 struct trace_iterator *iter; 6322 int cpu; 6323 int ret; 6324 6325 ret = tracing_check_open_get_tr(tr); 6326 if (ret) 6327 return ret; 6328 6329 mutex_lock(&trace_types_lock); 6330 cpu = tracing_get_cpu(inode); 6331 ret = open_pipe_on_cpu(tr, cpu); 6332 if (ret) 6333 goto fail_pipe_on_cpu; 6334 6335 /* create a buffer to store the information to pass to userspace */ 6336 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 6337 if (!iter) { 6338 ret = -ENOMEM; 6339 goto fail_alloc_iter; 6340 } 6341 6342 trace_seq_init(&iter->seq); 6343 iter->trace = tr->current_trace; 6344 6345 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 6346 ret = -ENOMEM; 6347 goto fail; 6348 } 6349 6350 /* trace pipe does not show start of buffer */ 6351 cpumask_setall(iter->started); 6352 6353 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 6354 iter->iter_flags |= TRACE_FILE_LAT_FMT; 6355 6356 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 6357 if (trace_clocks[tr->clock_id].in_ns) 6358 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 6359 6360 iter->tr = tr; 6361 iter->array_buffer = &tr->array_buffer; 6362 iter->cpu_file = cpu; 6363 mutex_init(&iter->mutex); 6364 filp->private_data = iter; 6365 6366 if (iter->trace->pipe_open) 6367 iter->trace->pipe_open(iter); 6368 6369 nonseekable_open(inode, filp); 6370 6371 tr->trace_ref++; 6372 6373 mutex_unlock(&trace_types_lock); 6374 return ret; 6375 6376 fail: 6377 kfree(iter); 6378 fail_alloc_iter: 6379 close_pipe_on_cpu(tr, cpu); 6380 fail_pipe_on_cpu: 6381 __trace_array_put(tr); 6382 mutex_unlock(&trace_types_lock); 6383 return ret; 6384 } 6385 6386 static int tracing_release_pipe(struct inode *inode, struct file *file) 6387 { 6388 struct trace_iterator *iter = file->private_data; 6389 struct trace_array *tr = inode->i_private; 6390 6391 mutex_lock(&trace_types_lock); 6392 6393 tr->trace_ref--; 6394 6395 if (iter->trace->pipe_close) 6396 iter->trace->pipe_close(iter); 6397 close_pipe_on_cpu(tr, iter->cpu_file); 6398 mutex_unlock(&trace_types_lock); 6399 6400 free_trace_iter_content(iter); 6401 kfree(iter); 6402 6403 trace_array_put(tr); 6404 6405 return 0; 6406 } 6407 6408 static __poll_t 6409 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) 6410 { 6411 struct trace_array *tr = iter->tr; 6412 6413 /* Iterators are static, they should be filled or empty */ 6414 if (trace_buffer_iter(iter, iter->cpu_file)) 6415 return EPOLLIN | EPOLLRDNORM; 6416 6417 if (tr->trace_flags & TRACE_ITER_BLOCK) 6418 /* 6419 * Always select as readable when in blocking mode 6420 */ 6421 return EPOLLIN | EPOLLRDNORM; 6422 else 6423 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, 6424 filp, poll_table, iter->tr->buffer_percent); 6425 } 6426 6427 static __poll_t 6428 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 6429 { 6430 struct trace_iterator *iter = filp->private_data; 6431 6432 return trace_poll(iter, filp, poll_table); 6433 } 6434 6435 /* Must be called with iter->mutex held. 
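 * Returns 1 when the wait is done (data is available, or tracing was
 * disabled after a previous read), -EAGAIN for a non-blocking read on
 * an empty buffer, or a negative error propagated from wait_on_pipe().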
*/ 6436 static int tracing_wait_pipe(struct file *filp) 6437 { 6438 struct trace_iterator *iter = filp->private_data; 6439 int ret; 6440 6441 while (trace_empty(iter)) { 6442 6443 if ((filp->f_flags & O_NONBLOCK)) { 6444 return -EAGAIN; 6445 } 6446 6447 /* 6448 * We block until we read something and tracing is disabled. 6449 * We still block if tracing is disabled, but we have never 6450 * read anything. This allows a user to cat this file, and 6451 * then enable tracing. But after we have read something, 6452 * we give an EOF when tracing is again disabled. 6453 * 6454 * iter->pos will be 0 if we haven't read anything. 6455 */ 6456 if (!tracer_tracing_is_on(iter->tr) && iter->pos) 6457 break; 6458 6459 mutex_unlock(&iter->mutex); 6460 6461 ret = wait_on_pipe(iter, 0); 6462 6463 mutex_lock(&iter->mutex); 6464 6465 if (ret) 6466 return ret; 6467 } 6468 6469 return 1; 6470 } 6471 6472 /* 6473 * Consumer reader. 6474 */ 6475 static ssize_t 6476 tracing_read_pipe(struct file *filp, char __user *ubuf, 6477 size_t cnt, loff_t *ppos) 6478 { 6479 struct trace_iterator *iter = filp->private_data; 6480 ssize_t sret; 6481 6482 /* 6483 * Avoid more than one consumer on a single file descriptor. 6484 * This is just a matter of trace coherency; the ring buffer itself 6485 * is protected. 6486 */ 6487 guard(mutex)(&iter->mutex); 6488 6489 /* return any leftover data */ 6490 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 6491 if (sret != -EBUSY) 6492 return sret; 6493 6494 trace_seq_init(&iter->seq); 6495 6496 if (iter->trace->read) { 6497 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 6498 if (sret) 6499 return sret; 6500 } 6501 6502 waitagain: 6503 sret = tracing_wait_pipe(filp); 6504 if (sret <= 0) 6505 return sret; 6506 6507 /* stop when tracing is finished */ 6508 if (trace_empty(iter)) 6509 return 0; 6510 6511 if (cnt >= TRACE_SEQ_BUFFER_SIZE) 6512 cnt = TRACE_SEQ_BUFFER_SIZE - 1; 6513 6514 /* reset all but tr, trace, and overruns */ 6515 trace_iterator_reset(iter); 6516 cpumask_clear(iter->started); 6517 trace_seq_init(&iter->seq); 6518 6519 trace_event_read_lock(); 6520 trace_access_lock(iter->cpu_file); 6521 while (trace_find_next_entry_inc(iter) != NULL) { 6522 enum print_line_t ret; 6523 int save_len = iter->seq.seq.len; 6524 6525 ret = print_trace_line(iter); 6526 if (ret == TRACE_TYPE_PARTIAL_LINE) { 6527 /* 6528 * If one print_trace_line() fills the entire trace_seq in one shot, 6529 * trace_seq_to_user() will return -EBUSY because save_len == 0. 6530 * In this case, we need to consume it; otherwise the loop will peek 6531 * at this event again next time, resulting in an infinite loop. 6532 */ 6533 if (save_len == 0) { 6534 iter->seq.full = 0; 6535 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); 6536 trace_consume(iter); 6537 break; 6538 } 6539 6540 /* In other cases, don't print partial lines */ 6541 iter->seq.seq.len = save_len; 6542 break; 6543 } 6544 if (ret != TRACE_TYPE_NO_CONSUME) 6545 trace_consume(iter); 6546 6547 if (trace_seq_used(&iter->seq) >= cnt) 6548 break; 6549 6550 /* 6551 * Setting the full flag means we reached the trace_seq buffer 6552 * size and we should have left via the partial output condition above. 6553 * One of the trace_seq_* functions is not being used properly.
6554 */ 6555 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 6556 iter->ent->type); 6557 } 6558 trace_access_unlock(iter->cpu_file); 6559 trace_event_read_unlock(); 6560 6561 /* Now copy what we have to the user */ 6562 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 6563 if (iter->seq.readpos >= trace_seq_used(&iter->seq)) 6564 trace_seq_init(&iter->seq); 6565 6566 /* 6567 * If there was nothing to send to user, in spite of consuming trace 6568 * entries, go back to wait for more entries. 6569 */ 6570 if (sret == -EBUSY) 6571 goto waitagain; 6572 6573 return sret; 6574 } 6575 6576 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 6577 unsigned int idx) 6578 { 6579 __free_page(spd->pages[idx]); 6580 } 6581 6582 static size_t 6583 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 6584 { 6585 size_t count; 6586 int save_len; 6587 int ret; 6588 6589 /* Seq buffer is page-sized, exactly what we need. */ 6590 for (;;) { 6591 save_len = iter->seq.seq.len; 6592 ret = print_trace_line(iter); 6593 6594 if (trace_seq_has_overflowed(&iter->seq)) { 6595 iter->seq.seq.len = save_len; 6596 break; 6597 } 6598 6599 /* 6600 * This should not be hit, because it should only 6601 * be set if the iter->seq overflowed. But check it 6602 * anyway to be safe. 6603 */ 6604 if (ret == TRACE_TYPE_PARTIAL_LINE) { 6605 iter->seq.seq.len = save_len; 6606 break; 6607 } 6608 6609 count = trace_seq_used(&iter->seq) - save_len; 6610 if (rem < count) { 6611 rem = 0; 6612 iter->seq.seq.len = save_len; 6613 break; 6614 } 6615 6616 if (ret != TRACE_TYPE_NO_CONSUME) 6617 trace_consume(iter); 6618 rem -= count; 6619 if (!trace_find_next_entry_inc(iter)) { 6620 rem = 0; 6621 iter->ent = NULL; 6622 break; 6623 } 6624 } 6625 6626 return rem; 6627 } 6628 6629 static ssize_t tracing_splice_read_pipe(struct file *filp, 6630 loff_t *ppos, 6631 struct pipe_inode_info *pipe, 6632 size_t len, 6633 unsigned int flags) 6634 { 6635 struct page *pages_def[PIPE_DEF_BUFFERS]; 6636 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 6637 struct trace_iterator *iter = filp->private_data; 6638 struct splice_pipe_desc spd = { 6639 .pages = pages_def, 6640 .partial = partial_def, 6641 .nr_pages = 0, /* This gets updated below. */ 6642 .nr_pages_max = PIPE_DEF_BUFFERS, 6643 .ops = &default_pipe_buf_ops, 6644 .spd_release = tracing_spd_release_pipe, 6645 }; 6646 ssize_t ret; 6647 size_t rem; 6648 unsigned int i; 6649 6650 if (splice_grow_spd(pipe, &spd)) 6651 return -ENOMEM; 6652 6653 mutex_lock(&iter->mutex); 6654 6655 if (iter->trace->splice_read) { 6656 ret = iter->trace->splice_read(iter, filp, 6657 ppos, pipe, len, flags); 6658 if (ret) 6659 goto out_err; 6660 } 6661 6662 ret = tracing_wait_pipe(filp); 6663 if (ret <= 0) 6664 goto out_err; 6665 6666 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 6667 ret = -EFAULT; 6668 goto out_err; 6669 } 6670 6671 trace_event_read_lock(); 6672 trace_access_lock(iter->cpu_file); 6673 6674 /* Fill as many pages as possible. */ 6675 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { 6676 spd.pages[i] = alloc_page(GFP_KERNEL); 6677 if (!spd.pages[i]) 6678 break; 6679 6680 rem = tracing_fill_pipe_page(rem, iter); 6681 6682 /* Copy the data into the page, so we can start over. 
*/ 6683 ret = trace_seq_to_buffer(&iter->seq, 6684 page_address(spd.pages[i]), 6685 trace_seq_used(&iter->seq)); 6686 if (ret < 0) { 6687 __free_page(spd.pages[i]); 6688 break; 6689 } 6690 spd.partial[i].offset = 0; 6691 spd.partial[i].len = trace_seq_used(&iter->seq); 6692 6693 trace_seq_init(&iter->seq); 6694 } 6695 6696 trace_access_unlock(iter->cpu_file); 6697 trace_event_read_unlock(); 6698 mutex_unlock(&iter->mutex); 6699 6700 spd.nr_pages = i; 6701 6702 if (i) 6703 ret = splice_to_pipe(pipe, &spd); 6704 else 6705 ret = 0; 6706 out: 6707 splice_shrink_spd(&spd); 6708 return ret; 6709 6710 out_err: 6711 mutex_unlock(&iter->mutex); 6712 goto out; 6713 } 6714 6715 static ssize_t 6716 tracing_entries_read(struct file *filp, char __user *ubuf, 6717 size_t cnt, loff_t *ppos) 6718 { 6719 struct inode *inode = file_inode(filp); 6720 struct trace_array *tr = inode->i_private; 6721 int cpu = tracing_get_cpu(inode); 6722 char buf[64]; 6723 int r = 0; 6724 ssize_t ret; 6725 6726 mutex_lock(&trace_types_lock); 6727 6728 if (cpu == RING_BUFFER_ALL_CPUS) { 6729 int cpu, buf_size_same; 6730 unsigned long size; 6731 6732 size = 0; 6733 buf_size_same = 1; 6734 /* check if all cpu sizes are same */ 6735 for_each_tracing_cpu(cpu) { 6736 /* fill in the size from first enabled cpu */ 6737 if (size == 0) 6738 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; 6739 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { 6740 buf_size_same = 0; 6741 break; 6742 } 6743 } 6744 6745 if (buf_size_same) { 6746 if (!tr->ring_buffer_expanded) 6747 r = sprintf(buf, "%lu (expanded: %lu)\n", 6748 size >> 10, 6749 trace_buf_size >> 10); 6750 else 6751 r = sprintf(buf, "%lu\n", size >> 10); 6752 } else 6753 r = sprintf(buf, "X\n"); 6754 } else 6755 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); 6756 6757 mutex_unlock(&trace_types_lock); 6758 6759 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6760 return ret; 6761 } 6762 6763 static ssize_t 6764 tracing_entries_write(struct file *filp, const char __user *ubuf, 6765 size_t cnt, loff_t *ppos) 6766 { 6767 struct inode *inode = file_inode(filp); 6768 struct trace_array *tr = inode->i_private; 6769 unsigned long val; 6770 int ret; 6771 6772 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6773 if (ret) 6774 return ret; 6775 6776 /* must have at least 1 entry */ 6777 if (!val) 6778 return -EINVAL; 6779 6780 /* value is in KB */ 6781 val <<= 10; 6782 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); 6783 if (ret < 0) 6784 return ret; 6785 6786 *ppos += cnt; 6787 6788 return cnt; 6789 } 6790 6791 static ssize_t 6792 tracing_total_entries_read(struct file *filp, char __user *ubuf, 6793 size_t cnt, loff_t *ppos) 6794 { 6795 struct trace_array *tr = filp->private_data; 6796 char buf[64]; 6797 int r, cpu; 6798 unsigned long size = 0, expanded_size = 0; 6799 6800 mutex_lock(&trace_types_lock); 6801 for_each_tracing_cpu(cpu) { 6802 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; 6803 if (!tr->ring_buffer_expanded) 6804 expanded_size += trace_buf_size >> 10; 6805 } 6806 if (tr->ring_buffer_expanded) 6807 r = sprintf(buf, "%lu\n", size); 6808 else 6809 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 6810 mutex_unlock(&trace_types_lock); 6811 6812 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6813 } 6814 6815 static ssize_t 6816 tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 6817 { 6818 struct trace_array *tr = filp->private_data; 
6819 struct seq_buf seq; 6820 char buf[64]; 6821 6822 seq_buf_init(&seq, buf, 64); 6823 6824 seq_buf_printf(&seq, "text delta:\t%ld\n", tr->text_delta); 6825 seq_buf_printf(&seq, "data delta:\t%ld\n", tr->data_delta); 6826 6827 return simple_read_from_buffer(ubuf, cnt, ppos, buf, seq_buf_used(&seq)); 6828 } 6829 6830 static int tracing_buffer_meta_open(struct inode *inode, struct file *filp) 6831 { 6832 struct trace_array *tr = inode->i_private; 6833 int cpu = tracing_get_cpu(inode); 6834 int ret; 6835 6836 ret = tracing_check_open_get_tr(tr); 6837 if (ret) 6838 return ret; 6839 6840 ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu); 6841 if (ret < 0) 6842 __trace_array_put(tr); 6843 return ret; 6844 } 6845 6846 static ssize_t 6847 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 6848 size_t cnt, loff_t *ppos) 6849 { 6850 /* 6851 * There is no need to read what the user has written, this function 6852 * is just to make sure that there is no error when "echo" is used 6853 */ 6854 6855 *ppos += cnt; 6856 6857 return cnt; 6858 } 6859 6860 static int 6861 tracing_free_buffer_release(struct inode *inode, struct file *filp) 6862 { 6863 struct trace_array *tr = inode->i_private; 6864 6865 /* disable tracing ? */ 6866 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) 6867 tracer_tracing_off(tr); 6868 /* resize the ring buffer to 0 */ 6869 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 6870 6871 trace_array_put(tr); 6872 6873 return 0; 6874 } 6875 6876 #define TRACE_MARKER_MAX_SIZE 4096 6877 6878 static ssize_t 6879 tracing_mark_write(struct file *filp, const char __user *ubuf, 6880 size_t cnt, loff_t *fpos) 6881 { 6882 struct trace_array *tr = filp->private_data; 6883 struct ring_buffer_event *event; 6884 enum event_trigger_type tt = ETT_NONE; 6885 struct trace_buffer *buffer; 6886 struct print_entry *entry; 6887 int meta_size; 6888 ssize_t written; 6889 size_t size; 6890 int len; 6891 6892 /* Used in tracing_mark_raw_write() as well */ 6893 #define FAULTED_STR "<faulted>" 6894 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */ 6895 6896 if (tracing_disabled) 6897 return -EINVAL; 6898 6899 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 6900 return -EINVAL; 6901 6902 if ((ssize_t)cnt < 0) 6903 return -EINVAL; 6904 6905 if (cnt > TRACE_MARKER_MAX_SIZE) 6906 cnt = TRACE_MARKER_MAX_SIZE; 6907 6908 meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */ 6909 again: 6910 size = cnt + meta_size; 6911 6912 /* If less than "<faulted>", then make sure we can still add that */ 6913 if (cnt < FAULTED_SIZE) 6914 size += FAULTED_SIZE - cnt; 6915 6916 buffer = tr->array_buffer.buffer; 6917 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 6918 tracing_gen_ctx()); 6919 if (unlikely(!event)) { 6920 /* 6921 * If the size was greater than what was allowed, then 6922 * make it smaller and try again. 
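 * The retry clamps cnt so that cnt + meta_size fits within
 * ring_buffer_max_event_size(buffer).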
6923 */ 6924 if (size > ring_buffer_max_event_size(buffer)) { 6925 /* cnt < FAULTED size should never be bigger than max */ 6926 if (WARN_ON_ONCE(cnt < FAULTED_SIZE)) 6927 return -EBADF; 6928 cnt = ring_buffer_max_event_size(buffer) - meta_size; 6929 /* The above should only happen once */ 6930 if (WARN_ON_ONCE(cnt + meta_size == size)) 6931 return -EBADF; 6932 goto again; 6933 } 6934 6935 /* Ring buffer disabled, return as if not open for write */ 6936 return -EBADF; 6937 } 6938 6939 entry = ring_buffer_event_data(event); 6940 entry->ip = _THIS_IP_; 6941 6942 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); 6943 if (len) { 6944 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); 6945 cnt = FAULTED_SIZE; 6946 written = -EFAULT; 6947 } else 6948 written = cnt; 6949 6950 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { 6951 /* do not add \n before testing triggers, but add \0 */ 6952 entry->buf[cnt] = '\0'; 6953 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event); 6954 } 6955 6956 if (entry->buf[cnt - 1] != '\n') { 6957 entry->buf[cnt] = '\n'; 6958 entry->buf[cnt + 1] = '\0'; 6959 } else 6960 entry->buf[cnt] = '\0'; 6961 6962 if (static_branch_unlikely(&trace_marker_exports_enabled)) 6963 ftrace_exports(event, TRACE_EXPORT_MARKER); 6964 __buffer_unlock_commit(buffer, event); 6965 6966 if (tt) 6967 event_triggers_post_call(tr->trace_marker_file, tt); 6968 6969 return written; 6970 } 6971 6972 static ssize_t 6973 tracing_mark_raw_write(struct file *filp, const char __user *ubuf, 6974 size_t cnt, loff_t *fpos) 6975 { 6976 struct trace_array *tr = filp->private_data; 6977 struct ring_buffer_event *event; 6978 struct trace_buffer *buffer; 6979 struct raw_data_entry *entry; 6980 ssize_t written; 6981 int size; 6982 int len; 6983 6984 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) 6985 6986 if (tracing_disabled) 6987 return -EINVAL; 6988 6989 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 6990 return -EINVAL; 6991 6992 /* The marker must at least have a tag id */ 6993 if (cnt < sizeof(unsigned int)) 6994 return -EINVAL; 6995 6996 size = sizeof(*entry) + cnt; 6997 if (cnt < FAULT_SIZE_ID) 6998 size += FAULT_SIZE_ID - cnt; 6999 7000 buffer = tr->array_buffer.buffer; 7001 7002 if (size > ring_buffer_max_event_size(buffer)) 7003 return -EINVAL; 7004 7005 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, 7006 tracing_gen_ctx()); 7007 if (!event) 7008 /* Ring buffer disabled, return as if not open for write */ 7009 return -EBADF; 7010 7011 entry = ring_buffer_event_data(event); 7012 7013 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); 7014 if (len) { 7015 entry->id = -1; 7016 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); 7017 written = -EFAULT; 7018 } else 7019 written = cnt; 7020 7021 __buffer_unlock_commit(buffer, event); 7022 7023 return written; 7024 } 7025 7026 static int tracing_clock_show(struct seq_file *m, void *v) 7027 { 7028 struct trace_array *tr = m->private; 7029 int i; 7030 7031 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 7032 seq_printf(m, 7033 "%s%s%s%s", i ? " " : "", 7034 i == tr->clock_id ? "[" : "", trace_clocks[i].name, 7035 i == tr->clock_id ? 
"]" : ""); 7036 seq_putc(m, '\n'); 7037 7038 return 0; 7039 } 7040 7041 int tracing_set_clock(struct trace_array *tr, const char *clockstr) 7042 { 7043 int i; 7044 7045 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 7046 if (strcmp(trace_clocks[i].name, clockstr) == 0) 7047 break; 7048 } 7049 if (i == ARRAY_SIZE(trace_clocks)) 7050 return -EINVAL; 7051 7052 mutex_lock(&trace_types_lock); 7053 7054 tr->clock_id = i; 7055 7056 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); 7057 7058 /* 7059 * New clock may not be consistent with the previous clock. 7060 * Reset the buffer so that it doesn't have incomparable timestamps. 7061 */ 7062 tracing_reset_online_cpus(&tr->array_buffer); 7063 7064 #ifdef CONFIG_TRACER_MAX_TRACE 7065 if (tr->max_buffer.buffer) 7066 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); 7067 tracing_reset_online_cpus(&tr->max_buffer); 7068 #endif 7069 7070 mutex_unlock(&trace_types_lock); 7071 7072 return 0; 7073 } 7074 7075 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 7076 size_t cnt, loff_t *fpos) 7077 { 7078 struct seq_file *m = filp->private_data; 7079 struct trace_array *tr = m->private; 7080 char buf[64]; 7081 const char *clockstr; 7082 int ret; 7083 7084 if (cnt >= sizeof(buf)) 7085 return -EINVAL; 7086 7087 if (copy_from_user(buf, ubuf, cnt)) 7088 return -EFAULT; 7089 7090 buf[cnt] = 0; 7091 7092 clockstr = strstrip(buf); 7093 7094 ret = tracing_set_clock(tr, clockstr); 7095 if (ret) 7096 return ret; 7097 7098 *fpos += cnt; 7099 7100 return cnt; 7101 } 7102 7103 static int tracing_clock_open(struct inode *inode, struct file *file) 7104 { 7105 struct trace_array *tr = inode->i_private; 7106 int ret; 7107 7108 ret = tracing_check_open_get_tr(tr); 7109 if (ret) 7110 return ret; 7111 7112 ret = single_open(file, tracing_clock_show, inode->i_private); 7113 if (ret < 0) 7114 trace_array_put(tr); 7115 7116 return ret; 7117 } 7118 7119 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) 7120 { 7121 struct trace_array *tr = m->private; 7122 7123 mutex_lock(&trace_types_lock); 7124 7125 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) 7126 seq_puts(m, "delta [absolute]\n"); 7127 else 7128 seq_puts(m, "[delta] absolute\n"); 7129 7130 mutex_unlock(&trace_types_lock); 7131 7132 return 0; 7133 } 7134 7135 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) 7136 { 7137 struct trace_array *tr = inode->i_private; 7138 int ret; 7139 7140 ret = tracing_check_open_get_tr(tr); 7141 if (ret) 7142 return ret; 7143 7144 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); 7145 if (ret < 0) 7146 trace_array_put(tr); 7147 7148 return ret; 7149 } 7150 7151 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe) 7152 { 7153 if (rbe == this_cpu_read(trace_buffered_event)) 7154 return ring_buffer_time_stamp(buffer); 7155 7156 return ring_buffer_event_time_stamp(buffer, rbe); 7157 } 7158 7159 /* 7160 * Set or disable using the per CPU trace_buffer_event when possible. 
7161 */ 7162 int tracing_set_filter_buffering(struct trace_array *tr, bool set) 7163 { 7164 guard(mutex)(&trace_types_lock); 7165 7166 if (set && tr->no_filter_buffering_ref++) 7167 return 0; 7168 7169 if (!set) { 7170 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) 7171 return -EINVAL; 7172 7173 --tr->no_filter_buffering_ref; 7174 } 7175 7176 return 0; 7177 } 7178 7179 struct ftrace_buffer_info { 7180 struct trace_iterator iter; 7181 void *spare; 7182 unsigned int spare_cpu; 7183 unsigned int spare_size; 7184 unsigned int read; 7185 }; 7186 7187 #ifdef CONFIG_TRACER_SNAPSHOT 7188 static int tracing_snapshot_open(struct inode *inode, struct file *file) 7189 { 7190 struct trace_array *tr = inode->i_private; 7191 struct trace_iterator *iter; 7192 struct seq_file *m; 7193 int ret; 7194 7195 ret = tracing_check_open_get_tr(tr); 7196 if (ret) 7197 return ret; 7198 7199 if (file->f_mode & FMODE_READ) { 7200 iter = __tracing_open(inode, file, true); 7201 if (IS_ERR(iter)) 7202 ret = PTR_ERR(iter); 7203 } else { 7204 /* Writes still need the seq_file to hold the private data */ 7205 ret = -ENOMEM; 7206 m = kzalloc(sizeof(*m), GFP_KERNEL); 7207 if (!m) 7208 goto out; 7209 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 7210 if (!iter) { 7211 kfree(m); 7212 goto out; 7213 } 7214 ret = 0; 7215 7216 iter->tr = tr; 7217 iter->array_buffer = &tr->max_buffer; 7218 iter->cpu_file = tracing_get_cpu(inode); 7219 m->private = iter; 7220 file->private_data = m; 7221 } 7222 out: 7223 if (ret < 0) 7224 trace_array_put(tr); 7225 7226 return ret; 7227 } 7228 7229 static void tracing_swap_cpu_buffer(void *tr) 7230 { 7231 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id()); 7232 } 7233 7234 static ssize_t 7235 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 7236 loff_t *ppos) 7237 { 7238 struct seq_file *m = filp->private_data; 7239 struct trace_iterator *iter = m->private; 7240 struct trace_array *tr = iter->tr; 7241 unsigned long val; 7242 int ret; 7243 7244 ret = tracing_update_buffers(tr); 7245 if (ret < 0) 7246 return ret; 7247 7248 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 7249 if (ret) 7250 return ret; 7251 7252 guard(mutex)(&trace_types_lock); 7253 7254 if (tr->current_trace->use_max_tr) 7255 return -EBUSY; 7256 7257 local_irq_disable(); 7258 arch_spin_lock(&tr->max_lock); 7259 if (tr->cond_snapshot) 7260 ret = -EBUSY; 7261 arch_spin_unlock(&tr->max_lock); 7262 local_irq_enable(); 7263 if (ret) 7264 return ret; 7265 7266 switch (val) { 7267 case 0: 7268 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) 7269 return -EINVAL; 7270 if (tr->allocated_snapshot) 7271 free_snapshot(tr); 7272 break; 7273 case 1: 7274 /* Only allow per-cpu swap if the ring buffer supports it */ 7275 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP 7276 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) 7277 return -EINVAL; 7278 #endif 7279 if (tr->allocated_snapshot) 7280 ret = resize_buffer_duplicate_size(&tr->max_buffer, 7281 &tr->array_buffer, iter->cpu_file); 7282 7283 ret = tracing_arm_snapshot_locked(tr); 7284 if (ret) 7285 return ret; 7286 7287 /* Now, we're going to swap */ 7288 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 7289 local_irq_disable(); 7290 update_max_tr(tr, current, smp_processor_id(), NULL); 7291 local_irq_enable(); 7292 } else { 7293 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, 7294 (void *)tr, 1); 7295 } 7296 tracing_disarm_snapshot(tr); 7297 break; 7298 default: 7299 if (tr->allocated_snapshot) { 7300 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 7301 
tracing_reset_online_cpus(&tr->max_buffer); 7302 else 7303 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); 7304 } 7305 break; 7306 } 7307 7308 if (ret >= 0) { 7309 *ppos += cnt; 7310 ret = cnt; 7311 } 7312 7313 return ret; 7314 } 7315 7316 static int tracing_snapshot_release(struct inode *inode, struct file *file) 7317 { 7318 struct seq_file *m = file->private_data; 7319 int ret; 7320 7321 ret = tracing_release(inode, file); 7322 7323 if (file->f_mode & FMODE_READ) 7324 return ret; 7325 7326 /* If write only, the seq_file is just a stub */ 7327 if (m) 7328 kfree(m->private); 7329 kfree(m); 7330 7331 return 0; 7332 } 7333 7334 static int tracing_buffers_open(struct inode *inode, struct file *filp); 7335 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 7336 size_t count, loff_t *ppos); 7337 static int tracing_buffers_release(struct inode *inode, struct file *file); 7338 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 7339 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 7340 7341 static int snapshot_raw_open(struct inode *inode, struct file *filp) 7342 { 7343 struct ftrace_buffer_info *info; 7344 int ret; 7345 7346 /* The following checks for tracefs lockdown */ 7347 ret = tracing_buffers_open(inode, filp); 7348 if (ret < 0) 7349 return ret; 7350 7351 info = filp->private_data; 7352 7353 if (info->iter.trace->use_max_tr) { 7354 tracing_buffers_release(inode, filp); 7355 return -EBUSY; 7356 } 7357 7358 info->iter.snapshot = true; 7359 info->iter.array_buffer = &info->iter.tr->max_buffer; 7360 7361 return ret; 7362 } 7363 7364 #endif /* CONFIG_TRACER_SNAPSHOT */ 7365 7366 7367 static const struct file_operations tracing_thresh_fops = { 7368 .open = tracing_open_generic, 7369 .read = tracing_thresh_read, 7370 .write = tracing_thresh_write, 7371 .llseek = generic_file_llseek, 7372 }; 7373 7374 #ifdef CONFIG_TRACER_MAX_TRACE 7375 static const struct file_operations tracing_max_lat_fops = { 7376 .open = tracing_open_generic_tr, 7377 .read = tracing_max_lat_read, 7378 .write = tracing_max_lat_write, 7379 .llseek = generic_file_llseek, 7380 .release = tracing_release_generic_tr, 7381 }; 7382 #endif 7383 7384 static const struct file_operations set_tracer_fops = { 7385 .open = tracing_open_generic_tr, 7386 .read = tracing_set_trace_read, 7387 .write = tracing_set_trace_write, 7388 .llseek = generic_file_llseek, 7389 .release = tracing_release_generic_tr, 7390 }; 7391 7392 static const struct file_operations tracing_pipe_fops = { 7393 .open = tracing_open_pipe, 7394 .poll = tracing_poll_pipe, 7395 .read = tracing_read_pipe, 7396 .splice_read = tracing_splice_read_pipe, 7397 .release = tracing_release_pipe, 7398 }; 7399 7400 static const struct file_operations tracing_entries_fops = { 7401 .open = tracing_open_generic_tr, 7402 .read = tracing_entries_read, 7403 .write = tracing_entries_write, 7404 .llseek = generic_file_llseek, 7405 .release = tracing_release_generic_tr, 7406 }; 7407 7408 static const struct file_operations tracing_buffer_meta_fops = { 7409 .open = tracing_buffer_meta_open, 7410 .read = seq_read, 7411 .llseek = seq_lseek, 7412 .release = tracing_seq_release, 7413 }; 7414 7415 static const struct file_operations tracing_total_entries_fops = { 7416 .open = tracing_open_generic_tr, 7417 .read = tracing_total_entries_read, 7418 .llseek = generic_file_llseek, 7419 .release = tracing_release_generic_tr, 7420 }; 7421 7422 static const struct file_operations tracing_free_buffer_fops = { 7423 .open = tracing_open_generic_tr, 
7424 .write = tracing_free_buffer_write, 7425 .release = tracing_free_buffer_release, 7426 }; 7427 7428 static const struct file_operations tracing_mark_fops = { 7429 .open = tracing_mark_open, 7430 .write = tracing_mark_write, 7431 .release = tracing_release_generic_tr, 7432 }; 7433 7434 static const struct file_operations tracing_mark_raw_fops = { 7435 .open = tracing_mark_open, 7436 .write = tracing_mark_raw_write, 7437 .release = tracing_release_generic_tr, 7438 }; 7439 7440 static const struct file_operations trace_clock_fops = { 7441 .open = tracing_clock_open, 7442 .read = seq_read, 7443 .llseek = seq_lseek, 7444 .release = tracing_single_release_tr, 7445 .write = tracing_clock_write, 7446 }; 7447 7448 static const struct file_operations trace_time_stamp_mode_fops = { 7449 .open = tracing_time_stamp_mode_open, 7450 .read = seq_read, 7451 .llseek = seq_lseek, 7452 .release = tracing_single_release_tr, 7453 }; 7454 7455 static const struct file_operations last_boot_fops = { 7456 .open = tracing_open_generic_tr, 7457 .read = tracing_last_boot_read, 7458 .llseek = generic_file_llseek, 7459 .release = tracing_release_generic_tr, 7460 }; 7461 7462 #ifdef CONFIG_TRACER_SNAPSHOT 7463 static const struct file_operations snapshot_fops = { 7464 .open = tracing_snapshot_open, 7465 .read = seq_read, 7466 .write = tracing_snapshot_write, 7467 .llseek = tracing_lseek, 7468 .release = tracing_snapshot_release, 7469 }; 7470 7471 static const struct file_operations snapshot_raw_fops = { 7472 .open = snapshot_raw_open, 7473 .read = tracing_buffers_read, 7474 .release = tracing_buffers_release, 7475 .splice_read = tracing_buffers_splice_read, 7476 }; 7477 7478 #endif /* CONFIG_TRACER_SNAPSHOT */ 7479 7480 /* 7481 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct 7482 * @filp: The active open file structure 7483 * @ubuf: The userspace provided buffer to read the value from 7484 * @cnt: The maximum number of bytes to read 7485 * @ppos: The current "file" position 7486 * 7487 * This function implements the write interface for a struct trace_min_max_param. 7488 * The filp->private_data must point to a trace_min_max_param structure that 7489 * defines where to write the value, the min and the max acceptable values, 7490 * and a lock to protect the write. 7491 */ 7492 static ssize_t 7493 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) 7494 { 7495 struct trace_min_max_param *param = filp->private_data; 7496 u64 val; 7497 int err; 7498 7499 if (!param) 7500 return -EFAULT; 7501 7502 err = kstrtoull_from_user(ubuf, cnt, 10, &val); 7503 if (err) 7504 return err; 7505 7506 if (param->lock) 7507 mutex_lock(param->lock); 7508 7509 if (param->min && val < *param->min) 7510 err = -EINVAL; 7511 7512 if (param->max && val > *param->max) 7513 err = -EINVAL; 7514 7515 if (!err) 7516 *param->val = val; 7517 7518 if (param->lock) 7519 mutex_unlock(param->lock); 7520 7521 if (err) 7522 return err; 7523 7524 return cnt; 7525 } 7526 7527 /* 7528 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct 7529 * @filp: The active open file structure 7530 * @ubuf: The userspace provided buffer to read value into 7531 * @cnt: The maximum number of bytes to read 7532 * @ppos: The current "file" position 7533 * 7534 * This function implements the read interface for a struct trace_min_max_param. 7535 * The filp->private_data must point to a trace_min_max_param struct with valid 7536 * data.
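 *
 * Returns the number of bytes copied to @ubuf, as returned by
 * simple_read_from_buffer().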
7537 */ 7538 static ssize_t 7539 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 7540 { 7541 struct trace_min_max_param *param = filp->private_data; 7542 char buf[U64_STR_SIZE]; 7543 int len; 7544 u64 val; 7545 7546 if (!param) 7547 return -EFAULT; 7548 7549 val = *param->val; 7550 7551 if (cnt > sizeof(buf)) 7552 cnt = sizeof(buf); 7553 7554 len = snprintf(buf, sizeof(buf), "%llu\n", val); 7555 7556 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 7557 } 7558 7559 const struct file_operations trace_min_max_fops = { 7560 .open = tracing_open_generic, 7561 .read = trace_min_max_read, 7562 .write = trace_min_max_write, 7563 }; 7564 7565 #define TRACING_LOG_ERRS_MAX 8 7566 #define TRACING_LOG_LOC_MAX 128 7567 7568 #define CMD_PREFIX " Command: " 7569 7570 struct err_info { 7571 const char **errs; /* ptr to loc-specific array of err strings */ 7572 u8 type; /* index into errs -> specific err string */ 7573 u16 pos; /* caret position */ 7574 u64 ts; 7575 }; 7576 7577 struct tracing_log_err { 7578 struct list_head list; 7579 struct err_info info; 7580 char loc[TRACING_LOG_LOC_MAX]; /* err location */ 7581 char *cmd; /* what caused err */ 7582 }; 7583 7584 static DEFINE_MUTEX(tracing_err_log_lock); 7585 7586 static struct tracing_log_err *alloc_tracing_log_err(int len) 7587 { 7588 struct tracing_log_err *err; 7589 7590 err = kzalloc(sizeof(*err), GFP_KERNEL); 7591 if (!err) 7592 return ERR_PTR(-ENOMEM); 7593 7594 err->cmd = kzalloc(len, GFP_KERNEL); 7595 if (!err->cmd) { 7596 kfree(err); 7597 return ERR_PTR(-ENOMEM); 7598 } 7599 7600 return err; 7601 } 7602 7603 static void free_tracing_log_err(struct tracing_log_err *err) 7604 { 7605 kfree(err->cmd); 7606 kfree(err); 7607 } 7608 7609 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr, 7610 int len) 7611 { 7612 struct tracing_log_err *err; 7613 char *cmd; 7614 7615 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { 7616 err = alloc_tracing_log_err(len); 7617 if (PTR_ERR(err) != -ENOMEM) 7618 tr->n_err_log_entries++; 7619 7620 return err; 7621 } 7622 cmd = kzalloc(len, GFP_KERNEL); 7623 if (!cmd) 7624 return ERR_PTR(-ENOMEM); 7625 err = list_first_entry(&tr->err_log, struct tracing_log_err, list); 7626 kfree(err->cmd); 7627 err->cmd = cmd; 7628 list_del(&err->list); 7629 7630 return err; 7631 } 7632 7633 /** 7634 * err_pos - find the position of a string within a command for error careting 7635 * @cmd: The tracing command that caused the error 7636 * @str: The string to position the caret at within @cmd 7637 * 7638 * Finds the position of the first occurrence of @str within @cmd. The 7639 * return value can be passed to tracing_log_err() for caret placement 7640 * within @cmd. 7641 * 7642 * Returns the index within @cmd of the first occurrence of @str or 0 7643 * if @str was not found. 
7644 */ 7645 unsigned int err_pos(char *cmd, const char *str) 7646 { 7647 char *found; 7648 7649 if (WARN_ON(!strlen(cmd))) 7650 return 0; 7651 7652 found = strstr(cmd, str); 7653 if (found) 7654 return found - cmd; 7655 7656 return 0; 7657 } 7658 7659 /** 7660 * tracing_log_err - write an error to the tracing error log 7661 * @tr: The associated trace array for the error (NULL for top level array) 7662 * @loc: A string describing where the error occurred 7663 * @cmd: The tracing command that caused the error 7664 * @errs: The array of loc-specific static error strings 7665 * @type: The index into errs[], which produces the specific static err string 7666 * @pos: The position the caret should be placed in the cmd 7667 * 7668 * Writes an error into tracing/error_log of the form: 7669 * 7670 * <loc>: error: <text> 7671 * Command: <cmd> 7672 * ^ 7673 * 7674 * tracing/error_log is a small log file containing the last 7675 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated 7676 * unless there has been a tracing error, and the error log can be 7677 * cleared and have its memory freed by writing the empty string in 7678 * truncation mode to it i.e. echo > tracing/error_log. 7679 * 7680 * NOTE: the @errs array along with the @type param are used to 7681 * produce a static error string - this string is not copied and saved 7682 * when the error is logged - only a pointer to it is saved. See 7683 * existing callers for examples of how static strings are typically 7684 * defined for use with tracing_log_err(). 7685 */ 7686 void tracing_log_err(struct trace_array *tr, 7687 const char *loc, const char *cmd, 7688 const char **errs, u8 type, u16 pos) 7689 { 7690 struct tracing_log_err *err; 7691 int len = 0; 7692 7693 if (!tr) 7694 tr = &global_trace; 7695 7696 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1; 7697 7698 guard(mutex)(&tracing_err_log_lock); 7699 7700 err = get_tracing_log_err(tr, len); 7701 if (PTR_ERR(err) == -ENOMEM) 7702 return; 7703 7704 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); 7705 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd); 7706 7707 err->info.errs = errs; 7708 err->info.type = type; 7709 err->info.pos = pos; 7710 err->info.ts = local_clock(); 7711 7712 list_add_tail(&err->list, &tr->err_log); 7713 } 7714 7715 static void clear_tracing_err_log(struct trace_array *tr) 7716 { 7717 struct tracing_log_err *err, *next; 7718 7719 mutex_lock(&tracing_err_log_lock); 7720 list_for_each_entry_safe(err, next, &tr->err_log, list) { 7721 list_del(&err->list); 7722 free_tracing_log_err(err); 7723 } 7724 7725 tr->n_err_log_entries = 0; 7726 mutex_unlock(&tracing_err_log_lock); 7727 } 7728 7729 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) 7730 { 7731 struct trace_array *tr = m->private; 7732 7733 mutex_lock(&tracing_err_log_lock); 7734 7735 return seq_list_start(&tr->err_log, *pos); 7736 } 7737 7738 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) 7739 { 7740 struct trace_array *tr = m->private; 7741 7742 return seq_list_next(v, &tr->err_log, pos); 7743 } 7744 7745 static void tracing_err_log_seq_stop(struct seq_file *m, void *v) 7746 { 7747 mutex_unlock(&tracing_err_log_lock); 7748 } 7749 7750 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos) 7751 { 7752 u16 i; 7753 7754 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) 7755 seq_putc(m, ' '); 7756 for (i = 0; i < pos; i++) 7757 seq_putc(m, ' '); 7758 seq_puts(m, "^\n"); 7759 } 7760 7761 static int 
static void clear_tracing_err_log(struct trace_array *tr)
{
	struct tracing_log_err *err, *next;

	mutex_lock(&tracing_err_log_lock);
	list_for_each_entry_safe(err, next, &tr->err_log, list) {
		list_del(&err->list);
		free_tracing_log_err(err);
	}

	tr->n_err_log_entries = 0;
	mutex_unlock(&tracing_err_log_lock);
}

static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;

	mutex_lock(&tracing_err_log_lock);

	return seq_list_start(&tr->err_log, *pos);
}

static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;

	return seq_list_next(v, &tr->err_log, pos);
}

static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&tracing_err_log_lock);
}

static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
{
	u16 i;

	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
		seq_putc(m, ' ');
	for (i = 0; i < pos; i++)
		seq_putc(m, ' ');
	seq_puts(m, "^\n");
}

static int tracing_err_log_seq_show(struct seq_file *m, void *v)
{
	struct tracing_log_err *err = v;

	if (err) {
		const char *err_text = err->info.errs[err->info.type];
		u64 sec = err->info.ts;
		u32 nsec;

		nsec = do_div(sec, NSEC_PER_SEC);
		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
			   err->loc, err_text);
		seq_printf(m, "%s", err->cmd);
		tracing_err_log_show_pos(m, err->info.pos);
	}

	return 0;
}

static const struct seq_operations tracing_err_log_seq_ops = {
	.start	= tracing_err_log_seq_start,
	.next	= tracing_err_log_seq_next,
	.stop	= tracing_err_log_seq_stop,
	.show	= tracing_err_log_seq_show
};

static int tracing_err_log_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret = 0;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	/* If this file was opened for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
		clear_tracing_err_log(tr);

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &tracing_err_log_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = tr;
		} else {
			trace_array_put(tr);
		}
	}
	return ret;
}

static ssize_t tracing_err_log_write(struct file *file,
				     const char __user *buffer,
				     size_t count, loff_t *ppos)
{
	return count;
}

static int tracing_err_log_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

static const struct file_operations tracing_err_log_fops = {
	.open		= tracing_err_log_open,
	.write		= tracing_err_log_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= tracing_err_log_release,
};

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	info = kvzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.array_buffer = &tr->array_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->trace_ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static __poll_t
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	void *trace_data;
	int page_size;
	ssize_t ret = 0;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);

	/* Make sure the spare matches the current sub buffer size */
	if (info->spare) {
		if (page_size != info->spare_size) {
			ring_buffer_free_read_page(iter->array_buffer->buffer,
						   info->spare_cpu, info->spare);
			info->spare = NULL;
		}
	}

	if (!info->spare) {
		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
							  iter->cpu_file);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare);
			info->spare = NULL;
		} else {
			info->spare_cpu = iter->cpu_file;
			info->spare_size = page_size;
		}
	}
	if (!info->spare)
		return ret;

	/* Do we have previous read data to read? */
	if (info->read < page_size)
		goto read;

again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->array_buffer->buffer,
				    info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter) && !iter->closed) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, 0);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
read:
	size = page_size - info->read;
	if (size > count)
		size = count;
	trace_data = ring_buffer_read_page_data(info->spare);
	ret = copy_to_user(ubuf, trace_data + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_flush(struct file *file, fl_owner_t id)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	iter->closed = true;
	/* Make sure the waiters see the new wait_index */
	(void)atomic_fetch_inc_release(&iter->wait_index);

	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);

	return 0;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->trace_ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->array_buffer->buffer,
					   info->spare_cpu, info->spare);
	kvfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
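/*
 * Sketch of how user space typically consumes the read interface
 * above (illustrative): trace_pipe_raw hands out whole ring-buffer
 * sub-buffers, so it is normally read in sub-buffer-sized chunks,
 * e.g. with 4K sub-buffers:
 *
 *	dd if=per_cpu/cpu0/trace_pipe_raw of=cpu0.raw bs=4096
 *
 * The current sub-buffer size is reported by buffer_subbuf_size_kb.
 */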
struct buffer_ref {
	struct trace_buffer	*buffer;
	void			*page;
	int			cpu;
	refcount_t		refcount;
};

static void buffer_ref_release(struct buffer_ref *ref)
{
	if (!refcount_dec_and_test(&ref->refcount))
		return;
	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
}

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	buffer_ref_release(ref);
	buf->private = 0;
}

static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (refcount_read(&ref->refcount) > INT_MAX/2)
		return false;

	refcount_inc(&ref->refcount);
	return true;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.release	= buffer_pipe_buf_release,
	.get		= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	buffer_ref_release(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	bool woken = false;
	int page_size;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
	if (*ppos & (page_size - 1))
		return -EINVAL;

	if (len & (page_size - 1)) {
		if (len < page_size)
			return -EINVAL;
		len &= (~(page_size - 1));
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		refcount_set(&ref->refcount, 1);
		ref->buffer = iter->array_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ring_buffer_read_page_data(ref->page));

		spd.pages[i] = page;
		spd.partial[i].len = page_size;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += page_size;

		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {

		if (ret)
			goto out;

		if (woken)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
		if (ret)
			goto out;

		/* No need to wait after waking up when tracing is off */
		if (!tracer_tracing_is_on(iter->tr))
			goto out;

		/* Iterate one more time to collect any new data then exit */
		woken = true;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}

static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	int err;

	if (cmd == TRACE_MMAP_IOCTL_GET_READER) {
		if (!(file->f_flags & O_NONBLOCK)) {
			err = ring_buffer_wait(iter->array_buffer->buffer,
					       iter->cpu_file,
					       iter->tr->buffer_percent,
					       NULL, NULL);
			if (err)
				return err;
		}

		return ring_buffer_map_get_reader(iter->array_buffer->buffer,
						  iter->cpu_file);
	} else if (cmd) {
		return -ENOTTY;
	}

	/*
	 * An ioctl call with cmd 0 to the ring buffer file will wake up all
	 * waiters
	 */
	mutex_lock(&trace_types_lock);

	/* Make sure the waiters see the new wait_index */
	(void)atomic_fetch_inc_release(&iter->wait_index);

	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);

	mutex_unlock(&trace_types_lock);
	return 0;
}

#ifdef CONFIG_TRACER_MAX_TRACE
static int get_snapshot_map(struct trace_array *tr)
{
	int err = 0;

	/*
	 * Called with mmap_lock held. lockdep would be unhappy if we would now
	 * take trace_types_lock. Instead use the specific
	 * snapshot_trigger_lock.
	 */
	spin_lock(&tr->snapshot_trigger_lock);

	if (tr->snapshot || tr->mapped == UINT_MAX)
		err = -EBUSY;
	else
		tr->mapped++;

	spin_unlock(&tr->snapshot_trigger_lock);

	/* Wait for update_max_tr() to observe iter->tr->mapped */
	if (tr->mapped == 1)
		synchronize_rcu();

	return err;
}

static void put_snapshot_map(struct trace_array *tr)
{
	spin_lock(&tr->snapshot_trigger_lock);
	if (!WARN_ON(!tr->mapped))
		tr->mapped--;
	spin_unlock(&tr->snapshot_trigger_lock);
}
#else
static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
static inline void put_snapshot_map(struct trace_array *tr) { }
#endif

static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
{
	struct ftrace_buffer_info *info = vma->vm_file->private_data;
	struct trace_iterator *iter = &info->iter;

	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
	put_snapshot_map(iter->tr);
}

static const struct vm_operations_struct tracing_buffers_vmops = {
	.close		= tracing_buffers_mmap_close,
};

static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	int ret = 0;

	/* Currently the boot mapped buffer is not supported for mmap */
	if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
		return -ENODEV;

	ret = get_snapshot_map(iter->tr);
	if (ret)
		return ret;

	ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
	if (ret)
		put_snapshot_map(iter->tr);

	vma->vm_ops = &tracing_buffers_vmops;

	return ret;
}
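/*
 * Illustrative user-space sequence for the mmap interface above,
 * assuming the UAPI in <linux/trace_mmap.h> (this is a sketch, not a
 * complete reader; the meta-page layout is defined by the ring-buffer
 * mapping code):
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	meta = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);	flip the reader page
 */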
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.flush		= tracing_buffers_flush,
	.splice_read	= tracing_buffers_splice_read,
	.unlocked_ioctl	= tracing_buffers_ioctl,
	.mmap		= tracing_buffers_mmap,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct array_buffer *trace_buf = &tr->array_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
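/*
 * Example per-CPU "stats" output produced above (all values
 * illustrative):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 65536
 *	oldest event ts:  5234.000123
 *	now ts:  5278.000456
 *	dropped events: 0
 *	read events: 512
 */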
#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	char *buf;
	int r;

	/* 512 should be plenty to hold the amount needed */
#define DYN_INFO_BUF_SIZE	512

	buf = kmalloc(DYN_INFO_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = scnprintf(buf, DYN_INFO_BUF_SIZE,
		      "%ld pages:%ld groups: %ld\n"
		      "ftrace boot update time = %llu (ns)\n"
		      "ftrace module total update time = %llu (ns)\n",
		      ftrace_update_tot_cnt,
		      ftrace_number_of_pages,
		      ftrace_number_of_groups,
		      ftrace_update_time,
		      ftrace_total_mod_time);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	kfree(buf);
	return ret;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return;

		(*count)--;
	}

	tracing_snapshot_instance(tr);
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};

static int
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
		if (!ret)
			tracing_disarm_snapshot(tr);

		return ret;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

out_reg:
	ret = tracing_arm_snapshot(tr);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, tr, ops, count);
	if (ret < 0)
		tracing_disarm_snapshot(tr);
out:
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
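/*
 * Usage sketch for the "snapshot" command registered above, using the
 * documented set_ftrace_filter syntax:
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter
 *	echo 'schedule:snapshot:3' > set_ftrace_filter
 *	echo '!schedule:snapshot' > set_ftrace_filter
 *
 * The optional :count limits how many snapshots get taken; the "!"
 * form unregisters the probe again.
 */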
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	MEM_FAIL(!tr->percpu_dir,
		 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
			      tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
			      tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
			      tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
			      tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
			      tr, cpu, &tracing_entries_fops);

	if (tr->range_addr_start)
		trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu,
				      tr, cpu, &tracing_buffer_meta_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
	if (!tr->range_addr_start) {
		trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
				      tr, cpu, &snapshot_fops);

		trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
				      tr, cpu, &snapshot_raw_fops);
	}
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif
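/*
 * Resulting per-CPU tracefs layout (buffer_meta only for boot mapped
 * ranges, the snapshot files only with CONFIG_TRACER_SNAPSHOT):
 *
 *	per_cpu/cpu<N>/trace_pipe
 *	per_cpu/cpu<N>/trace
 *	per_cpu/cpu<N>/trace_pipe_raw
 *	per_cpu/cpu<N>/stats
 *	per_cpu/cpu<N>/buffer_size_kb
 *	per_cpu/cpu<N>/snapshot
 *	per_cpu/cpu<N>/snapshot_raw
 */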
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static int tracing_open_options(struct inode *inode, struct file *filp)
{
	struct trace_option_dentry *topt = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(topt->tr);
	if (ret)
		return ret;

	filp->private_data = inode->i_private;
	return 0;
}

static int tracing_release_options(struct inode *inode, struct file *file)
{
	struct trace_option_dentry *topt = file->private_data;

	trace_array_put(topt->tr);
	return 0;
}

static const struct file_operations trace_options_fops = {
	.open		= tracing_open_options,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_options,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *	idx = *ptr;
 *
 * This works because the pointer points at the index entry whose value
 * equals its own offset (remember, index[1] == 1).
 *
 * Then, subtracting that index from the pointer gets us to the start
 * of the index array itself:
 *
 *	ptr - idx == &index[0]
 *
 * and a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
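/*
 * Worked example (illustrative): for the option file backing flag
 * bit 3, data points at tr->trace_flags_index[3], whose value is 3.
 * So *pindex becomes 3, data - 3 is &tr->trace_flags_index[0], and
 * container_of() on that address yields the enclosing trace_array.
 */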
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
					t_options, topt, &trace_options_fops);
}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		MEM_FAIL(topts[cnt].entry == NULL,
			 "Failed to create trace option: %s",
			 opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, TRACE_MODE_WRITE, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
			/* Wake up any waiters */
			ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
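/*
 * Illustrative use of the "tracing_on" file backed by rb_simple_fops:
 * "echo 0 > tracing_on" stops events from being recorded (and calls
 * the current tracer's ->stop()), while the buffer contents remain
 * readable; "echo 1 > tracing_on" resumes recording via ->start().
 */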
static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}

static const struct file_operations buffer_percent_fops = {
	.open		= tracing_open_generic_tr,
	.read		= buffer_percent_read,
	.write		= buffer_percent_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

static ssize_t
buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	size_t size;
	char buf[64];
	int order;
	int r;

	order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
	size = (PAGE_SIZE << order) / 1024;

	r = sprintf(buf, "%zd\n", size);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
			 size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int old_order;
	int order;
	int pages;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val *= 1024; /* value passed in is in KB */

	pages = DIV_ROUND_UP(val, PAGE_SIZE);
	order = fls(pages - 1);

	/* limit between 1 and 128 system pages */
	if (order < 0 || order > 7)
		return -EINVAL;

	/* Do not allow tracing while changing the order of the ring buffer */
	tracing_stop_tr(tr);

	old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
	if (old_order == order)
		goto out;

	ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
	if (ret)
		goto out;

#ifdef CONFIG_TRACER_MAX_TRACE

	if (!tr->allocated_snapshot)
		goto out_max;

	ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
	if (ret) {
		/* Put back the old order */
		cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
		if (WARN_ON_ONCE(cnt)) {
			/*
			 * AARGH! We are left with different orders!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the order of the main buffer, but
			 * failed to update the order of the max buffer.
			 * And when we tried to reset the main buffer
			 * back to the original order, that failed too.
			 * This is very unlikely to happen, but if it
			 * does, warn and kill all tracing.
			 */
			tracing_disabled = 1;
		}
		goto out;
	}
out_max:
#endif
	(*ppos)++;
out:
	if (ret)
		cnt = ret;
	tracing_start_tr(tr);
	return cnt;
}
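/*
 * Worked example for the arithmetic above (illustrative, 4K pages):
 * writing 6 to buffer_subbuf_size_kb gives val = 6144, pages = 2,
 * order = fls(1) = 1, i.e. 8K sub-buffers; reading the file back then
 * reports 8. Orders outside 0..7 (1 to 128 pages) are rejected.
 */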
static const struct file_operations buffer_subbuf_size_fops = {
	.open		= tracing_open_generic_tr,
	.read		= buffer_subbuf_size_read,
	.write		= buffer_subbuf_size_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

static struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	if (tr->range_addr_start && tr->range_addr_size) {
		buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
						      tr->range_addr_start,
						      tr->range_addr_size);

		ring_buffer_last_boot_delta(buf->buffer,
					    &tr->text_delta, &tr->data_delta);
		/*
		 * This is basically the same as a mapped buffer,
		 * with the same restrictions.
		 */
		tr->mapped++;
	} else {
		buf->buffer = ring_buffer_alloc(size, rb_flags);
	}
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->array_buffer,
			   ring_buffer_size(tr->array_buffer.buffer, 0));

	return 0;
}

static void free_trace_buffer(struct array_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Fixed (boot mapped) buffer trace arrays do not have snapshot buffers */
	if (tr->range_addr_start)
		return 0;

	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
		free_trace_buffer(&tr->array_buffer);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	allocate_snapshot = false;
#endif

	return 0;
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	tracer_options_updated = true;
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

/* Must have trace_types_lock held */
struct trace_array *trace_array_find(const char *instance)
{
	struct trace_array *tr, *found = NULL;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, instance) == 0) {
			found = tr;
			break;
		}
	}

	return found;
}

struct trace_array *trace_array_find_get(const char *instance)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	tr = trace_array_find(instance);
	if (tr)
		tr->ref++;
	mutex_unlock(&trace_types_lock);

	return tr;
}
static int trace_array_create_dir(struct trace_array *tr)
{
	int ret;

	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
	if (!tr->dir)
		return -EINVAL;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove(tr->dir);
		return ret;
	}

	init_tracer_tracefs(tr, tr->dir);
	__update_tracer_options(tr);

	return ret;
}

static struct trace_array *
trace_array_create_systems(const char *name, const char *systems,
			   unsigned long range_addr_start,
			   unsigned long range_addr_size)
{
	struct trace_array *tr;
	int ret;

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return ERR_PTR(ret);

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
		goto out_free_tr;

	if (systems) {
		tr->system_names = kstrdup_const(systems, GFP_KERNEL);
		if (!tr->system_names)
			goto out_free_tr;
	}

	/* Only for boot up memory mapped ring buffers */
	tr->range_addr_start = range_addr_start;
	tr->range_addr_size = range_addr_size;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_TRACER_MAX_TRACE
	spin_lock_init(&tr->snapshot_trigger_lock);
#endif
	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);
	INIT_LIST_HEAD(&tr->err_log);

#ifdef CONFIG_MODULES
	INIT_LIST_HEAD(&tr->mod_events);
#endif

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	/* The ring buffer is expanded by default */
	trace_set_ring_buffer_expanded(tr);

	if (ftrace_allocate_ftrace_ops(tr) < 0)
		goto out_free_tr;

	ftrace_init_trace_array(tr);

	init_trace_flags_index(tr);

	if (trace_instance_dir) {
		ret = trace_array_create_dir(tr);
		if (ret)
			goto out_free_tr;
	} else
		__trace_early_add_events(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	tr->ref++;

	return tr;

out_free_tr:
	ftrace_free_ftrace_ops(tr);
	free_trace_buffers(tr);
	free_cpumask_var(tr->pipe_cpumask);
	free_cpumask_var(tr->tracing_cpumask);
	kfree_const(tr->system_names);
	kfree(tr->name);
	kfree(tr);

	return ERR_PTR(ret);
}

static struct trace_array *trace_array_create(const char *name)
{
	return trace_array_create_systems(name, NULL, 0, 0);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	guard(mutex)(&event_mutex);
	guard(mutex)(&trace_types_lock);

	ret = -EEXIST;
	if (trace_array_find(name))
		return -EEXIST;

	tr = trace_array_create(name);

	ret = PTR_ERR_OR_ZERO(tr);

	return ret;
}

static u64 map_pages(u64 start, u64 size)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	unsigned int i;
	void *vaddr;

	page_count = DIV_ROUND_UP(size, PAGE_SIZE);

	page_start = start;
	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return 0;

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
	kfree(pages);

	return (u64)(unsigned long)vaddr;
}

/**
 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 * @name: The name of the trace array to be looked up/created.
 * @systems: A list of systems to create event directories for (NULL for all)
 *
 * Returns a pointer to the trace array with the given name, or NULL if
 * it cannot be created.
 *
 * NOTE: This function increments the reference counter associated with the
 * trace array returned. This makes sure it cannot be freed while in use.
 * Use trace_array_put() once the trace array is no longer needed.
 * If the trace_array is to be freed, trace_array_destroy() needs to
 * be called after the trace_array_put(), or simply let user space delete
 * it from the tracefs instances directory. But until the
 * trace_array_put() is called, user space can not delete it.
 */
struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
{
	struct trace_array *tr;

	guard(mutex)(&event_mutex);
	guard(mutex)(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			tr->ref++;
			return tr;
		}
	}

	tr = trace_array_create_systems(name, systems, 0, 0);

	if (IS_ERR(tr))
		tr = NULL;
	else
		tr->ref++;

	return tr;
}
EXPORT_SYMBOL_GPL(trace_array_get_by_name);
static int __remove_instance(struct trace_array *tr)
{
	int i;

	/* Reference counter for a newly created trace array = 1. */
	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
		return -EBUSY;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	if (printk_trace == tr)
		update_printk_trace(&global_trace);

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove(tr->dir);
	free_percpu(tr->last_func_repeats);
	free_trace_buffers(tr);
	clear_tracing_err_log(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->pipe_cpumask);
	free_cpumask_var(tr->tracing_cpumask);
	kfree_const(tr->system_names);
	kfree(tr->name);
	kfree(tr);

	return 0;
}

int trace_array_destroy(struct trace_array *this_tr)
{
	struct trace_array *tr;

	if (!this_tr)
		return -EINVAL;

	guard(mutex)(&event_mutex);
	guard(mutex)(&trace_types_lock);

	/* Make sure the trace array exists before destroying it. */
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr)
			return __remove_instance(tr);
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(trace_array_destroy);

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;

	guard(mutex)(&event_mutex);
	guard(mutex)(&trace_types_lock);

	tr = trace_array_find(name);
	if (!tr)
		return -ENODEV;

	return __remove_instance(tr);
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	struct trace_array *tr;

	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
		return;

	guard(mutex)(&event_mutex);
	guard(mutex)(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->name)
			continue;
		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
			     "Failed to create instance directory\n"))
			return;
	}
}

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	tr->buffer_percent = 50;

	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
			  tr, &buffer_percent_fops);

	trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
			  tr, &buffer_subbuf_size_fops);

	create_trace_options_dir(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_maxlat_file(tr, d_tracer);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		MEM_FAIL(1, "Could not allocate function filter files");

	if (tr->range_addr_start) {
		trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer,
				  tr, &last_boot_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
	} else {
		trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
				  tr, &snapshot_fops);
#endif
	}

	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
			  tr, &tracing_err_log_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and returns 0 on success (with the top level tracing directory set
 * up), or a negative error code otherwise.
 */
int tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return 0;

	if (WARN_ON(!tracefs_initialized()))
		return -ENODEV;

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);

	return 0;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static struct workqueue_struct *eval_map_wq __initdata;
static struct work_struct eval_map_work __initdata;
static struct work_struct tracerfs_init_work __initdata;

static void __init eval_map_work_func(struct work_struct *work)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

static int __init trace_eval_init(void)
{
	INIT_WORK(&eval_map_work, eval_map_work_func);

	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
	if (!eval_map_wq) {
		pr_err("Unable to allocate eval_map_wq\n");
		/* Do work here */
		eval_map_work_func(&eval_map_work);
		return -ENOMEM;
	}

	queue_work(eval_map_wq, &eval_map_work);
	return 0;
}

subsys_initcall(trace_eval_init);

static int __init trace_eval_sync(void)
{
	/* Make sure the eval map updates are finished */
	if (eval_map_wq)
		destroy_workqueue(eval_map_wq);
	return 0;
}

late_initcall_sync(trace_eval_sync);
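/*
 * Background note (hedged): eval maps originate from
 * TRACE_DEFINE_ENUM() (and the sizeof variant) in trace event
 * headers, e.g.:
 *
 *	TRACE_DEFINE_ENUM(MY_ENUM_VALUE);
 *
 * so that event print formats can resolve the symbolic name to its
 * numeric value. Built-in maps are inserted by the workqueue above;
 * module maps are handled by the notifier below.
 */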
#ifdef CONFIG_MODULES

bool module_exists(const char *module)
{
	/* All modules have the symbol __this_module */
	static const char this_mod[] = "__this_module";
	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
	unsigned long val;
	int n;

	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);

	if (n > sizeof(modname) - 1)
		return false;

	val = module_kallsyms_lookup_name(modname);
	return val != 0;
}

static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	guard(mutex)(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		return;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			  NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			  NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();

	return 0;
}

fs_initcall(tracer_init_tracefs);

static int trace_die_panic_handler(struct notifier_block *self,
				   unsigned long ev, void *unused);
trace_panic_notifier = { 9985 .notifier_call = trace_die_panic_handler, 9986 .priority = INT_MAX - 1, 9987 }; 9988 9989 static struct notifier_block trace_die_notifier = { 9990 .notifier_call = trace_die_panic_handler, 9991 .priority = INT_MAX - 1, 9992 }; 9993 9994 /* 9995 * The idea is to execute the following die/panic callback early, in order 9996 * to avoid showing irrelevant information in the trace (like other panic 9997 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall 9998 * warnings get disabled (to prevent potential log flooding). 9999 */ 10000 static int trace_die_panic_handler(struct notifier_block *self, 10001 unsigned long ev, void *unused) 10002 { 10003 if (!ftrace_dump_on_oops_enabled()) 10004 return NOTIFY_DONE; 10005 10006 /* The die notifier requires DIE_OOPS to trigger */ 10007 if (self == &trace_die_notifier && ev != DIE_OOPS) 10008 return NOTIFY_DONE; 10009 10010 ftrace_dump(DUMP_PARAM); 10011 10012 return NOTIFY_DONE; 10013 } 10014 10015 /* 10016 * printk is set to max of 1024, we really don't need it that big. 10017 * Nothing should be printing 1000 characters anyway. 10018 */ 10019 #define TRACE_MAX_PRINT 1000 10020 10021 /* 10022 * Define here KERN_TRACE so that we have one place to modify 10023 * it if we decide to change what log level the ftrace dump 10024 * should be at. 10025 */ 10026 #define KERN_TRACE KERN_EMERG 10027 10028 void 10029 trace_printk_seq(struct trace_seq *s) 10030 { 10031 /* Probably should print a warning here. */ 10032 if (s->seq.len >= TRACE_MAX_PRINT) 10033 s->seq.len = TRACE_MAX_PRINT; 10034 10035 /* 10036 * More paranoid code. Although the buffer size is set to 10037 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just 10038 * an extra layer of protection. 10039 */ 10040 if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) 10041 s->seq.len = s->seq.size - 1; 10042 10043 /* should be zero ended, but we are paranoid. */ 10044 s->buffer[s->seq.len] = 0; 10045 10046 printk(KERN_TRACE "%s", s->buffer); 10047 10048 trace_seq_init(s); 10049 } 10050 10051 static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr) 10052 { 10053 iter->tr = tr; 10054 iter->trace = iter->tr->current_trace; 10055 iter->cpu_file = RING_BUFFER_ALL_CPUS; 10056 iter->array_buffer = &tr->array_buffer; 10057 10058 if (iter->trace && iter->trace->open) 10059 iter->trace->open(iter); 10060 10061 /* Annotate start of buffers if we had overruns */ 10062 if (ring_buffer_overruns(iter->array_buffer->buffer)) 10063 iter->iter_flags |= TRACE_FILE_ANNOTATE; 10064 10065 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 10066 if (trace_clocks[iter->tr->clock_id].in_ns) 10067 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 10068 10069 /* Can not use kmalloc for iter.temp and iter.fmt */ 10070 iter->temp = static_temp_buf; 10071 iter->temp_size = STATIC_TEMP_BUF_SIZE; 10072 iter->fmt = static_fmt_buf; 10073 iter->fmt_size = STATIC_FMT_BUF_SIZE; 10074 } 10075 10076 void trace_init_global_iter(struct trace_iterator *iter) 10077 { 10078 trace_init_iter(iter, &global_trace); 10079 } 10080 10081 static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode) 10082 { 10083 /* use static because iter can be a bit big for the stack */ 10084 static struct trace_iterator iter; 10085 unsigned int old_userobj; 10086 unsigned long flags; 10087 int cnt = 0, cpu; 10088 10089 /* 10090 * Always turn off tracing when we dump. 10091 * We don't need to show trace output of what happens 10092 * between multiple crashes. 
static int trace_die_panic_handler(struct notifier_block *self,
				   unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the second to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				   unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops_enabled())
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(DUMP_PARAM);

	return NOTIFY_DONE;
}

/*
 * printk is set to a max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
{
	iter->tr = tr;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &tr->array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Cannot use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	trace_init_iter(iter, &global_trace);
}

static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracer_tracing_off(tr);

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_iter(&iter, tr);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	if (dump_mode == DUMP_ORIG)
		iter.cpu_file = raw_smp_processor_id();
	else
		iter.cpu_file = RING_BUFFER_ALL_CPUS;

	if (tr == &global_trace)
		printk(KERN_TRACE "Dumping ftrace buffer:\n");
	else
		printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read all that we can, and then
	 * release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	local_irq_restore(flags);
}

static void ftrace_dump_by_param(void)
{
	bool first_param = true;
	char dump_param[MAX_TRACER_SIZE];
	char *buf, *token, *inst_name;
	struct trace_array *tr;

	strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE);
	buf = dump_param;

	while ((token = strsep(&buf, ",")) != NULL) {
		if (first_param) {
			first_param = false;
			if (!strcmp("0", token))
				continue;
			else if (!strcmp("1", token)) {
				ftrace_dump_one(&global_trace, DUMP_ALL);
				continue;
			} else if (!strcmp("2", token) ||
				   !strcmp("orig_cpu", token)) {
				ftrace_dump_one(&global_trace, DUMP_ORIG);
				continue;
			}
		}

		inst_name = strsep(&token, "=");
		tr = trace_array_find(inst_name);
		if (!tr) {
			printk(KERN_TRACE "Instance %s not found\n", inst_name);
			continue;
		}

		if (token && (!strcmp("2", token) ||
			      !strcmp("orig_cpu", token)))
			ftrace_dump_one(tr, DUMP_ORIG);
		else
			ftrace_dump_one(tr, DUMP_ALL);
	}
}
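
/*
 * Illustrative parameter strings accepted by the parser above
 * (set via the "ftrace_dump_on_oops" kernel command line option;
 * "foo" is a hypothetical instance name):
 *
 *	ftrace_dump_on_oops			dump all CPUs of the global buffer
 *	ftrace_dump_on_oops=orig_cpu		dump only the CPU that oopsed
 *	ftrace_dump_on_oops=foo			dump instance "foo", all CPUs
 *	ftrace_dump_on_oops=1,foo=orig_cpu	global buffer plus instance
 *						"foo" on the triggering CPU
 *
 * Unknown instance names are reported and skipped.
 */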
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	static atomic_t dump_running;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	switch (oops_dump_mode) {
	case DUMP_ALL:
		ftrace_dump_one(&global_trace, DUMP_ALL);
		break;
	case DUMP_ORIG:
		ftrace_dump_one(&global_trace, DUMP_ORIG);
		break;
	case DUMP_PARAM:
		ftrace_dump_by_param();
		break;
	case DUMP_NONE:
		break;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		ftrace_dump_one(&global_trace, DUMP_ALL);
	}

	atomic_dec(&dump_running);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
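
/*
 * Illustrative (hypothetical) caller: since ftrace_dump() is exported,
 * other kernel code can force a dump when it detects a bad state, e.g.:
 *
 *	if (unlikely(data_corrupted))
 *		ftrace_dump(DUMP_ALL);
 *
 * "data_corrupted" is a made-up condition; the single-dumper guard
 * above means concurrent callers simply return without dumping.
 */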
10344 */ 10345 allocate_snapshot = true; 10346 } 10347 #else 10348 static inline void do_allocate_snapshot(const char *name) { } 10349 #endif 10350 10351 __init static void enable_instances(void) 10352 { 10353 struct trace_array *tr; 10354 char *curr_str; 10355 char *name; 10356 char *str; 10357 char *tok; 10358 10359 /* A tab is always appended */ 10360 boot_instance_info[boot_instance_index - 1] = '\0'; 10361 str = boot_instance_info; 10362 10363 while ((curr_str = strsep(&str, "\t"))) { 10364 phys_addr_t start = 0; 10365 phys_addr_t size = 0; 10366 unsigned long addr = 0; 10367 bool traceprintk = false; 10368 bool traceoff = false; 10369 char *flag_delim; 10370 char *addr_delim; 10371 10372 tok = strsep(&curr_str, ","); 10373 10374 flag_delim = strchr(tok, '^'); 10375 addr_delim = strchr(tok, '@'); 10376 10377 if (addr_delim) 10378 *addr_delim++ = '\0'; 10379 10380 if (flag_delim) 10381 *flag_delim++ = '\0'; 10382 10383 name = tok; 10384 10385 if (flag_delim) { 10386 char *flag; 10387 10388 while ((flag = strsep(&flag_delim, "^"))) { 10389 if (strcmp(flag, "traceoff") == 0) { 10390 traceoff = true; 10391 } else if ((strcmp(flag, "printk") == 0) || 10392 (strcmp(flag, "traceprintk") == 0) || 10393 (strcmp(flag, "trace_printk") == 0)) { 10394 traceprintk = true; 10395 } else { 10396 pr_info("Tracing: Invalid instance flag '%s' for %s\n", 10397 flag, name); 10398 } 10399 } 10400 } 10401 10402 tok = addr_delim; 10403 if (tok && isdigit(*tok)) { 10404 start = memparse(tok, &tok); 10405 if (!start) { 10406 pr_warn("Tracing: Invalid boot instance address for %s\n", 10407 name); 10408 continue; 10409 } 10410 if (*tok != ':') { 10411 pr_warn("Tracing: No size specified for instance %s\n", name); 10412 continue; 10413 } 10414 tok++; 10415 size = memparse(tok, &tok); 10416 if (!size) { 10417 pr_warn("Tracing: Invalid boot instance size for %s\n", 10418 name); 10419 continue; 10420 } 10421 } else if (tok) { 10422 if (!reserve_mem_find_by_name(tok, &start, &size)) { 10423 start = 0; 10424 pr_warn("Failed to map boot instance %s to %s\n", name, tok); 10425 continue; 10426 } 10427 } 10428 10429 if (start) { 10430 addr = map_pages(start, size); 10431 if (addr) { 10432 pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n", 10433 name, &start, (unsigned long)size); 10434 } else { 10435 pr_warn("Tracing: Failed to map boot instance %s\n", name); 10436 continue; 10437 } 10438 } else { 10439 /* Only non mapped buffers have snapshot buffers */ 10440 if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE)) 10441 do_allocate_snapshot(name); 10442 } 10443 10444 tr = trace_array_create_systems(name, NULL, addr, size); 10445 if (IS_ERR(tr)) { 10446 pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str); 10447 continue; 10448 } 10449 10450 if (traceoff) 10451 tracer_tracing_off(tr); 10452 10453 if (traceprintk) 10454 update_printk_trace(tr); 10455 10456 /* 10457 * If start is set, then this is a mapped buffer, and 10458 * cannot be deleted by user space, so keep the reference 10459 * to it. 
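
/*
 * Illustrative use: the dynamic event files funnel their write handlers
 * through this helper, passing a createfn that parses one definition
 * per line. From user space that looks like, e.g.:
 *
 *	# echo 'p:myprobe do_sys_open' >> /sys/kernel/tracing/kprobe_events
 *
 * "myprobe" is a hypothetical probe name. '#' starts a comment, and
 * each newline-terminated line is handed to createfn separately.
 */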
10460 */ 10461 if (start) { 10462 tr->flags |= TRACE_ARRAY_FL_BOOT; 10463 tr->ref++; 10464 } 10465 10466 while ((tok = strsep(&curr_str, ","))) { 10467 early_enable_events(tr, tok, true); 10468 } 10469 } 10470 } 10471 10472 __init static int tracer_alloc_buffers(void) 10473 { 10474 int ring_buf_size; 10475 int ret = -ENOMEM; 10476 10477 10478 if (security_locked_down(LOCKDOWN_TRACEFS)) { 10479 pr_warn("Tracing disabled due to lockdown\n"); 10480 return -EPERM; 10481 } 10482 10483 /* 10484 * Make sure we don't accidentally add more trace options 10485 * than we have bits for. 10486 */ 10487 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); 10488 10489 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) 10490 goto out; 10491 10492 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) 10493 goto out_free_buffer_mask; 10494 10495 /* Only allocate trace_printk buffers if a trace_printk exists */ 10496 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt) 10497 /* Must be called before global_trace.buffer is allocated */ 10498 trace_printk_init_buffers(); 10499 10500 /* To save memory, keep the ring buffer size to its minimum */ 10501 if (global_trace.ring_buffer_expanded) 10502 ring_buf_size = trace_buf_size; 10503 else 10504 ring_buf_size = 1; 10505 10506 cpumask_copy(tracing_buffer_mask, cpu_possible_mask); 10507 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); 10508 10509 raw_spin_lock_init(&global_trace.start_lock); 10510 10511 /* 10512 * The prepare callbacks allocates some memory for the ring buffer. We 10513 * don't free the buffer if the CPU goes down. If we were to free 10514 * the buffer, then the user would lose any trace that was in the 10515 * buffer. The memory will be removed once the "instance" is removed. 10516 */ 10517 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, 10518 "trace/RB:prepare", trace_rb_cpu_prepare, 10519 NULL); 10520 if (ret < 0) 10521 goto out_free_cpumask; 10522 /* Used for event triggers */ 10523 ret = -ENOMEM; 10524 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); 10525 if (!temp_buffer) 10526 goto out_rm_hp_state; 10527 10528 if (trace_create_savedcmd() < 0) 10529 goto out_free_temp_buffer; 10530 10531 if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL)) 10532 goto out_free_savedcmd; 10533 10534 /* TODO: make the number of buffers hot pluggable with CPUS */ 10535 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { 10536 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n"); 10537 goto out_free_pipe_cpumask; 10538 } 10539 if (global_trace.buffer_disabled) 10540 tracing_off(); 10541 10542 if (trace_boot_clock) { 10543 ret = tracing_set_clock(&global_trace, trace_boot_clock); 10544 if (ret < 0) 10545 pr_warn("Trace clock %s not defined, going back to default\n", 10546 trace_boot_clock); 10547 } 10548 10549 /* 10550 * register_tracer() might reference current_trace, so it 10551 * needs to be set before we register anything. This is 10552 * just a bootstrap of current_trace anyway. 
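
/*
 * Illustrative "trace_instance=" boot parameters matching the parsing
 * above (the instance names and the "rmem" reservation are
 * hypothetical):
 *
 *	trace_instance=foo				plain instance "foo"
 *	trace_instance=foo^traceoff^traceprintk		start with tracing off
 *	trace_instance=foo@0x1000000:2M			map 2M at that address
 *	trace_instance=foo@rmem				use a reserve_mem region
 *	trace_instance=foo,sched:sched_switch		enable listed events
 *
 * Instances are separated by tabs in boot_instance_info; flags follow
 * '^', an address or reserve_mem name follows '@', and everything
 * after the first ',' is treated as events to enable.
 */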
10553 */ 10554 global_trace.current_trace = &nop_trace; 10555 10556 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 10557 #ifdef CONFIG_TRACER_MAX_TRACE 10558 spin_lock_init(&global_trace.snapshot_trigger_lock); 10559 #endif 10560 ftrace_init_global_array_ops(&global_trace); 10561 10562 #ifdef CONFIG_MODULES 10563 INIT_LIST_HEAD(&global_trace.mod_events); 10564 #endif 10565 10566 init_trace_flags_index(&global_trace); 10567 10568 register_tracer(&nop_trace); 10569 10570 /* Function tracing may start here (via kernel command line) */ 10571 init_function_trace(); 10572 10573 /* All seems OK, enable tracing */ 10574 tracing_disabled = 0; 10575 10576 atomic_notifier_chain_register(&panic_notifier_list, 10577 &trace_panic_notifier); 10578 10579 register_die_notifier(&trace_die_notifier); 10580 10581 global_trace.flags = TRACE_ARRAY_FL_GLOBAL; 10582 10583 INIT_LIST_HEAD(&global_trace.systems); 10584 INIT_LIST_HEAD(&global_trace.events); 10585 INIT_LIST_HEAD(&global_trace.hist_vars); 10586 INIT_LIST_HEAD(&global_trace.err_log); 10587 list_add(&global_trace.list, &ftrace_trace_arrays); 10588 10589 apply_trace_boot_options(); 10590 10591 register_snapshot_cmd(); 10592 10593 return 0; 10594 10595 out_free_pipe_cpumask: 10596 free_cpumask_var(global_trace.pipe_cpumask); 10597 out_free_savedcmd: 10598 trace_free_saved_cmdlines_buffer(); 10599 out_free_temp_buffer: 10600 ring_buffer_free(temp_buffer); 10601 out_rm_hp_state: 10602 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE); 10603 out_free_cpumask: 10604 free_cpumask_var(global_trace.tracing_cpumask); 10605 out_free_buffer_mask: 10606 free_cpumask_var(tracing_buffer_mask); 10607 out: 10608 return ret; 10609 } 10610 10611 #ifdef CONFIG_FUNCTION_TRACER 10612 /* Used to set module cached ftrace filtering at boot up */ 10613 __init struct trace_array *trace_get_global_array(void) 10614 { 10615 return &global_trace; 10616 } 10617 #endif 10618 10619 void __init ftrace_boot_snapshot(void) 10620 { 10621 #ifdef CONFIG_TRACER_MAX_TRACE 10622 struct trace_array *tr; 10623 10624 if (!snapshot_at_boot) 10625 return; 10626 10627 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 10628 if (!tr->allocated_snapshot) 10629 continue; 10630 10631 tracing_snapshot_instance(tr); 10632 trace_array_puts(tr, "** Boot snapshot taken **\n"); 10633 } 10634 #endif 10635 } 10636 10637 void __init early_trace_init(void) 10638 { 10639 if (tracepoint_printk) { 10640 tracepoint_print_iter = 10641 kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); 10642 if (MEM_FAIL(!tracepoint_print_iter, 10643 "Failed to allocate trace iterator\n")) 10644 tracepoint_printk = 0; 10645 else 10646 static_key_enable(&tracepoint_printk_key.key); 10647 } 10648 tracer_alloc_buffers(); 10649 10650 init_events(); 10651 } 10652 10653 void __init trace_init(void) 10654 { 10655 trace_event_init(); 10656 10657 if (boot_instance_index) 10658 enable_instances(); 10659 } 10660 10661 __init static void clear_boot_tracer(void) 10662 { 10663 /* 10664 * The default tracer at boot buffer is an init section. 10665 * This function is called in lateinit. If we did not 10666 * find the boot tracer, then clear it out, to prevent 10667 * later registration from accessing the buffer that is 10668 * about to be freed. 
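
/*
 * Usage note (illustrative): the trace clock can also be chosen
 * explicitly, either on the command line or at run time:
 *
 *	trace_clock=global			(kernel command line)
 *	# echo global > /sys/kernel/tracing/trace_clock
 *
 * which pre-empts the unstable-clock fallback above; "local" and
 * "global" are among the clocks listed in the trace_clock file.
 */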
10669 */ 10670 if (!default_bootup_tracer) 10671 return; 10672 10673 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", 10674 default_bootup_tracer); 10675 default_bootup_tracer = NULL; 10676 } 10677 10678 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 10679 __init static void tracing_set_default_clock(void) 10680 { 10681 /* sched_clock_stable() is determined in late_initcall */ 10682 if (!trace_boot_clock && !sched_clock_stable()) { 10683 if (security_locked_down(LOCKDOWN_TRACEFS)) { 10684 pr_warn("Can not set tracing clock due to lockdown\n"); 10685 return; 10686 } 10687 10688 printk(KERN_WARNING 10689 "Unstable clock detected, switching default tracing clock to \"global\"\n" 10690 "If you want to keep using the local clock, then add:\n" 10691 " \"trace_clock=local\"\n" 10692 "on the kernel command line\n"); 10693 tracing_set_clock(&global_trace, "global"); 10694 } 10695 } 10696 #else 10697 static inline void tracing_set_default_clock(void) { } 10698 #endif 10699 10700 __init static int late_trace_init(void) 10701 { 10702 if (tracepoint_printk && tracepoint_printk_stop_on_boot) { 10703 static_key_disable(&tracepoint_printk_key.key); 10704 tracepoint_printk = 0; 10705 } 10706 10707 tracing_set_default_clock(); 10708 clear_boot_tracer(); 10709 return 0; 10710 } 10711 10712 late_initcall_sync(late_trace_init); 10713