/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will be set to zero if the initialization
 * of the tracer is successful. That is the only place that clears it.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	__this_cpu_inc(ftrace_cpu_disabled);
}

static inline void ftrace_enable_cpu(void)
{
	__this_cpu_dec(ftrace_cpu_disabled);
	preempt_enable();
}

cpumask_var_t __read_mostly	tracing_buffer_mask;
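/*
 * Illustrative sketch (not part of the original file): the pair above
 * is meant to bracket any code that walks the ring buffer, so that the
 * walk itself is not traced into the buffer, e.g.:
 *
 *	ftrace_disable_cpu();
 *	ring_buffer_reset_cpu(buffer, cpu);
 *	ftrace_enable_cpu();
 *
 * This is exactly the pattern used by __tracing_reset() further down.
 */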
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set to 1 to dump the buffers of all CPUs
 * Set to 2 to dump only the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_cmdline_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
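/*
 * Example kernel command lines (illustrative only; the tracer name
 * "function" assumes CONFIG_FUNCTION_TRACER is enabled):
 *
 *	ftrace=function			start the function tracer at boot
 *	ftrace_dump_on_oops		dump all CPU buffers on an oops
 *	ftrace_dump_on_oops=orig_cpu	dump only the oopsing CPU's buffer
 */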
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in memory is used to hold the
 * linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset
 * for the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag.  Tracers may use this function
 * to know if they should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection.  The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other
 * processes to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will
 *      be rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different
 * cpu ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume
 * access.  Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
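/*
 * Illustrative sketch (an assumption, not code from this file): a
 * reader of a single cpu buffer is expected to take the access lock
 * around the peek/consume step, as s_start()/s_stop() do further
 * below:
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(tr->buffer, cpu, &ts, &lost);
 *	trace_access_unlock(cpu);
 *
 * Readers of all cpu buffers pass TRACE_PIPE_ALL_CPU instead, which
 * takes all_cpu_access_lock for writing and so excludes every
 * per-cpu reader at once.
 */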
/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	int cpu;

	if (trace_flags & TRACE_ITER_BLOCK)
		return;
	/*
	 * The runqueue_is_locked() check can race, but this is the
	 * best we have for now:
	 */
	cpu = get_cpu();
	if (!runqueue_is_locked(cpu))
		wake_up(&trace_wait);
	put_cpu();
}

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
} trace_clocks[] = {
	{ trace_clock_local,	"local" },
	{ trace_clock_global,	"global" },
};

int trace_clock_id;
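/*
 * Example usage (illustrative only, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	boot with trace_buf_size=1M	(memparse() accepts K/M/G suffixes)
 *	boot with tracing_thresh=100	(value is in microseconds)
 *
 *	echo nostacktrace > /sys/kernel/debug/tracing/trace_options
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * Names written to trace_options are exactly the strings in
 * trace_options[] above, optionally prefixed with "no" to clear the
 * bit; clock names come from trace_clocks[].
 */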
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * If the parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;
	void *ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = memcpy(buf, s->buffer + s->readpos, cnt);
	if (!ret)
		return -EFAULT;

	s->readpos += cnt;
	return cnt;
}
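/*
 * Illustrative sketch of how a debugfs ->write() handler can use
 * trace_get_user() above to pull one whitespace-separated token per
 * call (this mirrors how ftrace_regex_write() in ftrace.c uses it;
 * the handler name here is made up):
 *
 *	static ssize_t example_write(struct file *filp,
 *			const char __user *ubuf, size_t cnt, loff_t *ppos)
 *	{
 *		struct trace_parser parser;
 *		ssize_t ret;
 *
 *		if (trace_parser_get_init(&parser, PAGE_SIZE))
 *			return -ENOMEM;
 *
 *		ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *		if (ret >= 0 && trace_parser_loaded(&parser))
 *			pr_info("token: %s\n", parser.buffer);
 *
 *		trace_parser_put(&parser);
 *		return ret;
 *	}
 */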
/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr,
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE section.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	struct trace_array_cpu *max_data;

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	max_data = max_tr.data[cpu];
	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!current_trace->use_max_tr) {
		WARN_ON_ONCE(1);
		return;
	}
	arch_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!current_trace->use_max_tr) {
		WARN_ON_ONCE(1);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk(&max_tr, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
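/*
 * Illustrative sketch (an assumption based on the irqsoff tracer,
 * not code from this file) of how a latency tracer consumes the two
 * helpers above when it sees a new worst case:
 *
 *	if (delta > tracing_max_latency) {
 *		tracing_max_latency = delta;
 *		update_max_tr_single(tr, current, cpu);
 *	}
 *
 * update_max_tr() swaps every CPU buffer at once; the _single variant
 * swaps only @cpu's buffer and is what the irqsoff/preemptoff tracers
 * use from their critical-section timing path.
 */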
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		tracing_reset_online_cpus(tr);

		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		tracing_reset_online_cpus(tr);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
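/*
 * Illustrative sketch of a minimal tracer registration (hypothetical
 * names; the nop tracer in trace_nop.c is the real minimal example).
 * Unset hooks such as set_flag and wait_pipe are filled in with the
 * dummy/default implementations by register_tracer() above:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	device_initcall(init_example_tracer);
 */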
void unregister_tracer(struct tracer *type)
{
	struct tracer **t;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Tracer %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;

	if (type == current_trace && tracer_enabled) {
		tracer_enabled = 0;
		tracing_stop();
		if (current_trace->stop)
			current_trace->stop(&global_trace);
		current_trace = &nop_trace;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

static void __tracing_reset(struct ring_buffer *buffer, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(buffer, cpu);
	ftrace_enable_cpu();
}

void tracing_reset(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	__tracing_reset(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->buffer;
	int cpu;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		__tracing_reset(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace, cpu);
}

void tracing_reset_current_online_cpus(void)
{
	tracing_reset_online_cpus(&global_trace);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return trace_stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}
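/*
 * Illustrative note (not from this file): the stop/start pair nests
 * via trace_stop_count, so a caller that wants a quiescent buffer can
 * simply bracket its work, as __tracing_open()/tracing_release() do
 * further below:
 *
 *	tracing_stop();
 *	... read or dump the buffers ...
 *	tracing_start();
 *
 * Recording only resumes when the count drops back to zero.
 */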
void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
	    !tracing_is_on())
		return;

	trace_save_cmdline(tsk);
}
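/*
 * Illustrative note (not from this file): trace entries record only a
 * pid, so the comm is resolved through the small cache above. Writers
 * call tracing_record_cmdline(current) while recording (e.g. from the
 * sched_switch probes when the record-cmd option is set), and the
 * output side maps the pid back:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *
 * Pids that have rotated out of the 128-entry cache print as "<...>".
 */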
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->lock_depth	= (tsk) ? tsk->lock_depth : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc,
			     int wake)
{
	ring_buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);

	if (wake)
		trace_wake_up();
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
				       struct ring_buffer_event *event,
				       unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}
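/*
 * Illustrative summary (not from this file) of the reserve/commit
 * sequence that trace_function() above follows and that every event
 * writer repeats:
 *
 *	event = trace_buffer_lock_reserve(buffer, type, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;		// buffer full or recording disabled
 *	entry = ring_buffer_event_data(event);
 *	... fill in the type-specific fields ...
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		ring_buffer_unlock_commit(buffer, event);
 *
 * If the event filter rejects the entry, filter_check_discard()
 * discards the reserved event instead, so readers never see it.
 */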
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= skip;
	trace.entries		= entry->caller;

	save_stack_trace(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 */
void trace_dump_stack(void)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/* skipping 3 traces, seems to get us at the caller of this function */
	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
}
1301 */ 1302 if (unlikely(in_nmi())) 1303 return; 1304 1305 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 1306 sizeof(*entry), flags, pc); 1307 if (!event) 1308 return; 1309 entry = ring_buffer_event_data(event); 1310 1311 entry->tgid = current->tgid; 1312 memset(&entry->caller, 0, sizeof(entry->caller)); 1313 1314 trace.nr_entries = 0; 1315 trace.max_entries = FTRACE_STACK_ENTRIES; 1316 trace.skip = 0; 1317 trace.entries = entry->caller; 1318 1319 save_stack_trace_user(&trace); 1320 if (!filter_check_discard(call, entry, buffer, event)) 1321 ring_buffer_unlock_commit(buffer, event); 1322 } 1323 1324 #ifdef UNUSED 1325 static void __trace_userstack(struct trace_array *tr, unsigned long flags) 1326 { 1327 ftrace_trace_userstack(tr, flags, preempt_count()); 1328 } 1329 #endif /* UNUSED */ 1330 1331 #endif /* CONFIG_STACKTRACE */ 1332 1333 /** 1334 * trace_vbprintk - write binary msg to tracing buffer 1335 * 1336 */ 1337 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 1338 { 1339 static arch_spinlock_t trace_buf_lock = 1340 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 1341 static u32 trace_buf[TRACE_BUF_SIZE]; 1342 1343 struct ftrace_event_call *call = &event_bprint; 1344 struct ring_buffer_event *event; 1345 struct ring_buffer *buffer; 1346 struct trace_array *tr = &global_trace; 1347 struct trace_array_cpu *data; 1348 struct bprint_entry *entry; 1349 unsigned long flags; 1350 int disable; 1351 int cpu, len = 0, size, pc; 1352 1353 if (unlikely(tracing_selftest_running || tracing_disabled)) 1354 return 0; 1355 1356 /* Don't pollute graph traces with trace_vprintk internals */ 1357 pause_graph_tracing(); 1358 1359 pc = preempt_count(); 1360 preempt_disable_notrace(); 1361 cpu = raw_smp_processor_id(); 1362 data = tr->data[cpu]; 1363 1364 disable = atomic_inc_return(&data->disabled); 1365 if (unlikely(disable != 1)) 1366 goto out; 1367 1368 /* Lockdep uses trace_printk for lock tracing */ 1369 local_irq_save(flags); 1370 arch_spin_lock(&trace_buf_lock); 1371 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1372 1373 if (len > TRACE_BUF_SIZE || len < 0) 1374 goto out_unlock; 1375 1376 size = sizeof(*entry) + sizeof(u32) * len; 1377 buffer = tr->buffer; 1378 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, 1379 flags, pc); 1380 if (!event) 1381 goto out_unlock; 1382 entry = ring_buffer_event_data(event); 1383 entry->ip = ip; 1384 entry->fmt = fmt; 1385 1386 memcpy(entry->buf, trace_buf, sizeof(u32) * len); 1387 if (!filter_check_discard(call, entry, buffer, event)) { 1388 ring_buffer_unlock_commit(buffer, event); 1389 ftrace_trace_stack(buffer, flags, 6, pc); 1390 } 1391 1392 out_unlock: 1393 arch_spin_unlock(&trace_buf_lock); 1394 local_irq_restore(flags); 1395 1396 out: 1397 atomic_dec_return(&data->disabled); 1398 preempt_enable_notrace(); 1399 unpause_graph_tracing(); 1400 1401 return len; 1402 } 1403 EXPORT_SYMBOL_GPL(trace_vbprintk); 1404 1405 int trace_array_printk(struct trace_array *tr, 1406 unsigned long ip, const char *fmt, ...) 
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static char trace_buf[TRACE_BUF_SIZE];

	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array_cpu *data;
	int cpu, len = 0, size, pc;
	struct print_entry *entry;
	unsigned long irq_flags;
	int disable;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	disable = atomic_inc_return(&data->disabled);
	if (unlikely(disable != 1))
		goto out;

	pause_graph_tracing();
	raw_local_irq_save(irq_flags);
	arch_spin_lock(&trace_buf_lock);
	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	size = sizeof(*entry) + len + 1;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, trace_buf, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event)) {
		ring_buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, irq_flags, 6, pc);
	}

out_unlock:
	arch_spin_unlock(&trace_buf_lock);
	raw_local_irq_restore(irq_flags);
	unpause_graph_tracing();
out:
	atomic_dec_return(&data->disabled);
	preempt_enable_notrace();

	return len;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
					 lost_events);

	ftrace_enable_cpu();

	return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all cpus; peek directly at that cpu's buffer.
	 */
	if (cpu_file > TRACE_PIPE_ALL_CPU) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp, so the
		 * per-cpu buffers merge into one time-ordered stream:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
	ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct trace_array *tr = iter->tr;
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	tr->data[cpu]->skipped_entries = 0;

	if (!iter->buffer_iter[cpu])
		return;

	buf_iter = iter->buffer_iter[cpu];
	ring_buffer_iter_reset(buf_iter);

	/*
	 * With the max latency tracers, a reset may never have taken
	 * place on a cpu. This is evident by the timestamp being
	 * before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->tr->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	tr->data[cpu]->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid holding a global lock
 * the whole time.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	static struct tracer *old_tracer;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		if (cpu_file == TRACE_PIPE_ALL_CPU) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		ftrace_enable_cpu();

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /_--=> lock-depth       \n");
	seq_puts(m, "#              |||||/     delay             \n");
	seq_puts(m, "#  cmd     pid |||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    ||||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long entries = 0;
	unsigned long total = 0;
	unsigned long count;
	const char *name = "preemption";
	int cpu;

	if (type)
		name = type->name;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(tr->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (tr->data[cpu]->skipped_entries) {
			count -= tr->data[cpu]->skipped_entries;
			/* total is the same as the entries */
			total += count;
		} else
			total += count +
				ring_buffer_overrun_cpu(tr->buffer, cpu);
		entries += count;
	}

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (iter->tr->data[iter->cpu]->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
		cpu = iter->cpu_file;
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events)
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_func_help_header(m);
	}
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if the seq_file write succeeded,
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
2110 */ 2111 mutex_lock(&trace_types_lock); 2112 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); 2113 if (!iter->trace) 2114 goto fail; 2115 2116 if (current_trace) 2117 *iter->trace = *current_trace; 2118 2119 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 2120 goto fail; 2121 2122 if (current_trace && current_trace->print_max) 2123 iter->tr = &max_tr; 2124 else 2125 iter->tr = &global_trace; 2126 iter->pos = -1; 2127 mutex_init(&iter->mutex); 2128 iter->cpu_file = cpu_file; 2129 2130 /* Notify the tracer early; before we stop tracing. */ 2131 if (iter->trace && iter->trace->open) 2132 iter->trace->open(iter); 2133 2134 /* Annotate start of buffers if we had overruns */ 2135 if (ring_buffer_overruns(iter->tr->buffer)) 2136 iter->iter_flags |= TRACE_FILE_ANNOTATE; 2137 2138 /* stop the trace while dumping */ 2139 tracing_stop(); 2140 2141 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { 2142 for_each_tracing_cpu(cpu) { 2143 iter->buffer_iter[cpu] = 2144 ring_buffer_read_prepare(iter->tr->buffer, cpu); 2145 } 2146 ring_buffer_read_prepare_sync(); 2147 for_each_tracing_cpu(cpu) { 2148 ring_buffer_read_start(iter->buffer_iter[cpu]); 2149 tracing_iter_reset(iter, cpu); 2150 } 2151 } else { 2152 cpu = iter->cpu_file; 2153 iter->buffer_iter[cpu] = 2154 ring_buffer_read_prepare(iter->tr->buffer, cpu); 2155 ring_buffer_read_prepare_sync(); 2156 ring_buffer_read_start(iter->buffer_iter[cpu]); 2157 tracing_iter_reset(iter, cpu); 2158 } 2159 2160 ret = seq_open(file, &tracer_seq_ops); 2161 if (ret < 0) { 2162 fail_ret = ERR_PTR(ret); 2163 goto fail_buffer; 2164 } 2165 2166 m = file->private_data; 2167 m->private = iter; 2168 2169 mutex_unlock(&trace_types_lock); 2170 2171 return iter; 2172 2173 fail_buffer: 2174 for_each_tracing_cpu(cpu) { 2175 if (iter->buffer_iter[cpu]) 2176 ring_buffer_read_finish(iter->buffer_iter[cpu]); 2177 } 2178 free_cpumask_var(iter->started); 2179 tracing_start(); 2180 fail: 2181 mutex_unlock(&trace_types_lock); 2182 kfree(iter->trace); 2183 kfree(iter); 2184 2185 return fail_ret; 2186 } 2187 2188 int tracing_open_generic(struct inode *inode, struct file *filp) 2189 { 2190 if (tracing_disabled) 2191 return -ENODEV; 2192 2193 filp->private_data = inode->i_private; 2194 return 0; 2195 } 2196 2197 static int tracing_release(struct inode *inode, struct file *file) 2198 { 2199 struct seq_file *m = (struct seq_file *)file->private_data; 2200 struct trace_iterator *iter; 2201 int cpu; 2202 2203 if (!(file->f_mode & FMODE_READ)) 2204 return 0; 2205 2206 iter = m->private; 2207 2208 mutex_lock(&trace_types_lock); 2209 for_each_tracing_cpu(cpu) { 2210 if (iter->buffer_iter[cpu]) 2211 ring_buffer_read_finish(iter->buffer_iter[cpu]); 2212 } 2213 2214 if (iter->trace && iter->trace->close) 2215 iter->trace->close(iter); 2216 2217 /* reenable tracing if it was previously enabled */ 2218 tracing_start(); 2219 mutex_unlock(&trace_types_lock); 2220 2221 seq_release(inode, file); 2222 mutex_destroy(&iter->mutex); 2223 free_cpumask_var(iter->started); 2224 kfree(iter->trace); 2225 kfree(iter); 2226 return 0; 2227 } 2228 2229 static int tracing_open(struct inode *inode, struct file *file) 2230 { 2231 struct trace_iterator *iter; 2232 int ret = 0; 2233 2234 /* If this file was open for write, then erase contents */ 2235 if ((file->f_mode & FMODE_WRITE) && 2236 (file->f_flags & O_TRUNC)) { 2237 long cpu = (long) inode->i_private; 2238 2239 if (cpu == TRACE_PIPE_ALL_CPU) 2240 tracing_reset_online_cpus(&global_trace); 2241 else 2242 tracing_reset(&global_trace, cpu); 2243 } 2244 2245 if 
(file->f_mode & FMODE_READ) { 2246 iter = __tracing_open(inode, file); 2247 if (IS_ERR(iter)) 2248 ret = PTR_ERR(iter); 2249 else if (trace_flags & TRACE_ITER_LATENCY_FMT) 2250 iter->iter_flags |= TRACE_FILE_LAT_FMT; 2251 } 2252 return ret; 2253 } 2254 2255 static void * 2256 t_next(struct seq_file *m, void *v, loff_t *pos) 2257 { 2258 struct tracer *t = v; 2259 2260 (*pos)++; 2261 2262 if (t) 2263 t = t->next; 2264 2265 return t; 2266 } 2267 2268 static void *t_start(struct seq_file *m, loff_t *pos) 2269 { 2270 struct tracer *t; 2271 loff_t l = 0; 2272 2273 mutex_lock(&trace_types_lock); 2274 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) 2275 ; 2276 2277 return t; 2278 } 2279 2280 static void t_stop(struct seq_file *m, void *p) 2281 { 2282 mutex_unlock(&trace_types_lock); 2283 } 2284 2285 static int t_show(struct seq_file *m, void *v) 2286 { 2287 struct tracer *t = v; 2288 2289 if (!t) 2290 return 0; 2291 2292 seq_printf(m, "%s", t->name); 2293 if (t->next) 2294 seq_putc(m, ' '); 2295 else 2296 seq_putc(m, '\n'); 2297 2298 return 0; 2299 } 2300 2301 static const struct seq_operations show_traces_seq_ops = { 2302 .start = t_start, 2303 .next = t_next, 2304 .stop = t_stop, 2305 .show = t_show, 2306 }; 2307 2308 static int show_traces_open(struct inode *inode, struct file *file) 2309 { 2310 if (tracing_disabled) 2311 return -ENODEV; 2312 2313 return seq_open(file, &show_traces_seq_ops); 2314 } 2315 2316 static ssize_t 2317 tracing_write_stub(struct file *filp, const char __user *ubuf, 2318 size_t count, loff_t *ppos) 2319 { 2320 return count; 2321 } 2322 2323 static const struct file_operations tracing_fops = { 2324 .open = tracing_open, 2325 .read = seq_read, 2326 .write = tracing_write_stub, 2327 .llseek = seq_lseek, 2328 .release = tracing_release, 2329 }; 2330 2331 static const struct file_operations show_traces_fops = { 2332 .open = show_traces_open, 2333 .read = seq_read, 2334 .release = seq_release, 2335 .llseek = seq_lseek, 2336 }; 2337 2338 /* 2339 * Only trace on a CPU if the bitmask is set: 2340 */ 2341 static cpumask_var_t tracing_cpumask; 2342 2343 /* 2344 * The tracer itself will not take this lock, but still we want 2345 * to provide a consistent cpumask to user-space: 2346 */ 2347 static DEFINE_MUTEX(tracing_cpumask_update_lock); 2348 2349 /* 2350 * Temporary storage for the character representation of the 2351 * CPU bitmask (and one more byte for the newline): 2352 */ 2353 static char mask_str[NR_CPUS + 1]; 2354 2355 static ssize_t 2356 tracing_cpumask_read(struct file *filp, char __user *ubuf, 2357 size_t count, loff_t *ppos) 2358 { 2359 int len; 2360 2361 mutex_lock(&tracing_cpumask_update_lock); 2362 2363 len = cpumask_scnprintf(mask_str, count, tracing_cpumask); 2364 if (count - len < 2) { 2365 count = -EINVAL; 2366 goto out_err; 2367 } 2368 len += sprintf(mask_str + len, "\n"); 2369 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); 2370 2371 out_err: 2372 mutex_unlock(&tracing_cpumask_update_lock); 2373 2374 return count; 2375 } 2376 2377 static ssize_t 2378 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 2379 size_t count, loff_t *ppos) 2380 { 2381 int err, cpu; 2382 cpumask_var_t tracing_cpumask_new; 2383 2384 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 2385 return -ENOMEM; 2386 2387 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 2388 if (err) 2389 goto err_unlock; 2390 2391 mutex_lock(&tracing_cpumask_update_lock); 2392 2393 local_irq_disable(); 2394 arch_spin_lock(&ftrace_max_lock); 
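/*
 * ftrace_max_lock is held with interrupts disabled so that the
 * per-cpu disabled counters and the cpumask update below appear
 * atomic to the max-latency snapshot code.
 */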
2395 for_each_tracing_cpu(cpu) { 2396 /* 2397 * Increase/decrease the disabled counter if we are 2398 * about to flip a bit in the cpumask: 2399 */ 2400 if (cpumask_test_cpu(cpu, tracing_cpumask) && 2401 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2402 atomic_inc(&global_trace.data[cpu]->disabled); 2403 } 2404 if (!cpumask_test_cpu(cpu, tracing_cpumask) && 2405 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2406 atomic_dec(&global_trace.data[cpu]->disabled); 2407 } 2408 } 2409 arch_spin_unlock(&ftrace_max_lock); 2410 local_irq_enable(); 2411 2412 cpumask_copy(tracing_cpumask, tracing_cpumask_new); 2413 2414 mutex_unlock(&tracing_cpumask_update_lock); 2415 free_cpumask_var(tracing_cpumask_new); 2416 2417 return count; 2418 2419 err_unlock: 2420 free_cpumask_var(tracing_cpumask_new); 2421 2422 return err; 2423 } 2424 2425 static const struct file_operations tracing_cpumask_fops = { 2426 .open = tracing_open_generic, 2427 .read = tracing_cpumask_read, 2428 .write = tracing_cpumask_write, 2429 .llseek = generic_file_llseek, 2430 }; 2431 2432 static int tracing_trace_options_show(struct seq_file *m, void *v) 2433 { 2434 struct tracer_opt *trace_opts; 2435 u32 tracer_flags; 2436 int i; 2437 2438 mutex_lock(&trace_types_lock); 2439 tracer_flags = current_trace->flags->val; 2440 trace_opts = current_trace->flags->opts; 2441 2442 for (i = 0; trace_options[i]; i++) { 2443 if (trace_flags & (1 << i)) 2444 seq_printf(m, "%s\n", trace_options[i]); 2445 else 2446 seq_printf(m, "no%s\n", trace_options[i]); 2447 } 2448 2449 for (i = 0; trace_opts[i].name; i++) { 2450 if (tracer_flags & trace_opts[i].bit) 2451 seq_printf(m, "%s\n", trace_opts[i].name); 2452 else 2453 seq_printf(m, "no%s\n", trace_opts[i].name); 2454 } 2455 mutex_unlock(&trace_types_lock); 2456 2457 return 0; 2458 } 2459 2460 static int __set_tracer_option(struct tracer *trace, 2461 struct tracer_flags *tracer_flags, 2462 struct tracer_opt *opts, int neg) 2463 { 2464 int ret; 2465 2466 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); 2467 if (ret) 2468 return ret; 2469 2470 if (neg) 2471 tracer_flags->val &= ~opts->bit; 2472 else 2473 tracer_flags->val |= opts->bit; 2474 return 0; 2475 } 2476 2477 /* Try to assign a tracer specific option */ 2478 static int set_tracer_option(struct tracer *trace, char *cmp, int neg) 2479 { 2480 struct tracer_flags *tracer_flags = trace->flags; 2481 struct tracer_opt *opts = NULL; 2482 int i; 2483 2484 for (i = 0; tracer_flags->opts[i].name; i++) { 2485 opts = &tracer_flags->opts[i]; 2486 2487 if (strcmp(cmp, opts->name) == 0) 2488 return __set_tracer_option(trace, trace->flags, 2489 opts, neg); 2490 } 2491 2492 return -EINVAL; 2493 } 2494 2495 static void set_tracer_flags(unsigned int mask, int enabled) 2496 { 2497 /* do nothing if flag is already set */ 2498 if (!!(trace_flags & mask) == !!enabled) 2499 return; 2500 2501 if (enabled) 2502 trace_flags |= mask; 2503 else 2504 trace_flags &= ~mask; 2505 2506 if (mask == TRACE_ITER_RECORD_CMD) 2507 trace_event_enable_cmd_record(enabled); 2508 } 2509 2510 static ssize_t 2511 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 2512 size_t cnt, loff_t *ppos) 2513 { 2514 char buf[64]; 2515 char *cmp; 2516 int neg = 0; 2517 int ret; 2518 int i; 2519 2520 if (cnt >= sizeof(buf)) 2521 return -EINVAL; 2522 2523 if (copy_from_user(&buf, ubuf, cnt)) 2524 return -EFAULT; 2525 2526 buf[cnt] = 0; 2527 cmp = strstrip(buf); 2528 2529 if (strncmp(cmp, "no", 2) == 0) { 2530 neg = 1; 2531 cmp += 2; 2532 } 2533 2534 for (i = 0; trace_options[i]; 
i++) { 2535 if (strcmp(cmp, trace_options[i]) == 0) { 2536 set_tracer_flags(1 << i, !neg); 2537 break; 2538 } 2539 } 2540 2541 /* If no option could be set, test the specific tracer options */ 2542 if (!trace_options[i]) { 2543 mutex_lock(&trace_types_lock); 2544 ret = set_tracer_option(current_trace, cmp, neg); 2545 mutex_unlock(&trace_types_lock); 2546 if (ret) 2547 return ret; 2548 } 2549 2550 *ppos += cnt; 2551 2552 return cnt; 2553 } 2554 2555 static int tracing_trace_options_open(struct inode *inode, struct file *file) 2556 { 2557 if (tracing_disabled) 2558 return -ENODEV; 2559 return single_open(file, tracing_trace_options_show, NULL); 2560 } 2561 2562 static const struct file_operations tracing_iter_fops = { 2563 .open = tracing_trace_options_open, 2564 .read = seq_read, 2565 .llseek = seq_lseek, 2566 .release = single_release, 2567 .write = tracing_trace_options_write, 2568 }; 2569 2570 static const char readme_msg[] = 2571 "tracing mini-HOWTO:\n\n" 2572 "# mount -t debugfs nodev /sys/kernel/debug\n\n" 2573 "# cat /sys/kernel/debug/tracing/available_tracers\n" 2574 "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n" 2575 "# cat /sys/kernel/debug/tracing/current_tracer\n" 2576 "nop\n" 2577 "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n" 2578 "# cat /sys/kernel/debug/tracing/current_tracer\n" 2579 "sched_switch\n" 2580 "# cat /sys/kernel/debug/tracing/trace_options\n" 2581 "noprint-parent nosym-offset nosym-addr noverbose\n" 2582 "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n" 2583 "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n" 2584 "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n" 2585 "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n" 2586 ; 2587 2588 static ssize_t 2589 tracing_readme_read(struct file *filp, char __user *ubuf, 2590 size_t cnt, loff_t *ppos) 2591 { 2592 return simple_read_from_buffer(ubuf, cnt, ppos, 2593 readme_msg, strlen(readme_msg)); 2594 } 2595 2596 static const struct file_operations tracing_readme_fops = { 2597 .open = tracing_open_generic, 2598 .read = tracing_readme_read, 2599 .llseek = generic_file_llseek, 2600 }; 2601 2602 static ssize_t 2603 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, 2604 size_t cnt, loff_t *ppos) 2605 { 2606 char *buf_comm; 2607 char *file_buf; 2608 char *buf; 2609 int len = 0; 2610 int pid; 2611 int i; 2612 2613 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); 2614 if (!file_buf) 2615 return -ENOMEM; 2616 2617 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); 2618 if (!buf_comm) { 2619 kfree(file_buf); 2620 return -ENOMEM; 2621 } 2622 2623 buf = file_buf; 2624 2625 for (i = 0; i < SAVED_CMDLINES; i++) { 2626 int r; 2627 2628 pid = map_cmdline_to_pid[i]; 2629 if (pid == -1 || pid == NO_CMDLINE_MAP) 2630 continue; 2631 2632 trace_find_cmdline(pid, buf_comm); 2633 r = sprintf(buf, "%d %s\n", pid, buf_comm); 2634 buf += r; 2635 len += r; 2636 } 2637 2638 len = simple_read_from_buffer(ubuf, cnt, ppos, 2639 file_buf, len); 2640 2641 kfree(file_buf); 2642 kfree(buf_comm); 2643 2644 return len; 2645 } 2646 2647 static const struct file_operations tracing_saved_cmdlines_fops = { 2648 .open = tracing_open_generic, 2649 .read = tracing_saved_cmdlines_read, 2650 .llseek = generic_file_llseek, 2651 }; 2652 2653 static ssize_t 2654 tracing_ctrl_read(struct file *filp, char __user *ubuf, 2655 size_t cnt, loff_t *ppos) 2656 { 2657 char buf[64]; 2658 int r; 2659 2660 r = sprintf(buf, "%u\n", tracer_enabled); 2661 return 
simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2662 } 2663 2664 static ssize_t 2665 tracing_ctrl_write(struct file *filp, const char __user *ubuf, 2666 size_t cnt, loff_t *ppos) 2667 { 2668 struct trace_array *tr = filp->private_data; 2669 char buf[64]; 2670 unsigned long val; 2671 int ret; 2672 2673 if (cnt >= sizeof(buf)) 2674 return -EINVAL; 2675 2676 if (copy_from_user(&buf, ubuf, cnt)) 2677 return -EFAULT; 2678 2679 buf[cnt] = 0; 2680 2681 ret = strict_strtoul(buf, 10, &val); 2682 if (ret < 0) 2683 return ret; 2684 2685 val = !!val; 2686 2687 mutex_lock(&trace_types_lock); 2688 if (tracer_enabled ^ val) { 2689 if (val) { 2690 tracer_enabled = 1; 2691 if (current_trace->start) 2692 current_trace->start(tr); 2693 tracing_start(); 2694 } else { 2695 tracer_enabled = 0; 2696 tracing_stop(); 2697 if (current_trace->stop) 2698 current_trace->stop(tr); 2699 } 2700 } 2701 mutex_unlock(&trace_types_lock); 2702 2703 *ppos += cnt; 2704 2705 return cnt; 2706 } 2707 2708 static ssize_t 2709 tracing_set_trace_read(struct file *filp, char __user *ubuf, 2710 size_t cnt, loff_t *ppos) 2711 { 2712 char buf[MAX_TRACER_SIZE+2]; 2713 int r; 2714 2715 mutex_lock(&trace_types_lock); 2716 if (current_trace) 2717 r = sprintf(buf, "%s\n", current_trace->name); 2718 else 2719 r = sprintf(buf, "\n"); 2720 mutex_unlock(&trace_types_lock); 2721 2722 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2723 } 2724 2725 int tracer_init(struct tracer *t, struct trace_array *tr) 2726 { 2727 tracing_reset_online_cpus(tr); 2728 return t->init(tr); 2729 } 2730 2731 static int tracing_resize_ring_buffer(unsigned long size) 2732 { 2733 int ret; 2734 2735 /* 2736 * If the kernel or user changes the size of the ring buffer, 2737 * we use the size that was given, and we can forget about 2738 * expanding it later. 2739 */ 2740 ring_buffer_expanded = 1; 2741 2742 ret = ring_buffer_resize(global_trace.buffer, size); 2743 if (ret < 0) 2744 return ret; 2745 2746 if (!current_trace->use_max_tr) 2747 goto out; 2748 2749 ret = ring_buffer_resize(max_tr.buffer, size); 2750 if (ret < 0) { 2751 int r; 2752 2753 r = ring_buffer_resize(global_trace.buffer, 2754 global_trace.entries); 2755 if (r < 0) { 2756 /* 2757 * AARGH! We are left with a max buffer 2758 * of a different size!!!! 2759 * The max buffer is our "snapshot" buffer. 2760 * When a tracer needs a snapshot (one of the 2761 * latency tracers), it swaps the max buffer 2762 * with the saved snapshot. We succeeded in 2763 * updating the size of the main buffer, but failed to 2764 * update the size of the max buffer. And when we tried 2765 * to reset the main buffer to its original size, we 2766 * failed there too. This is very unlikely to 2767 * happen, but if it does, warn and kill all 2768 * tracing. 2769 */ 2770 WARN_ON(1); 2771 tracing_disabled = 1; 2772 } 2773 return ret; 2774 } 2775 2776 max_tr.entries = size; 2777 out: 2778 global_trace.entries = size; 2779 2780 return ret; 2781 } 2782 2783 2784 /** 2785 * tracing_update_buffers - used by tracing facility to expand ring buffers 2786 * 2787 * To save memory on a system that has tracing configured in but never 2788 * uses it, the ring buffers are set to a minimum size. Once a user 2789 * starts to use the tracing facility, the buffers need to grow 2790 * to their default size. 2791 * 2792 * This function is to be called when a tracer is about to be used.
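 *
 * Returns 0 on success, or a negative error code if the resize fails.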
2793 */ 2794 int tracing_update_buffers(void) 2795 { 2796 int ret = 0; 2797 2798 mutex_lock(&trace_types_lock); 2799 if (!ring_buffer_expanded) 2800 ret = tracing_resize_ring_buffer(trace_buf_size); 2801 mutex_unlock(&trace_types_lock); 2802 2803 return ret; 2804 } 2805 2806 struct trace_option_dentry; 2807 2808 static struct trace_option_dentry * 2809 create_trace_option_files(struct tracer *tracer); 2810 2811 static void 2812 destroy_trace_option_files(struct trace_option_dentry *topts); 2813 2814 static int tracing_set_tracer(const char *buf) 2815 { 2816 static struct trace_option_dentry *topts; 2817 struct trace_array *tr = &global_trace; 2818 struct tracer *t; 2819 int ret = 0; 2820 2821 mutex_lock(&trace_types_lock); 2822 2823 if (!ring_buffer_expanded) { 2824 ret = tracing_resize_ring_buffer(trace_buf_size); 2825 if (ret < 0) 2826 goto out; 2827 ret = 0; 2828 } 2829 2830 for (t = trace_types; t; t = t->next) { 2831 if (strcmp(t->name, buf) == 0) 2832 break; 2833 } 2834 if (!t) { 2835 ret = -EINVAL; 2836 goto out; 2837 } 2838 if (t == current_trace) 2839 goto out; 2840 2841 trace_branch_disable(); 2842 if (current_trace && current_trace->reset) 2843 current_trace->reset(tr); 2844 if (current_trace && current_trace->use_max_tr) { 2845 /* 2846 * We don't free the ring buffer. instead, resize it because 2847 * The max_tr ring buffer has some state (e.g. ring->clock) and 2848 * we want preserve it. 2849 */ 2850 ring_buffer_resize(max_tr.buffer, 1); 2851 max_tr.entries = 1; 2852 } 2853 destroy_trace_option_files(topts); 2854 2855 current_trace = t; 2856 2857 topts = create_trace_option_files(current_trace); 2858 if (current_trace->use_max_tr) { 2859 ret = ring_buffer_resize(max_tr.buffer, global_trace.entries); 2860 if (ret < 0) 2861 goto out; 2862 max_tr.entries = global_trace.entries; 2863 } 2864 2865 if (t->init) { 2866 ret = tracer_init(t, tr); 2867 if (ret) 2868 goto out; 2869 } 2870 2871 trace_branch_enable(tr); 2872 out: 2873 mutex_unlock(&trace_types_lock); 2874 2875 return ret; 2876 } 2877 2878 static ssize_t 2879 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 2880 size_t cnt, loff_t *ppos) 2881 { 2882 char buf[MAX_TRACER_SIZE+1]; 2883 int i; 2884 size_t ret; 2885 int err; 2886 2887 ret = cnt; 2888 2889 if (cnt > MAX_TRACER_SIZE) 2890 cnt = MAX_TRACER_SIZE; 2891 2892 if (copy_from_user(&buf, ubuf, cnt)) 2893 return -EFAULT; 2894 2895 buf[cnt] = 0; 2896 2897 /* strip ending whitespace. */ 2898 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) 2899 buf[i] = 0; 2900 2901 err = tracing_set_tracer(buf); 2902 if (err) 2903 return err; 2904 2905 *ppos += ret; 2906 2907 return ret; 2908 } 2909 2910 static ssize_t 2911 tracing_max_lat_read(struct file *filp, char __user *ubuf, 2912 size_t cnt, loff_t *ppos) 2913 { 2914 unsigned long *ptr = filp->private_data; 2915 char buf[64]; 2916 int r; 2917 2918 r = snprintf(buf, sizeof(buf), "%ld\n", 2919 *ptr == (unsigned long)-1 ? 
-1 : nsecs_to_usecs(*ptr)); 2920 if (r > sizeof(buf)) 2921 r = sizeof(buf); 2922 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2923 } 2924 2925 static ssize_t 2926 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 2927 size_t cnt, loff_t *ppos) 2928 { 2929 unsigned long *ptr = filp->private_data; 2930 char buf[64]; 2931 unsigned long val; 2932 int ret; 2933 2934 if (cnt >= sizeof(buf)) 2935 return -EINVAL; 2936 2937 if (copy_from_user(&buf, ubuf, cnt)) 2938 return -EFAULT; 2939 2940 buf[cnt] = 0; 2941 2942 ret = strict_strtoul(buf, 10, &val); 2943 if (ret < 0) 2944 return ret; 2945 2946 *ptr = val * 1000; 2947 2948 return cnt; 2949 } 2950 2951 static int tracing_open_pipe(struct inode *inode, struct file *filp) 2952 { 2953 long cpu_file = (long) inode->i_private; 2954 struct trace_iterator *iter; 2955 int ret = 0; 2956 2957 if (tracing_disabled) 2958 return -ENODEV; 2959 2960 mutex_lock(&trace_types_lock); 2961 2962 /* create a buffer to store the information to pass to userspace */ 2963 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 2964 if (!iter) { 2965 ret = -ENOMEM; 2966 goto out; 2967 } 2968 2969 /* 2970 * We make a copy of the current tracer to avoid concurrent 2971 * changes on it while we are reading. 2972 */ 2973 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); 2974 if (!iter->trace) { 2975 ret = -ENOMEM; 2976 goto fail; 2977 } 2978 if (current_trace) 2979 *iter->trace = *current_trace; 2980 2981 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 2982 ret = -ENOMEM; 2983 goto fail; 2984 } 2985 2986 /* trace pipe does not show start of buffer */ 2987 cpumask_setall(iter->started); 2988 2989 if (trace_flags & TRACE_ITER_LATENCY_FMT) 2990 iter->iter_flags |= TRACE_FILE_LAT_FMT; 2991 2992 iter->cpu_file = cpu_file; 2993 iter->tr = &global_trace; 2994 mutex_init(&iter->mutex); 2995 filp->private_data = iter; 2996 2997 if (iter->trace->pipe_open) 2998 iter->trace->pipe_open(iter); 2999 3000 nonseekable_open(inode, filp); 3001 out: 3002 mutex_unlock(&trace_types_lock); 3003 return ret; 3004 3005 fail: 3006 kfree(iter->trace); 3007 kfree(iter); 3008 mutex_unlock(&trace_types_lock); 3009 return ret; 3010 } 3011 3012 static int tracing_release_pipe(struct inode *inode, struct file *file) 3013 { 3014 struct trace_iterator *iter = file->private_data; 3015 3016 mutex_lock(&trace_types_lock); 3017 3018 if (iter->trace->pipe_close) 3019 iter->trace->pipe_close(iter); 3020 3021 mutex_unlock(&trace_types_lock); 3022 3023 free_cpumask_var(iter->started); 3024 mutex_destroy(&iter->mutex); 3025 kfree(iter->trace); 3026 kfree(iter); 3027 3028 return 0; 3029 } 3030 3031 static unsigned int 3032 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 3033 { 3034 struct trace_iterator *iter = filp->private_data; 3035 3036 if (trace_flags & TRACE_ITER_BLOCK) { 3037 /* 3038 * Always select as readable when in blocking mode 3039 */ 3040 return POLLIN | POLLRDNORM; 3041 } else { 3042 if (!trace_empty(iter)) 3043 return POLLIN | POLLRDNORM; 3044 poll_wait(filp, &trace_wait, poll_table); 3045 if (!trace_empty(iter)) 3046 return POLLIN | POLLRDNORM; 3047 3048 return 0; 3049 } 3050 } 3051 3052 3053 void default_wait_pipe(struct trace_iterator *iter) 3054 { 3055 DEFINE_WAIT(wait); 3056 3057 prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE); 3058 3059 if (trace_empty(iter)) 3060 schedule(); 3061 3062 finish_wait(&trace_wait, &wait); 3063 } 3064 3065 /* 3066 * This is a make-shift waitqueue. 
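 * (poll_wait_pipe() below simply sleeps and retries instead of using
 * a real wait queue.)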
3067 * A tracer might use this callback in some rare cases: 3068 * 3069 * 1) the current tracer might hold the runqueue lock when it wakes up 3070 * a reader, hence a deadlock (sched, function, and function graph tracers) 3071 * 2) the function tracers trace all functions; we don't want 3072 * the overhead of calling wake_up and friends 3073 * (and tracing them too) 3074 * 3075 * Anyway, this is a really primitive wakeup. 3076 */ 3077 void poll_wait_pipe(struct trace_iterator *iter) 3078 { 3079 set_current_state(TASK_INTERRUPTIBLE); 3080 /* sleep for 100 msecs, and try again. */ 3081 schedule_timeout(HZ / 10); 3082 } 3083 3084 /* Must be called with trace_types_lock mutex held. */ 3085 static int tracing_wait_pipe(struct file *filp) 3086 { 3087 struct trace_iterator *iter = filp->private_data; 3088 3089 while (trace_empty(iter)) { 3090 3091 if ((filp->f_flags & O_NONBLOCK)) { 3092 return -EAGAIN; 3093 } 3094 3095 mutex_unlock(&iter->mutex); 3096 3097 iter->trace->wait_pipe(iter); 3098 3099 mutex_lock(&iter->mutex); 3100 3101 if (signal_pending(current)) 3102 return -EINTR; 3103 3104 /* 3105 * Without new data, we only stop blocking once we have 3106 * read something and tracing has been disabled. We still 3107 * block if tracing is disabled but we have never read 3108 * anything. This allows a user to cat this file and then 3109 * enable tracing. But after we have read something, we 3110 * give an EOF when tracing is disabled again. 3111 * 3112 * iter->pos will be 0 if we haven't read anything. 3113 */ 3114 if (!tracer_enabled && iter->pos) 3115 break; 3116 } 3117 3118 return 1; 3119 } 3120 3121 /* 3122 * Consumer reader. 3123 */ 3124 static ssize_t 3125 tracing_read_pipe(struct file *filp, char __user *ubuf, 3126 size_t cnt, loff_t *ppos) 3127 { 3128 struct trace_iterator *iter = filp->private_data; 3129 static struct tracer *old_tracer; 3130 ssize_t sret; 3131 3132 /* return any leftover data */ 3133 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 3134 if (sret != -EBUSY) 3135 return sret; 3136 3137 trace_seq_init(&iter->seq); 3138 3139 /* copy the tracer to avoid using a global lock all around */ 3140 mutex_lock(&trace_types_lock); 3141 if (unlikely(old_tracer != current_trace && current_trace)) { 3142 old_tracer = current_trace; 3143 *iter->trace = *current_trace; 3144 } 3145 mutex_unlock(&trace_types_lock); 3146 3147 /* 3148 * Avoid more than one consumer on a single file descriptor. 3149 * This is just a matter of trace coherency; the ring buffer itself 3150 * is protected.
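 * (iter->mutex only serializes readers of this file descriptor;
 * writers never take it.)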
3150 */ 3151 mutex_lock(&iter->mutex); 3152 if (iter->trace->read) { 3153 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 3154 if (sret) 3155 goto out; 3156 } 3157 3158 waitagain: 3159 sret = tracing_wait_pipe(filp); 3160 if (sret <= 0) 3161 goto out; 3162 3163 /* stop when tracing is finished */ 3164 if (trace_empty(iter)) { 3165 sret = 0; 3166 goto out; 3167 } 3168 3169 if (cnt >= PAGE_SIZE) 3170 cnt = PAGE_SIZE - 1; 3171 3172 /* reset all but tr, trace, and overruns */ 3173 memset(&iter->seq, 0, 3174 sizeof(struct trace_iterator) - 3175 offsetof(struct trace_iterator, seq)); 3176 iter->pos = -1; 3177 3178 trace_event_read_lock(); 3179 trace_access_lock(iter->cpu_file); 3180 while (trace_find_next_entry_inc(iter) != NULL) { 3181 enum print_line_t ret; 3182 int len = iter->seq.len; 3183 3184 ret = print_trace_line(iter); 3185 if (ret == TRACE_TYPE_PARTIAL_LINE) { 3186 /* don't print partial lines */ 3187 iter->seq.len = len; 3188 break; 3189 } 3190 if (ret != TRACE_TYPE_NO_CONSUME) 3191 trace_consume(iter); 3192 3193 if (iter->seq.len >= cnt) 3194 break; 3195 } 3196 trace_access_unlock(iter->cpu_file); 3197 trace_event_read_unlock(); 3198 3199 /* Now copy what we have to the user */ 3200 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 3201 if (iter->seq.readpos >= iter->seq.len) 3202 trace_seq_init(&iter->seq); 3203 3204 /* 3205 * If there was nothing to send to user, inspite of consuming trace 3206 * entries, go back to wait for more entries. 3207 */ 3208 if (sret == -EBUSY) 3209 goto waitagain; 3210 3211 out: 3212 mutex_unlock(&iter->mutex); 3213 3214 return sret; 3215 } 3216 3217 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, 3218 struct pipe_buffer *buf) 3219 { 3220 __free_page(buf->page); 3221 } 3222 3223 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 3224 unsigned int idx) 3225 { 3226 __free_page(spd->pages[idx]); 3227 } 3228 3229 static const struct pipe_buf_operations tracing_pipe_buf_ops = { 3230 .can_merge = 0, 3231 .map = generic_pipe_buf_map, 3232 .unmap = generic_pipe_buf_unmap, 3233 .confirm = generic_pipe_buf_confirm, 3234 .release = tracing_pipe_buf_release, 3235 .steal = generic_pipe_buf_steal, 3236 .get = generic_pipe_buf_get, 3237 }; 3238 3239 static size_t 3240 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 3241 { 3242 size_t count; 3243 int ret; 3244 3245 /* Seq buffer is page-sized, exactly what we need. */ 3246 for (;;) { 3247 count = iter->seq.len; 3248 ret = print_trace_line(iter); 3249 count = iter->seq.len - count; 3250 if (rem < count) { 3251 rem = 0; 3252 iter->seq.len -= count; 3253 break; 3254 } 3255 if (ret == TRACE_TYPE_PARTIAL_LINE) { 3256 iter->seq.len -= count; 3257 break; 3258 } 3259 3260 if (ret != TRACE_TYPE_NO_CONSUME) 3261 trace_consume(iter); 3262 rem -= count; 3263 if (!trace_find_next_entry_inc(iter)) { 3264 rem = 0; 3265 iter->ent = NULL; 3266 break; 3267 } 3268 } 3269 3270 return rem; 3271 } 3272 3273 static ssize_t tracing_splice_read_pipe(struct file *filp, 3274 loff_t *ppos, 3275 struct pipe_inode_info *pipe, 3276 size_t len, 3277 unsigned int flags) 3278 { 3279 struct page *pages_def[PIPE_DEF_BUFFERS]; 3280 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 3281 struct trace_iterator *iter = filp->private_data; 3282 struct splice_pipe_desc spd = { 3283 .pages = pages_def, 3284 .partial = partial_def, 3285 .nr_pages = 0, /* This gets updated below. 
*/ 3286 .flags = flags, 3287 .ops = &tracing_pipe_buf_ops, 3288 .spd_release = tracing_spd_release_pipe, 3289 }; 3290 static struct tracer *old_tracer; 3291 ssize_t ret; 3292 size_t rem; 3293 unsigned int i; 3294 3295 if (splice_grow_spd(pipe, &spd)) 3296 return -ENOMEM; 3297 3298 /* copy the tracer to avoid using a global lock all around */ 3299 mutex_lock(&trace_types_lock); 3300 if (unlikely(old_tracer != current_trace && current_trace)) { 3301 old_tracer = current_trace; 3302 *iter->trace = *current_trace; 3303 } 3304 mutex_unlock(&trace_types_lock); 3305 3306 mutex_lock(&iter->mutex); 3307 3308 if (iter->trace->splice_read) { 3309 ret = iter->trace->splice_read(iter, filp, 3310 ppos, pipe, len, flags); 3311 if (ret) 3312 goto out_err; 3313 } 3314 3315 ret = tracing_wait_pipe(filp); 3316 if (ret <= 0) 3317 goto out_err; 3318 3319 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 3320 ret = -EFAULT; 3321 goto out_err; 3322 } 3323 3324 trace_event_read_lock(); 3325 trace_access_lock(iter->cpu_file); 3326 3327 /* Fill as many pages as possible. */ 3328 for (i = 0, rem = len; i < pipe->buffers && rem; i++) { 3329 spd.pages[i] = alloc_page(GFP_KERNEL); 3330 if (!spd.pages[i]) 3331 break; 3332 3333 rem = tracing_fill_pipe_page(rem, iter); 3334 3335 /* Copy the data into the page, so we can start over. */ 3336 ret = trace_seq_to_buffer(&iter->seq, 3337 page_address(spd.pages[i]), 3338 iter->seq.len); 3339 if (ret < 0) { 3340 __free_page(spd.pages[i]); 3341 break; 3342 } 3343 spd.partial[i].offset = 0; 3344 spd.partial[i].len = iter->seq.len; 3345 3346 trace_seq_init(&iter->seq); 3347 } 3348 3349 trace_access_unlock(iter->cpu_file); 3350 trace_event_read_unlock(); 3351 mutex_unlock(&iter->mutex); 3352 3353 spd.nr_pages = i; 3354 3355 ret = splice_to_pipe(pipe, &spd); 3356 out: 3357 splice_shrink_spd(pipe, &spd); 3358 return ret; 3359 3360 out_err: 3361 mutex_unlock(&iter->mutex); 3362 goto out; 3363 } 3364 3365 static ssize_t 3366 tracing_entries_read(struct file *filp, char __user *ubuf, 3367 size_t cnt, loff_t *ppos) 3368 { 3369 struct trace_array *tr = filp->private_data; 3370 char buf[96]; 3371 int r; 3372 3373 mutex_lock(&trace_types_lock); 3374 if (!ring_buffer_expanded) 3375 r = sprintf(buf, "%lu (expanded: %lu)\n", 3376 tr->entries >> 10, 3377 trace_buf_size >> 10); 3378 else 3379 r = sprintf(buf, "%lu\n", tr->entries >> 10); 3380 mutex_unlock(&trace_types_lock); 3381 3382 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3383 } 3384 3385 static ssize_t 3386 tracing_entries_write(struct file *filp, const char __user *ubuf, 3387 size_t cnt, loff_t *ppos) 3388 { 3389 unsigned long val; 3390 char buf[64]; 3391 int ret, cpu; 3392 3393 if (cnt >= sizeof(buf)) 3394 return -EINVAL; 3395 3396 if (copy_from_user(&buf, ubuf, cnt)) 3397 return -EFAULT; 3398 3399 buf[cnt] = 0; 3400 3401 ret = strict_strtoul(buf, 10, &val); 3402 if (ret < 0) 3403 return ret; 3404 3405 /* must have at least 1 entry */ 3406 if (!val) 3407 return -EINVAL; 3408 3409 mutex_lock(&trace_types_lock); 3410 3411 tracing_stop(); 3412 3413 /* disable all cpu buffers */ 3414 for_each_tracing_cpu(cpu) { 3415 if (global_trace.data[cpu]) 3416 atomic_inc(&global_trace.data[cpu]->disabled); 3417 if (max_tr.data[cpu]) 3418 atomic_inc(&max_tr.data[cpu]->disabled); 3419 } 3420 3421 /* value is in KB */ 3422 val <<= 10; 3423 3424 if (val != global_trace.entries) { 3425 ret = tracing_resize_ring_buffer(val); 3426 if (ret < 0) { 3427 cnt = ret; 3428 goto out; 3429 } 3430 } 3431 3432 *ppos += cnt; 3433 3434 /* If check pages 
failed, return ENOMEM */ 3435 if (tracing_disabled) 3436 cnt = -ENOMEM; 3437 out: 3438 for_each_tracing_cpu(cpu) { 3439 if (global_trace.data[cpu]) 3440 atomic_dec(&global_trace.data[cpu]->disabled); 3441 if (max_tr.data[cpu]) 3442 atomic_dec(&max_tr.data[cpu]->disabled); 3443 } 3444 3445 tracing_start(); 3446 mutex_unlock(&trace_types_lock); 3447 3448 return cnt; 3449 } 3450 3451 static int mark_printk(const char *fmt, ...) 3452 { 3453 int ret; 3454 va_list args; 3455 va_start(args, fmt); 3456 ret = trace_vprintk(0, fmt, args); 3457 va_end(args); 3458 return ret; 3459 } 3460 3461 static ssize_t 3462 tracing_mark_write(struct file *filp, const char __user *ubuf, 3463 size_t cnt, loff_t *fpos) 3464 { 3465 char *buf; 3466 size_t written; 3467 3468 if (tracing_disabled) 3469 return -EINVAL; 3470 3471 if (cnt > TRACE_BUF_SIZE) 3472 cnt = TRACE_BUF_SIZE; 3473 3474 buf = kmalloc(cnt + 2, GFP_KERNEL); 3475 if (buf == NULL) 3476 return -ENOMEM; 3477 3478 if (copy_from_user(buf, ubuf, cnt)) { 3479 kfree(buf); 3480 return -EFAULT; 3481 } 3482 if (buf[cnt-1] != '\n') { 3483 buf[cnt] = '\n'; 3484 buf[cnt+1] = '\0'; 3485 } else 3486 buf[cnt] = '\0'; 3487 3488 written = mark_printk("%s", buf); 3489 kfree(buf); 3490 *fpos += written; 3491 3492 /* don't tell userspace we wrote more - it might confuse them */ 3493 if (written > cnt) 3494 written = cnt; 3495 3496 return written; 3497 } 3498 3499 static int tracing_clock_show(struct seq_file *m, void *v) 3500 { 3501 int i; 3502 3503 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 3504 seq_printf(m, 3505 "%s%s%s%s", i ? " " : "", 3506 i == trace_clock_id ? "[" : "", trace_clocks[i].name, 3507 i == trace_clock_id ? "]" : ""); 3508 seq_putc(m, '\n'); 3509 3510 return 0; 3511 } 3512 3513 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 3514 size_t cnt, loff_t *fpos) 3515 { 3516 char buf[64]; 3517 const char *clockstr; 3518 int i; 3519 3520 if (cnt >= sizeof(buf)) 3521 return -EINVAL; 3522 3523 if (copy_from_user(&buf, ubuf, cnt)) 3524 return -EFAULT; 3525 3526 buf[cnt] = 0; 3527 3528 clockstr = strstrip(buf); 3529 3530 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 3531 if (strcmp(trace_clocks[i].name, clockstr) == 0) 3532 break; 3533 } 3534 if (i == ARRAY_SIZE(trace_clocks)) 3535 return -EINVAL; 3536 3537 trace_clock_id = i; 3538 3539 mutex_lock(&trace_types_lock); 3540 3541 ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func); 3542 if (max_tr.buffer) 3543 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func); 3544 3545 mutex_unlock(&trace_types_lock); 3546 3547 *fpos += cnt; 3548 3549 return cnt; 3550 } 3551 3552 static int tracing_clock_open(struct inode *inode, struct file *file) 3553 { 3554 if (tracing_disabled) 3555 return -ENODEV; 3556 return single_open(file, tracing_clock_show, NULL); 3557 } 3558 3559 static const struct file_operations tracing_max_lat_fops = { 3560 .open = tracing_open_generic, 3561 .read = tracing_max_lat_read, 3562 .write = tracing_max_lat_write, 3563 .llseek = generic_file_llseek, 3564 }; 3565 3566 static const struct file_operations tracing_ctrl_fops = { 3567 .open = tracing_open_generic, 3568 .read = tracing_ctrl_read, 3569 .write = tracing_ctrl_write, 3570 .llseek = generic_file_llseek, 3571 }; 3572 3573 static const struct file_operations set_tracer_fops = { 3574 .open = tracing_open_generic, 3575 .read = tracing_set_trace_read, 3576 .write = tracing_set_trace_write, 3577 .llseek = generic_file_llseek, 3578 }; 3579 3580 static const struct file_operations tracing_pipe_fops = { 3581 
.open = tracing_open_pipe, 3582 .poll = tracing_poll_pipe, 3583 .read = tracing_read_pipe, 3584 .splice_read = tracing_splice_read_pipe, 3585 .release = tracing_release_pipe, 3586 .llseek = no_llseek, 3587 }; 3588 3589 static const struct file_operations tracing_entries_fops = { 3590 .open = tracing_open_generic, 3591 .read = tracing_entries_read, 3592 .write = tracing_entries_write, 3593 .llseek = generic_file_llseek, 3594 }; 3595 3596 static const struct file_operations tracing_mark_fops = { 3597 .open = tracing_open_generic, 3598 .write = tracing_mark_write, 3599 .llseek = generic_file_llseek, 3600 }; 3601 3602 static const struct file_operations trace_clock_fops = { 3603 .open = tracing_clock_open, 3604 .read = seq_read, 3605 .llseek = seq_lseek, 3606 .release = single_release, 3607 .write = tracing_clock_write, 3608 }; 3609 3610 struct ftrace_buffer_info { 3611 struct trace_array *tr; 3612 void *spare; 3613 int cpu; 3614 unsigned int read; 3615 }; 3616 3617 static int tracing_buffers_open(struct inode *inode, struct file *filp) 3618 { 3619 int cpu = (int)(long)inode->i_private; 3620 struct ftrace_buffer_info *info; 3621 3622 if (tracing_disabled) 3623 return -ENODEV; 3624 3625 info = kzalloc(sizeof(*info), GFP_KERNEL); 3626 if (!info) 3627 return -ENOMEM; 3628 3629 info->tr = &global_trace; 3630 info->cpu = cpu; 3631 info->spare = NULL; 3632 /* Force reading ring buffer for first read */ 3633 info->read = (unsigned int)-1; 3634 3635 filp->private_data = info; 3636 3637 return nonseekable_open(inode, filp); 3638 } 3639 3640 static ssize_t 3641 tracing_buffers_read(struct file *filp, char __user *ubuf, 3642 size_t count, loff_t *ppos) 3643 { 3644 struct ftrace_buffer_info *info = filp->private_data; 3645 ssize_t ret; 3646 size_t size; 3647 3648 if (!count) 3649 return 0; 3650 3651 if (!info->spare) 3652 info->spare = ring_buffer_alloc_read_page(info->tr->buffer); 3653 if (!info->spare) 3654 return -ENOMEM; 3655 3656 /* Do we have previous read data to read? 
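 * info->read is the offset into the spare page that has already been
 * copied to user space; anything less than PAGE_SIZE means some data
 * from the last ring_buffer_read_page() is still pending.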
*/ 3657 if (info->read < PAGE_SIZE) 3658 goto read; 3659 3660 info->read = 0; 3661 3662 trace_access_lock(info->cpu); 3663 ret = ring_buffer_read_page(info->tr->buffer, 3664 &info->spare, 3665 count, 3666 info->cpu, 0); 3667 trace_access_unlock(info->cpu); 3668 if (ret < 0) 3669 return 0; 3670 3671 read: 3672 size = PAGE_SIZE - info->read; 3673 if (size > count) 3674 size = count; 3675 3676 ret = copy_to_user(ubuf, info->spare + info->read, size); 3677 if (ret == size) 3678 return -EFAULT; 3679 size -= ret; 3680 3681 *ppos += size; 3682 info->read += size; 3683 3684 return size; 3685 } 3686 3687 static int tracing_buffers_release(struct inode *inode, struct file *file) 3688 { 3689 struct ftrace_buffer_info *info = file->private_data; 3690 3691 if (info->spare) 3692 ring_buffer_free_read_page(info->tr->buffer, info->spare); 3693 kfree(info); 3694 3695 return 0; 3696 } 3697 3698 struct buffer_ref { 3699 struct ring_buffer *buffer; 3700 void *page; 3701 int ref; 3702 }; 3703 3704 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 3705 struct pipe_buffer *buf) 3706 { 3707 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 3708 3709 if (--ref->ref) 3710 return; 3711 3712 ring_buffer_free_read_page(ref->buffer, ref->page); 3713 kfree(ref); 3714 buf->private = 0; 3715 } 3716 3717 static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe, 3718 struct pipe_buffer *buf) 3719 { 3720 return 1; 3721 } 3722 3723 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, 3724 struct pipe_buffer *buf) 3725 { 3726 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 3727 3728 ref->ref++; 3729 } 3730 3731 /* Pipe buffer operations for a buffer. */ 3732 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 3733 .can_merge = 0, 3734 .map = generic_pipe_buf_map, 3735 .unmap = generic_pipe_buf_unmap, 3736 .confirm = generic_pipe_buf_confirm, 3737 .release = buffer_pipe_buf_release, 3738 .steal = buffer_pipe_buf_steal, 3739 .get = buffer_pipe_buf_get, 3740 }; 3741 3742 /* 3743 * Callback from splice_to_pipe(), if we need to release some pages 3744 * at the end of the spd in case we error'ed out in filling the pipe. 
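 * Each page carries its own buffer_ref; the page is handed back to
 * the ring buffer only when the last reference is dropped.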
3745 */ 3746 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) 3747 { 3748 struct buffer_ref *ref = 3749 (struct buffer_ref *)spd->partial[i].private; 3750 3751 if (--ref->ref) 3752 return; 3753 3754 ring_buffer_free_read_page(ref->buffer, ref->page); 3755 kfree(ref); 3756 spd->partial[i].private = 0; 3757 } 3758 3759 static ssize_t 3760 tracing_buffers_splice_read(struct file *file, loff_t *ppos, 3761 struct pipe_inode_info *pipe, size_t len, 3762 unsigned int flags) 3763 { 3764 struct ftrace_buffer_info *info = file->private_data; 3765 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 3766 struct page *pages_def[PIPE_DEF_BUFFERS]; 3767 struct splice_pipe_desc spd = { 3768 .pages = pages_def, 3769 .partial = partial_def, 3770 .flags = flags, 3771 .ops = &buffer_pipe_buf_ops, 3772 .spd_release = buffer_spd_release, 3773 }; 3774 struct buffer_ref *ref; 3775 int entries, size, i; 3776 size_t ret; 3777 3778 if (splice_grow_spd(pipe, &spd)) 3779 return -ENOMEM; 3780 3781 if (*ppos & (PAGE_SIZE - 1)) { 3782 WARN_ONCE(1, "Ftrace: previous read must page-align\n"); 3783 ret = -EINVAL; 3784 goto out; 3785 } 3786 3787 if (len & (PAGE_SIZE - 1)) { 3788 WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); 3789 if (len < PAGE_SIZE) { 3790 ret = -EINVAL; 3791 goto out; 3792 } 3793 len &= PAGE_MASK; 3794 } 3795 3796 trace_access_lock(info->cpu); 3797 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); 3798 3799 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { 3800 struct page *page; 3801 int r; 3802 3803 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 3804 if (!ref) 3805 break; 3806 3807 ref->ref = 1; 3808 ref->buffer = info->tr->buffer; 3809 ref->page = ring_buffer_alloc_read_page(ref->buffer); 3810 if (!ref->page) { 3811 kfree(ref); 3812 break; 3813 } 3814 3815 r = ring_buffer_read_page(ref->buffer, &ref->page, 3816 len, info->cpu, 1); 3817 if (r < 0) { 3818 ring_buffer_free_read_page(ref->buffer, 3819 ref->page); 3820 kfree(ref); 3821 break; 3822 } 3823 3824 /* 3825 * zero out any left over data, this is going to 3826 * user land. 3827 */ 3828 size = ring_buffer_page_len(ref->page); 3829 if (size < PAGE_SIZE) 3830 memset(ref->page + size, 0, PAGE_SIZE - size); 3831 3832 page = virt_to_page(ref->page); 3833 3834 spd.pages[i] = page; 3835 spd.partial[i].len = PAGE_SIZE; 3836 spd.partial[i].offset = 0; 3837 spd.partial[i].private = (unsigned long)ref; 3838 spd.nr_pages++; 3839 *ppos += PAGE_SIZE; 3840 3841 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); 3842 } 3843 3844 trace_access_unlock(info->cpu); 3845 spd.nr_pages = i; 3846 3847 /* did we read anything? 
*/ 3848 if (!spd.nr_pages) { 3849 if (flags & SPLICE_F_NONBLOCK) 3850 ret = -EAGAIN; 3851 else 3852 ret = 0; 3853 /* TODO: block */ 3854 goto out; 3855 } 3856 3857 ret = splice_to_pipe(pipe, &spd); 3858 splice_shrink_spd(pipe, &spd); 3859 out: 3860 return ret; 3861 } 3862 3863 static const struct file_operations tracing_buffers_fops = { 3864 .open = tracing_buffers_open, 3865 .read = tracing_buffers_read, 3866 .release = tracing_buffers_release, 3867 .splice_read = tracing_buffers_splice_read, 3868 .llseek = no_llseek, 3869 }; 3870 3871 static ssize_t 3872 tracing_stats_read(struct file *filp, char __user *ubuf, 3873 size_t count, loff_t *ppos) 3874 { 3875 unsigned long cpu = (unsigned long)filp->private_data; 3876 struct trace_array *tr = &global_trace; 3877 struct trace_seq *s; 3878 unsigned long cnt; 3879 3880 s = kmalloc(sizeof(*s), GFP_KERNEL); 3881 if (!s) 3882 return -ENOMEM; 3883 3884 trace_seq_init(s); 3885 3886 cnt = ring_buffer_entries_cpu(tr->buffer, cpu); 3887 trace_seq_printf(s, "entries: %ld\n", cnt); 3888 3889 cnt = ring_buffer_overrun_cpu(tr->buffer, cpu); 3890 trace_seq_printf(s, "overrun: %ld\n", cnt); 3891 3892 cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu); 3893 trace_seq_printf(s, "commit overrun: %ld\n", cnt); 3894 3895 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); 3896 3897 kfree(s); 3898 3899 return count; 3900 } 3901 3902 static const struct file_operations tracing_stats_fops = { 3903 .open = tracing_open_generic, 3904 .read = tracing_stats_read, 3905 .llseek = generic_file_llseek, 3906 }; 3907 3908 #ifdef CONFIG_DYNAMIC_FTRACE 3909 3910 int __weak ftrace_arch_read_dyn_info(char *buf, int size) 3911 { 3912 return 0; 3913 } 3914 3915 static ssize_t 3916 tracing_read_dyn_info(struct file *filp, char __user *ubuf, 3917 size_t cnt, loff_t *ppos) 3918 { 3919 static char ftrace_dyn_info_buffer[1024]; 3920 static DEFINE_MUTEX(dyn_info_mutex); 3921 unsigned long *p = filp->private_data; 3922 char *buf = ftrace_dyn_info_buffer; 3923 int size = ARRAY_SIZE(ftrace_dyn_info_buffer); 3924 int r; 3925 3926 mutex_lock(&dyn_info_mutex); 3927 r = sprintf(buf, "%ld ", *p); 3928 3929 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); 3930 buf[r++] = '\n'; 3931 3932 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3933 3934 mutex_unlock(&dyn_info_mutex); 3935 3936 return r; 3937 } 3938 3939 static const struct file_operations tracing_dyn_info_fops = { 3940 .open = tracing_open_generic, 3941 .read = tracing_read_dyn_info, 3942 .llseek = generic_file_llseek, 3943 }; 3944 #endif 3945 3946 static struct dentry *d_tracer; 3947 3948 struct dentry *tracing_init_dentry(void) 3949 { 3950 static int once; 3951 3952 if (d_tracer) 3953 return d_tracer; 3954 3955 if (!debugfs_initialized()) 3956 return NULL; 3957 3958 d_tracer = debugfs_create_dir("tracing", NULL); 3959 3960 if (!d_tracer && !once) { 3961 once = 1; 3962 pr_warning("Could not create debugfs directory 'tracing'\n"); 3963 return NULL; 3964 } 3965 3966 return d_tracer; 3967 } 3968 3969 static struct dentry *d_percpu; 3970 3971 struct dentry *tracing_dentry_percpu(void) 3972 { 3973 static int once; 3974 struct dentry *d_tracer; 3975 3976 if (d_percpu) 3977 return d_percpu; 3978 3979 d_tracer = tracing_init_dentry(); 3980 3981 if (!d_tracer) 3982 return NULL; 3983 3984 d_percpu = debugfs_create_dir("per_cpu", d_tracer); 3985 3986 if (!d_percpu && !once) { 3987 once = 1; 3988 pr_warning("Could not create debugfs directory 'per_cpu'\n"); 3989 return NULL; 3990 } 3991 3992 return d_percpu; 3993 } 3994 
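/*
 * Create the debugfs files for one CPU under per_cpu/cpuN/:
 * trace_pipe, trace, trace_pipe_raw and stats.
 */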
3995 static void tracing_init_debugfs_percpu(long cpu) 3996 { 3997 struct dentry *d_percpu = tracing_dentry_percpu(); 3998 struct dentry *d_cpu; 3999 /* strlen("cpu") + up to 3 digits (cpu <= 999) + '\0' = 7 */ 4000 char cpu_dir[7]; 4001 4002 if (cpu > 999 || cpu < 0) 4003 return; 4004 4005 sprintf(cpu_dir, "cpu%ld", cpu); 4006 d_cpu = debugfs_create_dir(cpu_dir, d_percpu); 4007 if (!d_cpu) { 4008 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); 4009 return; 4010 } 4011 4012 /* per cpu trace_pipe */ 4013 trace_create_file("trace_pipe", 0444, d_cpu, 4014 (void *) cpu, &tracing_pipe_fops); 4015 4016 /* per cpu trace */ 4017 trace_create_file("trace", 0644, d_cpu, 4018 (void *) cpu, &tracing_fops); 4019 4020 trace_create_file("trace_pipe_raw", 0444, d_cpu, 4021 (void *) cpu, &tracing_buffers_fops); 4022 4023 trace_create_file("stats", 0444, d_cpu, 4024 (void *) cpu, &tracing_stats_fops); 4025 } 4026 4027 #ifdef CONFIG_FTRACE_SELFTEST 4028 /* Let selftest have access to static functions in this file */ 4029 #include "trace_selftest.c" 4030 #endif 4031 4032 struct trace_option_dentry { 4033 struct tracer_opt *opt; 4034 struct tracer_flags *flags; 4035 struct dentry *entry; 4036 }; 4037 4038 static ssize_t 4039 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, 4040 loff_t *ppos) 4041 { 4042 struct trace_option_dentry *topt = filp->private_data; 4043 char *buf; 4044 4045 if (topt->flags->val & topt->opt->bit) 4046 buf = "1\n"; 4047 else 4048 buf = "0\n"; 4049 4050 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 4051 } 4052 4053 static ssize_t 4054 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, 4055 loff_t *ppos) 4056 { 4057 struct trace_option_dentry *topt = filp->private_data; 4058 unsigned long val; 4059 char buf[64]; 4060 int ret; 4061 4062 if (cnt >= sizeof(buf)) 4063 return -EINVAL; 4064 4065 if (copy_from_user(&buf, ubuf, cnt)) 4066 return -EFAULT; 4067 4068 buf[cnt] = 0; 4069 4070 ret = strict_strtoul(buf, 10, &val); 4071 if (ret < 0) 4072 return ret; 4073 4074 if (val != 0 && val != 1) 4075 return -EINVAL; 4076 4077 if (!!(topt->flags->val & topt->opt->bit) != val) { 4078 mutex_lock(&trace_types_lock); 4079 ret = __set_tracer_option(current_trace, topt->flags, 4080 topt->opt, !val); 4081 mutex_unlock(&trace_types_lock); 4082 if (ret) 4083 return ret; 4084 } 4085 4086 *ppos += cnt; 4087 4088 return cnt; 4089 } 4090 4091 4092 static const struct file_operations trace_options_fops = { 4093 .open = tracing_open_generic, 4094 .read = trace_options_read, 4095 .write = trace_options_write, 4096 .llseek = generic_file_llseek, 4097 }; 4098 4099 static ssize_t 4100 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, 4101 loff_t *ppos) 4102 { 4103 long index = (long)filp->private_data; 4104 char *buf; 4105 4106 if (trace_flags & (1 << index)) 4107 buf = "1\n"; 4108 else 4109 buf = "0\n"; 4110 4111 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 4112 } 4113 4114 static ssize_t 4115 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, 4116 loff_t *ppos) 4117 { 4118 long index = (long)filp->private_data; 4119 char buf[64]; 4120 unsigned long val; 4121 int ret; 4122 4123 if (cnt >= sizeof(buf)) 4124 return -EINVAL; 4125 4126 if (copy_from_user(&buf, ubuf, cnt)) 4127 return -EFAULT; 4128 4129 buf[cnt] = 0; 4130 4131 ret = strict_strtoul(buf, 10, &val); 4132 if (ret < 0) 4133 return ret; 4134 4135 if (val != 0 && val != 1) 4136 return -EINVAL; 4137 set_tracer_flags(1 << index, val); 4138 4139
*ppos += cnt; 4140 4141 return cnt; 4142 } 4143 4144 static const struct file_operations trace_options_core_fops = { 4145 .open = tracing_open_generic, 4146 .read = trace_options_core_read, 4147 .write = trace_options_core_write, 4148 .llseek = generic_file_llseek, 4149 }; 4150 4151 struct dentry *trace_create_file(const char *name, 4152 mode_t mode, 4153 struct dentry *parent, 4154 void *data, 4155 const struct file_operations *fops) 4156 { 4157 struct dentry *ret; 4158 4159 ret = debugfs_create_file(name, mode, parent, data, fops); 4160 if (!ret) 4161 pr_warning("Could not create debugfs '%s' entry\n", name); 4162 4163 return ret; 4164 } 4165 4166 4167 static struct dentry *trace_options_init_dentry(void) 4168 { 4169 struct dentry *d_tracer; 4170 static struct dentry *t_options; 4171 4172 if (t_options) 4173 return t_options; 4174 4175 d_tracer = tracing_init_dentry(); 4176 if (!d_tracer) 4177 return NULL; 4178 4179 t_options = debugfs_create_dir("options", d_tracer); 4180 if (!t_options) { 4181 pr_warning("Could not create debugfs directory 'options'\n"); 4182 return NULL; 4183 } 4184 4185 return t_options; 4186 } 4187 4188 static void 4189 create_trace_option_file(struct trace_option_dentry *topt, 4190 struct tracer_flags *flags, 4191 struct tracer_opt *opt) 4192 { 4193 struct dentry *t_options; 4194 4195 t_options = trace_options_init_dentry(); 4196 if (!t_options) 4197 return; 4198 4199 topt->flags = flags; 4200 topt->opt = opt; 4201 4202 topt->entry = trace_create_file(opt->name, 0644, t_options, topt, 4203 &trace_options_fops); 4204 4205 } 4206 4207 static struct trace_option_dentry * 4208 create_trace_option_files(struct tracer *tracer) 4209 { 4210 struct trace_option_dentry *topts; 4211 struct tracer_flags *flags; 4212 struct tracer_opt *opts; 4213 int cnt; 4214 4215 if (!tracer) 4216 return NULL; 4217 4218 flags = tracer->flags; 4219 4220 if (!flags || !flags->opts) 4221 return NULL; 4222 4223 opts = flags->opts; 4224 4225 for (cnt = 0; opts[cnt].name; cnt++) 4226 ; 4227 4228 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); 4229 if (!topts) 4230 return NULL; 4231 4232 for (cnt = 0; opts[cnt].name; cnt++) 4233 create_trace_option_file(&topts[cnt], flags, 4234 &opts[cnt]); 4235 4236 return topts; 4237 } 4238 4239 static void 4240 destroy_trace_option_files(struct trace_option_dentry *topts) 4241 { 4242 int cnt; 4243 4244 if (!topts) 4245 return; 4246 4247 for (cnt = 0; topts[cnt].opt; cnt++) { 4248 if (topts[cnt].entry) 4249 debugfs_remove(topts[cnt].entry); 4250 } 4251 4252 kfree(topts); 4253 } 4254 4255 static struct dentry * 4256 create_trace_option_core_file(const char *option, long index) 4257 { 4258 struct dentry *t_options; 4259 4260 t_options = trace_options_init_dentry(); 4261 if (!t_options) 4262 return NULL; 4263 4264 return trace_create_file(option, 0644, t_options, (void *)index, 4265 &trace_options_core_fops); 4266 } 4267 4268 static __init void create_trace_options_dir(void) 4269 { 4270 struct dentry *t_options; 4271 int i; 4272 4273 t_options = trace_options_init_dentry(); 4274 if (!t_options) 4275 return; 4276 4277 for (i = 0; trace_options[i]; i++) 4278 create_trace_option_core_file(trace_options[i], i); 4279 } 4280 4281 static __init int tracer_init_debugfs(void) 4282 { 4283 struct dentry *d_tracer; 4284 int cpu; 4285 4286 trace_access_lock_init(); 4287 4288 d_tracer = tracing_init_dentry(); 4289 4290 trace_create_file("tracing_enabled", 0644, d_tracer, 4291 &global_trace, &tracing_ctrl_fops); 4292 4293 trace_create_file("trace_options", 0644, d_tracer, 
4294 NULL, &tracing_iter_fops); 4295 4296 trace_create_file("tracing_cpumask", 0644, d_tracer, 4297 NULL, &tracing_cpumask_fops); 4298 4299 trace_create_file("trace", 0644, d_tracer, 4300 (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); 4301 4302 trace_create_file("available_tracers", 0444, d_tracer, 4303 &global_trace, &show_traces_fops); 4304 4305 trace_create_file("current_tracer", 0644, d_tracer, 4306 &global_trace, &set_tracer_fops); 4307 4308 #ifdef CONFIG_TRACER_MAX_TRACE 4309 trace_create_file("tracing_max_latency", 0644, d_tracer, 4310 &tracing_max_latency, &tracing_max_lat_fops); 4311 #endif 4312 4313 trace_create_file("tracing_thresh", 0644, d_tracer, 4314 &tracing_thresh, &tracing_max_lat_fops); 4315 4316 trace_create_file("README", 0444, d_tracer, 4317 NULL, &tracing_readme_fops); 4318 4319 trace_create_file("trace_pipe", 0444, d_tracer, 4320 (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); 4321 4322 trace_create_file("buffer_size_kb", 0644, d_tracer, 4323 &global_trace, &tracing_entries_fops); 4324 4325 trace_create_file("trace_marker", 0220, d_tracer, 4326 NULL, &tracing_mark_fops); 4327 4328 trace_create_file("saved_cmdlines", 0444, d_tracer, 4329 NULL, &tracing_saved_cmdlines_fops); 4330 4331 trace_create_file("trace_clock", 0644, d_tracer, NULL, 4332 &trace_clock_fops); 4333 4334 #ifdef CONFIG_DYNAMIC_FTRACE 4335 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 4336 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 4337 #endif 4338 4339 create_trace_options_dir(); 4340 4341 for_each_tracing_cpu(cpu) 4342 tracing_init_debugfs_percpu(cpu); 4343 4344 return 0; 4345 } 4346 4347 static int trace_panic_handler(struct notifier_block *this, 4348 unsigned long event, void *unused) 4349 { 4350 if (ftrace_dump_on_oops) 4351 ftrace_dump(ftrace_dump_on_oops); 4352 return NOTIFY_OK; 4353 } 4354 4355 static struct notifier_block trace_panic_notifier = { 4356 .notifier_call = trace_panic_handler, 4357 .next = NULL, 4358 .priority = 150 /* priority: INT_MAX >= x >= 0 */ 4359 }; 4360 4361 static int trace_die_handler(struct notifier_block *self, 4362 unsigned long val, 4363 void *data) 4364 { 4365 switch (val) { 4366 case DIE_OOPS: 4367 if (ftrace_dump_on_oops) 4368 ftrace_dump(ftrace_dump_on_oops); 4369 break; 4370 default: 4371 break; 4372 } 4373 return NOTIFY_OK; 4374 } 4375 4376 static struct notifier_block trace_die_notifier = { 4377 .notifier_call = trace_die_handler, 4378 .priority = 200 4379 }; 4380 4381 /* 4382 * printk is limited to 1024 bytes; we really don't need it that big. 4383 * Nothing should be printing 1000 characters anyway. 4384 */ 4385 #define TRACE_MAX_PRINT 1000 4386 4387 /* 4388 * Define here KERN_TRACE so that we have one place to modify 4389 * it if we decide to change what log level the ftrace dump 4390 * should be at. 4391 */ 4392 #define KERN_TRACE KERN_EMERG 4393 4394 void 4395 trace_printk_seq(struct trace_seq *s) 4396 { 4397 /* Probably should print a warning here. */ 4398 if (s->len >= TRACE_MAX_PRINT) 4399 s->len = TRACE_MAX_PRINT; 4400 4401 /* should be zero-terminated, but we are paranoid.
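 * (nothing guarantees a NUL at s->buffer[s->len], so write one before
 * handing the string to printk())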
/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = current_trace;
	iter->cpu_file = TRACE_PIPE_ALL_CPU;
}
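/*
 * Worker for ftrace_dump(): prints the ring buffer contents to the
 * console via printk(). It is expected to run in oops/panic context,
 * so it takes a raw arch spinlock with IRQs off and runs at most once
 * (dump_ran) to avoid recursing into itself or interleaving with a
 * dump already in progress on another CPU.
 */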
static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
{
	static arch_spinlock_t ftrace_dump_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;

	/* only one dump */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_dump_lock);
	if (dump_ran)
		goto out;

	dump_ran = 1;

	tracing_off();

	if (disable_tracing)
		ftrace_kill();

	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&iter.tr->data[cpu]->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	/* Simulate the iterator */
	iter.tr = &global_trace;
	iter.trace = current_trace;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = TRACE_PIPE_ALL_CPU;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = TRACE_PIPE_ALL_CPU;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but it is
	 * not done often. We print all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Re-enable tracing if requested */
	if (!disable_tracing) {
		trace_flags |= old_userobj;

		for_each_tracing_cpu(cpu) {
			atomic_dec(&iter.tr->data[cpu]->disabled);
		}
		tracing_on();
	}

 out:
	arch_spin_unlock(&ftrace_dump_lock);
	local_irq_restore(flags);
}

/* By default: disable tracing after the dump */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	__ftrace_dump(true, oops_dump_mode);
}

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int i;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	/* TODO: make the number of buffers hot pluggable with CPUs */
	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
						TRACE_BUFFER_FLAGS);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}
	global_trace.entries = ring_buffer_size(global_trace.buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
	}
	max_tr.entries = 1;
#endif

	/* Allocate the first page for all buffers */
	for_each_tracing_cpu(i) {
		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_tr_data, i);
	}

	trace_init_cmdlines();

	register_tracer(&nop_trace);
	current_trace = &nop_trace;
	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	return 0;

out_free_cpumask:
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
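/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug, and
 * 1408 being just an example size in KB): the boot-time buffer above is
 * kept at its minimum size until something expands it, either a tracer
 * being enabled or a write such as
 *
 *	echo 1408 > /sys/kernel/debug/tracing/buffer_size_kb
 *
 * which resizes the per-CPU buffers through the buffer_size_kb file
 * created in tracer_init_debugfs().
 */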
__init static int clear_boot_tracer(void)
{
	/*
	 * The name of the default bootup tracer is held in an init
	 * section. This function is called from a late initcall. If we
	 * did not find the boot tracer, then clear it out, to prevent
	 * a later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
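/*
 * Initcall ordering matters here: early_initcall() runs before
 * fs_initcall(), which runs before late_initcall(). The ring buffers
 * must exist before debugfs is populated, and the bootup tracer name
 * sits in an init section, so it is cleared last, just before init
 * memory is freed.
 */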