/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}
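/*
 * Illustrative sketch (not part of the original file): how a tracer
 * might declare its own options instead of falling back on the dummy
 * flags above. The "example" names are hypothetical; TRACER_OPT() is
 * the helper from kernel/trace/trace.h used by real tracers.
 */
#if 0
static struct tracer_opt example_tracer_opts[] = {
	{ TRACER_OPT(example_verbose, 0x1) },
	{ } /* terminator, as with dummy_tracer_opt */
};

static struct tracer_flags example_tracer_flags = {
	.val = 0,
	.opts = example_tracer_opts,
};

static int example_set_flag(u32 old_flags, u32 bit, int set)
{
	/* accept every toggle; a real tracer would react to @bit here */
	return 0;
}
#endif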
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}

static cpumask_var_t __read_mostly	tracing_buffer_mask;

/* Define which cpu buffers are currently read in trace_pipe */
static cpumask_var_t			tracing_reader_cpumask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default; enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_cmdline_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	ftrace_dump_on_oops = 1;
	return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
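/*
 * Usage notes (illustrative, not part of the original file): the two
 * boot parameters above are typically exercised like this:
 *
 *   ftrace=function_graph       on the kernel command line, or
 *   ftrace_dump_on_oops         on the kernel command line, or
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops    at run time
 */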
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset for
 * the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag. Tracers may use this function
 * to know if it should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME;

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	int cpu;

	if (trace_flags & TRACE_ITER_BLOCK)
		return;
	/*
	 * The runqueue_is_locked() can fail, but this is the best we
	 * have for now:
	 */
	cpu = get_cpu();
	if (!runqueue_is_locked(cpu))
		wake_up(&trace_wait);
	put_cpu();
}

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"sched-tree",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
} trace_clocks[] = {
	{ trace_clock_local,	"local" },
	{ trace_clock_global,	"global" },
};

int trace_clock_id;

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
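/*
 * Illustrative sketch (not part of the original file): a typical
 * debugfs write handler driving the parser above. The handler name is
 * hypothetical; the call pattern mirrors real users of trace_get_user().
 */
#if 0
static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	/* room for one token of up to 63 characters plus '\0' */
	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read > 0 && !parser.cont) {
		/* a full space-terminated token is now in parser.buffer */
	}

	trace_parser_put(&parser);
	return read;
}
#endif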
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;
	void *ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = memcpy(buf, s->buffer + s->readpos, cnt);
	if (!ret)
		return -EFAULT;

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr,
 * so it needs to be defined outside of
 * CONFIG_TRACER_MAX_TRACE.
 */
static raw_spinlock_t ftrace_max_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;
unsigned long __read_mostly	tracing_thresh;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	struct trace_array_cpu *max_data;

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	max_data = max_tr.data[cpu];
	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk(&max_tr, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
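/*
 * Illustrative sketch (not part of the original file): how a latency
 * tracer is expected to use update_max_tr() when it observes a new
 * maximum; compare check_critical_timing() in trace_irqsoff.c. The
 * function name here is hypothetical, and the caller must already
 * hold interrupts disabled, as update_max_tr() asserts.
 */
#if 0
static void example_report_latency(struct trace_array *tr, int cpu,
				   unsigned long latency)
{
	if (latency <= tracing_max_latency)
		return;

	tracing_max_latency = latency;
	/* snapshot the live buffers and record the offending task */
	update_max_tr(tr, current, cpu);
}
#endif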
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) > MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	/*
	 * When this gets called we hold the BKL which means that
	 * preemption is disabled. Various trace selftests however
	 * need to disable and enable preemption for successful tests.
	 * So we drop the BKL here and grab it after the tests again.
	 */
	unlock_kernel();
	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		tracing_reset_online_cpus(tr);

		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		tracing_reset_online_cpus(tr);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;

out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* Disable other selftests, since this will break them. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

out_unlock:
	lock_kernel();
	return ret;
}
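/*
 * Illustrative sketch (not part of the original file): the smallest
 * useful register_tracer() caller. The "example" names are
 * hypothetical; compare the nop tracer in trace_nop.c for a real
 * minimal tracer.
 */
#if 0
static int example_trace_init(struct trace_array *tr)
{
	return 0;
}

static void example_trace_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_trace_init,
	.reset	= example_trace_reset,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);
#endif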
Various trace selftests however 636 * need to disable and enable preemption for successful tests. 637 * So we drop the BKL here and grab it after the tests again. 638 */ 639 unlock_kernel(); 640 mutex_lock(&trace_types_lock); 641 642 tracing_selftest_running = true; 643 644 for (t = trace_types; t; t = t->next) { 645 if (strcmp(type->name, t->name) == 0) { 646 /* already found */ 647 pr_info("Tracer %s already registered\n", 648 type->name); 649 ret = -1; 650 goto out; 651 } 652 } 653 654 if (!type->set_flag) 655 type->set_flag = &dummy_set_flag; 656 if (!type->flags) 657 type->flags = &dummy_tracer_flags; 658 else 659 if (!type->flags->opts) 660 type->flags->opts = dummy_tracer_opt; 661 if (!type->wait_pipe) 662 type->wait_pipe = default_wait_pipe; 663 664 665 #ifdef CONFIG_FTRACE_STARTUP_TEST 666 if (type->selftest && !tracing_selftest_disabled) { 667 struct tracer *saved_tracer = current_trace; 668 struct trace_array *tr = &global_trace; 669 670 /* 671 * Run a selftest on this tracer. 672 * Here we reset the trace buffer, and set the current 673 * tracer to be this tracer. The tracer can then run some 674 * internal tracing to verify that everything is in order. 675 * If we fail, we do not register this tracer. 676 */ 677 tracing_reset_online_cpus(tr); 678 679 current_trace = type; 680 /* the test is responsible for initializing and enabling */ 681 pr_info("Testing tracer %s: ", type->name); 682 ret = type->selftest(type, tr); 683 /* the test is responsible for resetting too */ 684 current_trace = saved_tracer; 685 if (ret) { 686 printk(KERN_CONT "FAILED!\n"); 687 goto out; 688 } 689 /* Only reset on passing, to avoid touching corrupted buffers */ 690 tracing_reset_online_cpus(tr); 691 692 printk(KERN_CONT "PASSED\n"); 693 } 694 #endif 695 696 type->next = trace_types; 697 trace_types = type; 698 699 out: 700 tracing_selftest_running = false; 701 mutex_unlock(&trace_types_lock); 702 703 if (ret || !default_bootup_tracer) 704 goto out_unlock; 705 706 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) 707 goto out_unlock; 708 709 printk(KERN_INFO "Starting tracer '%s'\n", type->name); 710 /* Do we want this tracer to start on bootup? */ 711 tracing_set_tracer(type->name); 712 default_bootup_tracer = NULL; 713 /* disable other selftests, since this will break it. 
static void __tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}

void tracing_reset(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	__tracing_reset(tr, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->buffer;
	int cpu;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		__tracing_reset(tr, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace, cpu);
}

void tracing_reset_current_online_cpus(void)
{
	tracing_reset_online_cpus(&global_trace);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return trace_stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	ftrace_start();
 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

void trace_stop_cmdline_recording(void);
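/*
 * Illustrative sketch (not part of the original file): tracing_stop()
 * and tracing_start() nest via trace_stop_count, so paired calls from
 * independent code paths are safe. Hypothetical function name.
 */
#if 0
static void example_quiesce(void)
{
	tracing_stop();		/* first stop disables recording */
	tracing_stop();		/* nested stop only bumps the count */

	/* ... inspect buffers without new entries racing in ... */

	tracing_start();	/* drops the count back to 1 */
	tracing_start();	/* count hits 0: recording resumes */
}
#endif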
static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!__raw_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	__raw_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	__raw_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	__raw_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
	    !tracing_is_on())
		return;

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->lock_depth	= (tsk) ? tsk->lock_depth : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc,
			     int wake)
{
	ring_buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);

	if (wake)
		trace_wake_up();
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
				       struct ring_buffer_event *event,
				       unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}
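/*
 * Illustrative sketch (not part of the original file): the
 * reserve/fill/commit pattern that trace_function() above follows,
 * shown for a hypothetical entry type. TRACE_EXAMPLE and struct
 * example_entry do not exist in the tree.
 */
#if 0
static void example_trace_event(struct trace_array *tr, u64 cookie,
				unsigned long flags, int pc)
{
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct example_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_EXAMPLE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;		/* buffer full or recording disabled */
	entry = ring_buffer_event_data(event);
	entry->cookie = cookie;
	ring_buffer_unlock_commit(buffer, event);
}
#endif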
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= skip;
	trace.entries		= entry->caller;

	save_stack_trace(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
}

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

static void
ftrace_trace_special(void *__tr,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
		     int pc)
{
	struct ftrace_event_call *call = &event_special;
	struct ring_buffer_event *event;
	struct trace_array *tr = __tr;
	struct ring_buffer *buffer = tr->buffer;
	struct special_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
					  sizeof(*entry), 0, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	entry->arg3 = arg3;

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, pc);
}

void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
}

void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (tracing_disabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (likely(atomic_inc_return(&data->disabled) == 1))
		ftrace_trace_special(tr, arg1, arg2, arg3, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
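/*
 * Illustrative sketch (not part of the original file): ftrace_special()
 * is intended for ad-hoc debugging; sprinkle calls with arbitrary
 * values and look for TRACE_SPECIAL entries in the trace output. The
 * handler name below is hypothetical.
 */
#if 0
static void example_driver_irq_handler(void)
{
	ftrace_special(0xdead, jiffies, 0);	/* mark that we got here */
}
#endif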
/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	static raw_spinlock_t trace_buf_lock =
		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	static u32 trace_buf[TRACE_BUF_SIZE];

	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	struct bprint_entry *entry;
	unsigned long flags;
	int disable;
	int resched;
	int cpu, len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	disable = atomic_inc_return(&data->disabled);
	if (unlikely(disable != 1))
		goto out;

	/* Lockdep uses trace_printk for lock tracing */
	local_irq_save(flags);
	__raw_spin_lock(&trace_buf_lock);
	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	if (len > TRACE_BUF_SIZE || len < 0)
		goto out_unlock;

	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->fmt			= fmt;

	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);

out_unlock:
	__raw_spin_unlock(&trace_buf_lock);
	local_irq_restore(flags);

out:
	atomic_dec_return(&data->disabled);
	ftrace_preempt_enable(resched);
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
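/*
 * Usage note (illustrative, not part of the original file):
 * trace_vbprintk() is normally reached through the trace_printk()
 * macro from include/linux/kernel.h, which records the format pointer
 * plus binary arguments rather than a fully formatted string:
 */
#if 0
	trace_printk("frobnicated %d widgets in %lu ns\n", count, delta);
#endif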
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
	static char trace_buf[TRACE_BUF_SIZE];

	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array_cpu *data;
	int cpu, len = 0, size, pc;
	struct print_entry *entry;
	unsigned long irq_flags;
	int disable;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	disable = atomic_inc_return(&data->disabled);
	if (unlikely(disable != 1))
		goto out;

	pause_graph_tracing();
	raw_local_irq_save(irq_flags);
	__raw_spin_lock(&trace_buf_lock);
	if (args == NULL) {
		strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
		len = strlen(trace_buf);
	} else
		len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	size = sizeof(*entry) + len + 1;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, trace_buf, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);

out_unlock:
	__raw_spin_unlock(&trace_buf_lock);
	raw_local_irq_restore(irq_flags);
	unpause_graph_tracing();
out:
	atomic_dec_return(&data->disabled);
	preempt_enable_notrace();

	return len;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
};
static void trace_iterator_increment(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

	ftrace_enable_cpu();

	return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	/*
	 * If we are in a per-CPU trace file, don't bother iterating
	 * over all CPUs; peek directly.
	 */
	if (cpu_file > TRACE_PIPE_ALL_CPU) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}
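/*
 * Worked example (illustrative, not part of the original file) for
 * __find_next_entry(): with per-CPU head timestamps
 *   CPU0: 105, CPU1: 103, CPU2: (empty)
 * the loop skips CPU2, picks CPU1 (103 < 105), and returns that entry
 * with *ent_cpu = 1; repeated calls therefore merge the per-CPU ring
 * buffers into a single stream ordered by timestamp.
 */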
static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
	ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct trace_array *tr = iter->tr;
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	tr->data[cpu]->skipped_entries = 0;

	if (!iter->buffer_iter[cpu])
		return;

	buf_iter = iter->buffer_iter[cpu];
	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->tr->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	tr->data[cpu]->skipped_entries = entries;
}

/*
 * No locking is necessary here. The worst that can happen
 * is losing events consumed at the same time by a trace_pipe reader.
 * Other than that, we don't risk crashing the ring buffer,
 * because it serializes the readers.
 *
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	static struct tracer *old_tracer;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		if (cpu_file == TRACE_PIPE_ALL_CPU) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		ftrace_enable_cpu();

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	trace_event_read_lock();
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	atomic_dec(&trace_record_cmdline_disabled);
	trace_event_read_unlock();
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /_--=> lock-depth      \n");
	seq_puts(m, "#                |||||/     delay            \n");
	seq_puts(m, "#  cmd     pid   |||||| time  |   caller     \n");
	seq_puts(m, "#     \\   /      ||||||   \\   |   /          \n");
}

static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}
static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long entries = 0;
	unsigned long total = 0;
	unsigned long count;
	const char *name = "preemption";
	int cpu;

	if (type)
		name = type->name;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(tr->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (tr->data[cpu]->skipped_entries) {
			count -= tr->data[cpu]->skipped_entries;
			/* total is the same as the entries */
			total += count;
		} else
			total += count +
				ring_buffer_overrun_cpu(tr->buffer, cpu);
		entries += count;
	}

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (iter->tr->data[iter->cpu]->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->trace(iter, sym_flags);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->raw(iter, 0);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->hex(iter, 0);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
}

static int trace_empty(struct trace_iterator *iter)
{
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
		cpu = iter->cpu_file;
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
/* Called with trace_event_read_lock() held. */
static enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		print_trace_line(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file)
{
	long cpu_file = (long) inode->i_private;
	void *fail_ret = ERR_PTR(-ENOMEM);
	struct trace_iterator *iter;
	struct seq_file *m;
	int cpu, ret;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	if (current_trace)
		*iter->trace = *current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = &global_trace;
	iter->pos = -1;
	mutex_init(&iter->mutex);
	iter->cpu_file = cpu_file;

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->tr->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* stop the trace while dumping */
	tracing_stop();

	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
		for_each_tracing_cpu(cpu) {

			iter->buffer_iter[cpu] =
				ring_buffer_read_start(iter->tr->buffer, cpu);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_start(iter->tr->buffer, cpu);
		tracing_iter_reset(iter, cpu);
	}

	ret = seq_open(file, &tracer_seq_ops);
	if (ret < 0) {
		fail_ret = ERR_PTR(ret);
		goto fail_buffer;
	}

	m = file->private_data;
	m->private = iter;

	mutex_unlock(&trace_types_lock);

	return iter;

 fail_buffer:
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}
	free_cpumask_var(iter->started);
	tracing_start();
 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter);

	return fail_ret;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	iter = m->private;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	tracing_start();
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret = 0;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		long cpu = (long) inode->i_private;

		if (cpu == TRACE_PIPE_ALL_CPU)
			tracing_reset_online_cpus(&global_trace);
		else
			tracing_reset(&global_trace, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}
	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = t->next;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = t->next;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(file, &show_traces_seq_ops);
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

static const struct file_operations tracing_fops = {
	.open = tracing_open,
	.read = seq_read,
	.write = tracing_write_stub,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open = show_traces_open,
	.read = seq_read,
	.release = seq_release,
};

/*
 * Only trace on a CPU if the bitmask is set:
 */
static cpumask_var_t tracing_cpumask;

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	int err, cpu;
	cpumask_var_t tracing_cpumask_new;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_free;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	__raw_spin_lock(&ftrace_max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&global_trace.data[cpu]->disabled);
		}
		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_enable();

	cpumask_copy(tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_free:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open = tracing_open_generic,
	.read = tracing_cpumask_read,
	.write = tracing_cpumask_write,
};
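
/*
 * Illustrative tracing_cpumask usage: the mask is parsed by
 * cpumask_parse_user(), so user space writes a hex cpumask. E.g.
 * limiting tracing to CPUs 0 and 1:
 *
 * # echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 */
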
static ssize_t
tracing_trace_options_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct tracer_opt *trace_opts;
	u32 tracer_flags;
	int len = 0;
	char *buf;
	int r = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and newline */
	}

	mutex_lock(&trace_types_lock);
	tracer_flags = current_trace->flags->val;
	trace_opts = current_trace->flags->opts;

	/*
	 * Increase the size with the names of the options specific
	 * to the current tracer.
	 */
	for (i = 0; trace_opts[i].name; i++) {
		len += strlen(trace_opts[i].name);
		len += 3; /* "no" and newline */
	}

	/* +1 for \0 */
	buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&trace_types_lock);
		return -ENOMEM;
	}

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s\n", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			r += sprintf(buf + r, "%s\n",
				trace_opts[i].name);
		else
			r += sprintf(buf + r, "no%s\n",
				trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	WARN_ON(r >= len + 1);

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	kfree(buf);
	return r;
}

/* Try to assign a tracer-specific option */
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int ret = 0, i = 0;
	int len;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];
		len = strlen(opts->name);

		if (strncmp(cmp, opts->name, len) == 0) {
			ret = trace->set_flag(tracer_flags->val,
				opts->bit, !neg);
			break;
		}
	}
	/* Not found */
	if (!tracer_flags->opts[i].name)
		return -EINVAL;

	/* Refused to handle */
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;

	return 0;
}

static void set_tracer_flags(unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int ret;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			set_tracer_flags(1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i]) {
		mutex_lock(&trace_types_lock);
		ret = set_tracer_option(current_trace, cmp, neg);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
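
/*
 * Illustrative trace_options usage: writing an option name sets the
 * flag, and prefixing the name with "no" clears it:
 *
 * # echo print-parent > /sys/kernel/debug/tracing/trace_options
 * # echo noprint-parent > /sys/kernel/debug/tracing/trace_options
 */
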
static const struct file_operations tracing_iter_fops = {
	.open = tracing_open_generic,
	.read = tracing_trace_options_read,
	.write = tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
	"# cat /sys/kernel/debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"nop\n"
	"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /sys/kernel/debug/tracing/trace_options\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
	"# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open = tracing_open_generic,
	.read = tracing_readme_read,
};

static ssize_t
tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char *buf_comm;
	char *file_buf;
	char *buf;
	int len = 0;
	int pid;
	int i;

	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
	if (!file_buf)
		return -ENOMEM;

	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!buf_comm) {
		kfree(file_buf);
		return -ENOMEM;
	}

	buf = file_buf;

	for (i = 0; i < SAVED_CMDLINES; i++) {
		int r;

		pid = map_cmdline_to_pid[i];
		if (pid == -1 || pid == NO_CMDLINE_MAP)
			continue;

		trace_find_cmdline(pid, buf_comm);
		r = sprintf(buf, "%d %s\n", pid, buf_comm);
		buf += r;
		len += r;
	}

	len = simple_read_from_buffer(ubuf, cnt, ppos,
				      file_buf, len);

	kfree(file_buf);
	kfree(buf_comm);

	return len;
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open = tracing_open_generic,
	.read = tracing_saved_cmdlines_read,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = sprintf(buf, "%u\n", tracer_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tracer_enabled ^ val) {
		if (val) {
			tracer_enabled = 1;
			if (current_trace->start)
				current_trace->start(tr);
			tracing_start();
		} else {
			tracer_enabled = 0;
			tracing_stop();
			if (current_trace->stop)
				current_trace->stop(tr);
		}
	}
	mutex_unlock(&trace_types_lock);

	*ppos += cnt;

	return cnt;
}
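
/*
 * "current_tracer" handlers: reading returns the name of the active
 * tracer, writing switches tracers via tracing_set_tracer() below.
 */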
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
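
/**
 * tracer_init - reset the trace buffers and initialize a tracer
 * @t: the tracer to initialize
 * @tr: the trace array the tracer will use
 *
 * The online cpu buffers are reset first so that the new tracer
 * starts from a clean trace.
 */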
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	return t->init(tr);
}

static int tracing_resize_ring_buffer(unsigned long size)
{
	int ret;

	/*
	 * If the kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = 1;

	ret = ring_buffer_resize(global_trace.buffer, size);
	if (ret < 0)
		return ret;

	ret = ring_buffer_resize(max_tr.buffer, size);
	if (ret < 0) {
		int r;

		r = ring_buffer_resize(global_trace.buffer,
				       global_trace.entries);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed
			 * to update the size of the max buffer. And when we
			 * tried to reset the main buffer to the original
			 * size, we failed there too. This is very unlikely
			 * to happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	global_trace.entries = size;

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers start out at a minimum size. Once
 * a user starts to use the tracing facility, they need to grow to
 * their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = tracing_resize_ring_buffer(trace_buf_size);
	mutex_unlock(&trace_types_lock);

	return ret;
}
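
/*
 * Illustrative caller pattern (not taken from this file): a facility
 * that is about to start tracing would do
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *
 * so that the ring buffer is at full size before entries are recorded.
 */
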
struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

static int tracing_set_tracer(const char *buf)
{
	static struct trace_option_dentry *topts;
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = tracing_resize_ring_buffer(trace_buf_size);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == current_trace)
		goto out;

	trace_branch_disable();
	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	destroy_trace_option_files(topts);

	current_trace = t;

	topts = create_trace_option_files(current_trace);

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip trailing whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
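
/*
 * Note that the files served by the two handlers above
 * (tracing_max_latency and tracing_thresh) are in microseconds, while
 * the values themselves are stored in nanoseconds: reads convert with
 * nsecs_to_usecs() and writes multiply the user value by 1000.
 */
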
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	long cpu_file = (long) inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* We only allow one reader per cpu */
	if (cpu_file == TRACE_PIPE_ALL_CPU) {
		if (!cpumask_empty(tracing_reader_cpumask)) {
			ret = -EBUSY;
			goto out;
		}
		cpumask_setall(tracing_reader_cpumask);
	} else {
		if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
			cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
		else {
			ret = -EBUSY;
			goto out;
		}
	}

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	if (current_trace)
		*iter->trace = *current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	iter->cpu_file = cpu_file;
	iter->tr = &global_trace;
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;

	mutex_lock(&trace_types_lock);

	if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
		cpumask_clear(tracing_reader_cpumask);
	else
		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	return 0;
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	if (trace_flags & TRACE_ITER_BLOCK) {
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	} else {
		if (!trace_empty(iter))
			return POLLIN | POLLRDNORM;
		poll_wait(filp, &trace_wait, poll_table);
		if (!trace_empty(iter))
			return POLLIN | POLLRDNORM;

		return 0;
	}
}
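
/*
 * default_wait_pipe - the default trace_pipe wait callback
 *
 * Sleep on trace_wait until woken, but only if the buffers are still
 * empty once we are on the wait queue. Tracers that cannot safely be
 * woken via wake_up() use poll_wait_pipe() below instead.
 */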
void default_wait_pipe(struct trace_iterator *iter)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);

	if (trace_empty(iter))
		schedule();

	finish_wait(&trace_wait, &wait);
}

/*
 * This is a make-shift waitqueue.
 * A tracer might use this callback in some rare cases:
 *
 *  1) the current tracer might hold the runqueue lock when it wakes up
 *     a reader, hence a deadlock (sched, function, and function graph
 *     tracers)
 *  2) the function tracers trace all functions, and we don't want
 *     the overhead of calling wake_up and friends
 *     (and tracing them too)
 *
 * Anyway, this is a really very primitive wakeup.
 */
void poll_wait_pipe(struct trace_iterator *iter)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* sleep for 100 msecs, and try again. */
	schedule_timeout(HZ / 10);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		mutex_unlock(&iter->mutex);

		iter->trace->wait_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_enabled && iter->pos)
			break;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	static struct tracer *old_tracer;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency; the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter->pos = -1;

	trace_event_read_lock();
	while (find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;
	}
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to the user, in spite of
	 * consuming trace entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
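
/*
 * splice(2) support for trace_pipe: the helpers below render trace
 * lines through the seq buffer one page at a time and hand the filled
 * pages to the pipe.
 */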
static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	__free_page(buf->page);
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = tracing_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages = 0, /* This gets updated below. */
		.flags = flags,
		.ops = &tracing_pipe_buf_ops,
		.spd_release = tracing_spd_release_pipe,
	};
	static struct tracer *old_tracer;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(pages[i]);
			break;
		}
		partial[i].offset = 0;
		partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	return splice_to_pipe(pipe, &spd);

out_err:
	mutex_unlock(&iter->mutex);

	return ret;
}
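
/*
 * "buffer_size_kb" handlers. The size is read and written in KB per
 * cpu; until the ring buffer has been expanded, reads also report the
 * size it will be expanded to.
 */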
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[96];
	int r;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		r = sprintf(buf, "%lu (expanded: %lu)\n",
			    tr->entries >> 10,
			    trace_buf_size >> 10);
	else
		r = sprintf(buf, "%lu\n", tr->entries >> 10);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];
	int ret, cpu;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tracing_stop();

	/* disable all cpu buffers */
	for_each_tracing_cpu(cpu) {
		if (global_trace.data[cpu])
			atomic_inc(&global_trace.data[cpu]->disabled);
		if (max_tr.data[cpu])
			atomic_inc(&max_tr.data[cpu]->disabled);
	}

	/* value is in KB */
	val <<= 10;

	if (val != global_trace.entries) {
		ret = tracing_resize_ring_buffer(val);
		if (ret < 0) {
			cnt = ret;
			goto out;
		}
	}

	*ppos += cnt;

	/* If check pages failed, return ENOMEM */
	if (tracing_disabled)
		cnt = -ENOMEM;
out:
	for_each_tracing_cpu(cpu) {
		if (global_trace.data[cpu])
			atomic_dec(&global_trace.data[cpu]->disabled);
		if (max_tr.data[cpu])
			atomic_dec(&max_tr.data[cpu]->disabled);
	}

	tracing_start();
	max_tr.entries = global_trace.entries;
	mutex_unlock(&trace_types_lock);

	return cnt;
}

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	char *buf;

	if (tracing_disabled)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	buf = kmalloc(cnt + 2, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		kfree(buf);
		return -EFAULT;
	}
	if (buf[cnt-1] != '\n') {
		buf[cnt] = '\n';
		buf[cnt+1] = '\0';
	} else
		buf[cnt] = '\0';

	cnt = trace_vprintk(0, buf, NULL);
	kfree(buf);
	*fpos += cnt;

	return cnt;
}

static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	char buf[64];
	int bufiter = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
			"%s%s%s%s", i ? " " : "",
			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
			i == trace_clock_id ? "]" : "");
	bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	char buf[64];
	const char *clockstr;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	trace_clock_id = i;

	mutex_lock(&trace_types_lock);

	ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
	if (max_tr.buffer)
		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);

	mutex_unlock(&trace_types_lock);

	*fpos += cnt;

	return cnt;
}
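
/*
 * Illustrative trace_clock session (the available names come from the
 * trace_clocks[] table; "local" and "global" are shown only as an
 * example, the current clock is printed in brackets):
 *
 * # cat /sys/kernel/debug/tracing/trace_clock
 * [local] global
 * # echo global > /sys/kernel/debug/tracing/trace_clock
 */
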
static const struct file_operations tracing_max_lat_fops = {
	.open = tracing_open_generic,
	.read = tracing_max_lat_read,
	.write = tracing_max_lat_write,
};

static const struct file_operations tracing_ctrl_fops = {
	.open = tracing_open_generic,
	.read = tracing_ctrl_read,
	.write = tracing_ctrl_write,
};

static const struct file_operations set_tracer_fops = {
	.open = tracing_open_generic,
	.read = tracing_set_trace_read,
	.write = tracing_set_trace_write,
};

static const struct file_operations tracing_pipe_fops = {
	.open = tracing_open_pipe,
	.poll = tracing_poll_pipe,
	.read = tracing_read_pipe,
	.splice_read = tracing_splice_read_pipe,
	.release = tracing_release_pipe,
};

static const struct file_operations tracing_entries_fops = {
	.open = tracing_open_generic,
	.read = tracing_entries_read,
	.write = tracing_entries_write,
};

static const struct file_operations tracing_mark_fops = {
	.open = tracing_open_generic,
	.write = tracing_mark_write,
};

static const struct file_operations trace_clock_fops = {
	.open = tracing_open_generic,
	.read = tracing_clock_read,
	.write = tracing_clock_write,
};

struct ftrace_buffer_info {
	struct trace_array	*tr;
	void			*spare;
	int			cpu;
	unsigned int		read;
};

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	int cpu = (int)(long)inode->i_private;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->tr	= &global_trace;
	info->cpu	= cpu;
	info->spare	= NULL;
	/* Force reading ring buffer for first read */
	info->read	= (unsigned int)-1;

	filp->private_data = info;

	return nonseekable_open(inode, filp);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	unsigned int pos;
	ssize_t ret;
	size_t size;

	if (!count)
		return 0;

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

	info->read = 0;

	ret = ring_buffer_read_page(info->tr->buffer,
				    &info->spare,
				    count,
				    info->cpu, 0);
	if (ret < 0)
		return 0;

	pos = ring_buffer_page_len(info->spare);

	if (pos < PAGE_SIZE)
		memset(info->spare + pos, 0, PAGE_SIZE - pos);

read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;
	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;

	if (info->spare)
		ring_buffer_free_read_page(info->tr->buffer, info->spare);
	kfree(info);

	return 0;
}
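
/*
 * A buffer_ref is attached to each page spliced out of trace_pipe_raw;
 * the page is handed back to the ring buffer only when the last
 * reference is dropped.
 */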
3594 */ 3595 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) 3596 { 3597 struct buffer_ref *ref = 3598 (struct buffer_ref *)spd->partial[i].private; 3599 3600 if (--ref->ref) 3601 return; 3602 3603 ring_buffer_free_read_page(ref->buffer, ref->page); 3604 kfree(ref); 3605 spd->partial[i].private = 0; 3606 } 3607 3608 static ssize_t 3609 tracing_buffers_splice_read(struct file *file, loff_t *ppos, 3610 struct pipe_inode_info *pipe, size_t len, 3611 unsigned int flags) 3612 { 3613 struct ftrace_buffer_info *info = file->private_data; 3614 struct partial_page partial[PIPE_BUFFERS]; 3615 struct page *pages[PIPE_BUFFERS]; 3616 struct splice_pipe_desc spd = { 3617 .pages = pages, 3618 .partial = partial, 3619 .flags = flags, 3620 .ops = &buffer_pipe_buf_ops, 3621 .spd_release = buffer_spd_release, 3622 }; 3623 struct buffer_ref *ref; 3624 int entries, size, i; 3625 size_t ret; 3626 3627 if (*ppos & (PAGE_SIZE - 1)) { 3628 WARN_ONCE(1, "Ftrace: previous read must page-align\n"); 3629 return -EINVAL; 3630 } 3631 3632 if (len & (PAGE_SIZE - 1)) { 3633 WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); 3634 if (len < PAGE_SIZE) 3635 return -EINVAL; 3636 len &= PAGE_MASK; 3637 } 3638 3639 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); 3640 3641 for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) { 3642 struct page *page; 3643 int r; 3644 3645 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 3646 if (!ref) 3647 break; 3648 3649 ref->ref = 1; 3650 ref->buffer = info->tr->buffer; 3651 ref->page = ring_buffer_alloc_read_page(ref->buffer); 3652 if (!ref->page) { 3653 kfree(ref); 3654 break; 3655 } 3656 3657 r = ring_buffer_read_page(ref->buffer, &ref->page, 3658 len, info->cpu, 1); 3659 if (r < 0) { 3660 ring_buffer_free_read_page(ref->buffer, 3661 ref->page); 3662 kfree(ref); 3663 break; 3664 } 3665 3666 /* 3667 * zero out any left over data, this is going to 3668 * user land. 3669 */ 3670 size = ring_buffer_page_len(ref->page); 3671 if (size < PAGE_SIZE) 3672 memset(ref->page + size, 0, PAGE_SIZE - size); 3673 3674 page = virt_to_page(ref->page); 3675 3676 spd.pages[i] = page; 3677 spd.partial[i].len = PAGE_SIZE; 3678 spd.partial[i].offset = 0; 3679 spd.partial[i].private = (unsigned long)ref; 3680 spd.nr_pages++; 3681 *ppos += PAGE_SIZE; 3682 3683 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); 3684 } 3685 3686 spd.nr_pages = i; 3687 3688 /* did we read anything? 

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct partial_page partial[PIPE_BUFFERS];
	struct page *pages[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &buffer_pipe_buf_ops,
		.spd_release = buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	size_t ret;

	if (*ppos & (PAGE_SIZE - 1)) {
		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
		return -EINVAL;
	}

	if (len & (PAGE_SIZE - 1)) {
		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);

	for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = info->tr->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, info->cpu, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer,
						   ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any leftover data; this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
	}

	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (flags & SPLICE_F_NONBLOCK)
			ret = -EAGAIN;
		else
			ret = 0;
		/* TODO: block */
		return ret;
	}

	ret = splice_to_pipe(pipe, &spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open = tracing_buffers_open,
	.read = tracing_buffers_read,
	.release = tracing_buffers_release,
	.splice_read = tracing_buffers_splice_read,
	.llseek = no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	unsigned long cpu = (unsigned long)filp->private_data;
	struct trace_array *tr = &global_trace;
	struct trace_seq *s;
	unsigned long cnt;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open = tracing_open_generic,
	.read = tracing_stats_read,
};
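
/*
 * Example of the per_cpu/cpuN/stats output produced above (the
 * values shown are illustrative only):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 */
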
#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open = tracing_open_generic,
	.read = tracing_read_dyn_info,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	if (!debugfs_initialized())
		return NULL;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

static struct dentry *d_percpu;

struct dentry *tracing_dentry_percpu(void)
{
	static int once;
	struct dentry *d_tracer;

	if (d_percpu)
		return d_percpu;

	d_tracer = tracing_init_dentry();

	if (!d_tracer)
		return NULL;

	d_percpu = debugfs_create_dir("per_cpu", d_tracer);

	if (!d_percpu && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'per_cpu'\n");
		return NULL;
	}

	return d_percpu;
}

static void tracing_init_debugfs_percpu(long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu();
	struct dentry *d_cpu;
	/* strlen("cpu") + max number of cpu digits (3) + '\0' */
	char cpu_dir[7];

	if (cpu > 999 || cpu < 0)
		return;

	sprintf(cpu_dir, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_file("trace_pipe", 0444, d_cpu,
			(void *) cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_file("trace", 0644, d_cpu,
			(void *) cpu, &tracing_fops);

	trace_create_file("trace_pipe_raw", 0444, d_cpu,
			(void *) cpu, &tracing_buffers_fops);

	trace_create_file("stats", 0444, d_cpu,
			(void *) cpu, &tracing_stats_fops);
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif
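
/*
 * Per-tracer option files: each tracer_opt of the current tracer is
 * exposed as a boolean file under the "options" debugfs directory,
 * handled by the functions below.
 */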
struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = 0;
	switch (val) {
	case 0:
		/* do nothing if already cleared */
		if (!(topt->flags->val & topt->opt->bit))
			break;

		mutex_lock(&trace_types_lock);
		if (current_trace->set_flag)
			ret = current_trace->set_flag(topt->flags->val,
						      topt->opt->bit, 0);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
		topt->flags->val &= ~topt->opt->bit;
		break;
	case 1:
		/* do nothing if already set */
		if (topt->flags->val & topt->opt->bit)
			break;

		mutex_lock(&trace_types_lock);
		if (current_trace->set_flag)
			ret = current_trace->set_flag(topt->flags->val,
						      topt->opt->bit, 1);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
		topt->flags->val |= topt->opt->bit;
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	long index = (long)filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;
	set_tracer_flags(1 << index, val);

	*ppos += cnt;

	return cnt;
}
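
/*
 * Illustrative use of the option files created below; "print-parent"
 * is one of the core trace_options names:
 *
 * # echo 0 > /sys/kernel/debug/tracing/options/print-parent
 * # echo 1 > /sys/kernel/debug/tracing/options/print-parent
 */
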
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
};

struct dentry *trace_create_file(const char *name,
				 mode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(void)
{
	struct dentry *d_tracer;
	static struct dentry *t_options;

	if (t_options)
		return t_options;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	t_options = debugfs_create_dir("options", d_tracer);
	if (!t_options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return t_options;
}

static void
create_trace_option_file(struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(&topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(void)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(trace_options[i], i);
}
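
/*
 * Create the top-level debugfs tracing files and the per_cpu
 * directories. This runs as an fs_initcall (registered at the bottom
 * of this file).
 */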
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	int cpu;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_enabled", 0644, d_tracer,
			&global_trace, &tracing_ctrl_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			NULL, &tracing_iter_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			NULL, &tracing_cpumask_fops);

	trace_create_file("trace", 0644, d_tracer,
			(void *) TRACE_PIPE_ALL_CPU, &tracing_fops);

	trace_create_file("available_tracers", 0444, d_tracer,
			&global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			&global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tracing_max_latency, &tracing_max_lat_fops);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&tracing_thresh, &tracing_max_lat_fops);
#endif

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			&global_trace, &tracing_entries_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			NULL, &tracing_mark_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("trace_clock", 0644, d_tracer, NULL,
			&trace_clock_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif
#ifdef CONFIG_SYSPROF_TRACER
	init_tracer_sysprof_debugfs(d_tracer);
#endif

	create_trace_options_dir();

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(cpu);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump();
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

static void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

static void __ftrace_dump(bool disable_tracing)
{
	static raw_spinlock_t ftrace_dump_lock =
		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;

	/* only one dump */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_dump_lock);
	if (dump_ran)
		goto out;

	dump_ran = 1;

	tracing_off();

	if (disable_tracing)
		ftrace_kill();

	for_each_tracing_cpu(cpu) {
		atomic_inc(&global_trace.data[cpu]->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Simulate the iterator */
	iter.tr = &global_trace;
	iter.trace = current_trace;
	iter.cpu_file = TRACE_PIPE_ALL_CPU;

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

	/* Re-enable tracing if requested */
	if (!disable_tracing) {
		trace_flags |= old_userobj;

		for_each_tracing_cpu(cpu) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
		tracing_on();
	}

 out:
	__raw_spin_unlock(&ftrace_dump_lock);
	local_irq_restore(flags);
}

/* By default: disable tracing after the dump */
void ftrace_dump(void)
{
	__ftrace_dump(true);
}

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int i;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
		goto out_free_tracing_cpumask;

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	/* TODO: make the number of buffers hot pluggable with CPUs */
	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
						TRACE_BUFFER_FLAGS);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}
	global_trace.entries = ring_buffer_size(global_trace.buffer);


#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
					  TRACE_BUFFER_FLAGS);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
	}
	max_tr.entries = ring_buffer_size(max_tr.buffer);
	WARN_ON(max_tr.entries != global_trace.entries);
#endif

	/* Allocate the first page for all buffers */
	for_each_tracing_cpu(i) {
		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);
	}

	trace_init_cmdlines();

	register_tracer(&nop_trace);
	current_trace = &nop_trace;
#ifdef CONFIG_BOOT_TRACER
	register_tracer(&boot_tracer);
#endif
	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	return 0;

out_free_cpumask:
	free_cpumask_var(tracing_reader_cpumask);
out_free_tracing_cpumask:
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default boot-up tracer buffer lives in an __init section
	 * and is freed once boot completes. This function runs as a
	 * late_initcall: if the requested boot tracer was never
	 * registered, clear the pointer here so that a later
	 * registration cannot access the buffer that is about to be
	 * freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);