1 /* 2 * 3 * Function graph tracer. 4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> 5 * Mostly borrowed from function tracer which 6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com> 7 * 8 */ 9 #include <linux/debugfs.h> 10 #include <linux/uaccess.h> 11 #include <linux/ftrace.h> 12 #include <linux/fs.h> 13 14 #include "trace.h" 15 #include "trace_output.h" 16 17 struct fgraph_data { 18 pid_t last_pid; 19 int depth; 20 }; 21 22 #define TRACE_GRAPH_INDENT 2 23 24 /* Flag options */ 25 #define TRACE_GRAPH_PRINT_OVERRUN 0x1 26 #define TRACE_GRAPH_PRINT_CPU 0x2 27 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 28 #define TRACE_GRAPH_PRINT_PROC 0x8 29 #define TRACE_GRAPH_PRINT_DURATION 0x10 30 #define TRACE_GRAPH_PRINT_ABS_TIME 0X20 31 32 static struct tracer_opt trace_opts[] = { 33 /* Display overruns? (for self-debug purpose) */ 34 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, 35 /* Display CPU ? */ 36 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, 37 /* Display Overhead ? */ 38 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, 39 /* Display proc name/pid */ 40 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, 41 /* Display duration of execution */ 42 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, 43 /* Display absolute time of an entry */ 44 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, 45 { } /* Empty entry */ 46 }; 47 48 static struct tracer_flags tracer_flags = { 49 /* Don't display overruns and proc by default */ 50 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | 51 TRACE_GRAPH_PRINT_DURATION, 52 .opts = trace_opts 53 }; 54 55 static struct trace_array *graph_array; 56 57 58 /* Add a function return address to the trace stack on thread info.*/ 59 int 60 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, 61 unsigned long frame_pointer) 62 { 63 unsigned long long calltime; 64 int index; 65 66 if (!current->ret_stack) 67 return -EBUSY; 68 69 /* 70 * We must make sure the ret_stack is tested before we read 71 * anything else. 72 */ 73 smp_rmb(); 74 75 /* The return trace stack is full */ 76 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { 77 atomic_inc(¤t->trace_overrun); 78 return -EBUSY; 79 } 80 81 calltime = trace_clock_local(); 82 83 index = ++current->curr_ret_stack; 84 barrier(); 85 current->ret_stack[index].ret = ret; 86 current->ret_stack[index].func = func; 87 current->ret_stack[index].calltime = calltime; 88 current->ret_stack[index].subtime = 0; 89 current->ret_stack[index].fp = frame_pointer; 90 *depth = index; 91 92 return 0; 93 } 94 95 /* Retrieve a function return address to the trace stack on thread info.*/ 96 static void 97 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, 98 unsigned long frame_pointer) 99 { 100 int index; 101 102 index = current->curr_ret_stack; 103 104 if (unlikely(index < 0)) { 105 ftrace_graph_stop(); 106 WARN_ON(1); 107 /* Might as well panic, otherwise we have no where to go */ 108 *ret = (unsigned long)panic; 109 return; 110 } 111 112 #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST 113 /* 114 * The arch may choose to record the frame pointer used 115 * and check it here to make sure that it is what we expect it 116 * to be. If gcc does not set the place holder of the return 117 * address in the frame pointer, and does a copy instead, then 118 * the function graph trace will fail. This test detects this 119 * case. 120 * 121 * Currently, x86_32 with optimize for size (-Os) makes the latest 122 * gcc do the above. 123 */ 124 if (unlikely(current->ret_stack[index].fp != frame_pointer)) { 125 ftrace_graph_stop(); 126 WARN(1, "Bad frame pointer: expected %lx, received %lx\n" 127 " from func %pF return to %lx\n", 128 current->ret_stack[index].fp, 129 frame_pointer, 130 (void *)current->ret_stack[index].func, 131 current->ret_stack[index].ret); 132 *ret = (unsigned long)panic; 133 return; 134 } 135 #endif 136 137 *ret = current->ret_stack[index].ret; 138 trace->func = current->ret_stack[index].func; 139 trace->calltime = current->ret_stack[index].calltime; 140 trace->overrun = atomic_read(¤t->trace_overrun); 141 trace->depth = index; 142 } 143 144 /* 145 * Send the trace to the ring-buffer. 146 * @return the original return address. 147 */ 148 unsigned long ftrace_return_to_handler(unsigned long frame_pointer) 149 { 150 struct ftrace_graph_ret trace; 151 unsigned long ret; 152 153 ftrace_pop_return_trace(&trace, &ret, frame_pointer); 154 trace.rettime = trace_clock_local(); 155 ftrace_graph_return(&trace); 156 barrier(); 157 current->curr_ret_stack--; 158 159 if (unlikely(!ret)) { 160 ftrace_graph_stop(); 161 WARN_ON(1); 162 /* Might as well panic. What else to do? */ 163 ret = (unsigned long)panic; 164 } 165 166 return ret; 167 } 168 169 static int __trace_graph_entry(struct trace_array *tr, 170 struct ftrace_graph_ent *trace, 171 unsigned long flags, 172 int pc) 173 { 174 struct ftrace_event_call *call = &event_funcgraph_entry; 175 struct ring_buffer_event *event; 176 struct ring_buffer *buffer = tr->buffer; 177 struct ftrace_graph_ent_entry *entry; 178 179 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) 180 return 0; 181 182 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, 183 sizeof(*entry), flags, pc); 184 if (!event) 185 return 0; 186 entry = ring_buffer_event_data(event); 187 entry->graph_ent = *trace; 188 if (!filter_current_check_discard(buffer, call, entry, event)) 189 ring_buffer_unlock_commit(buffer, event); 190 191 return 1; 192 } 193 194 int trace_graph_entry(struct ftrace_graph_ent *trace) 195 { 196 struct trace_array *tr = graph_array; 197 struct trace_array_cpu *data; 198 unsigned long flags; 199 long disabled; 200 int ret; 201 int cpu; 202 int pc; 203 204 if (unlikely(!tr)) 205 return 0; 206 207 if (!ftrace_trace_task(current)) 208 return 0; 209 210 if (!ftrace_graph_addr(trace->func)) 211 return 0; 212 213 local_irq_save(flags); 214 cpu = raw_smp_processor_id(); 215 data = tr->data[cpu]; 216 disabled = atomic_inc_return(&data->disabled); 217 if (likely(disabled == 1)) { 218 pc = preempt_count(); 219 ret = __trace_graph_entry(tr, trace, flags, pc); 220 } else { 221 ret = 0; 222 } 223 /* Only do the atomic if it is not already set */ 224 if (!test_tsk_trace_graph(current)) 225 set_tsk_trace_graph(current); 226 227 atomic_dec(&data->disabled); 228 local_irq_restore(flags); 229 230 return ret; 231 } 232 233 static void __trace_graph_return(struct trace_array *tr, 234 struct ftrace_graph_ret *trace, 235 unsigned long flags, 236 int pc) 237 { 238 struct ftrace_event_call *call = &event_funcgraph_exit; 239 struct ring_buffer_event *event; 240 struct ring_buffer *buffer = tr->buffer; 241 struct ftrace_graph_ret_entry *entry; 242 243 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) 244 return; 245 246 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, 247 sizeof(*entry), flags, pc); 248 if (!event) 249 return; 250 entry = ring_buffer_event_data(event); 251 entry->ret = *trace; 252 if (!filter_current_check_discard(buffer, call, entry, event)) 253 ring_buffer_unlock_commit(buffer, event); 254 } 255 256 void trace_graph_return(struct ftrace_graph_ret *trace) 257 { 258 struct trace_array *tr = graph_array; 259 struct trace_array_cpu *data; 260 unsigned long flags; 261 long disabled; 262 int cpu; 263 int pc; 264 265 local_irq_save(flags); 266 cpu = raw_smp_processor_id(); 267 data = tr->data[cpu]; 268 disabled = atomic_inc_return(&data->disabled); 269 if (likely(disabled == 1)) { 270 pc = preempt_count(); 271 __trace_graph_return(tr, trace, flags, pc); 272 } 273 if (!trace->depth) 274 clear_tsk_trace_graph(current); 275 atomic_dec(&data->disabled); 276 local_irq_restore(flags); 277 } 278 279 static int graph_trace_init(struct trace_array *tr) 280 { 281 int ret; 282 283 graph_array = tr; 284 ret = register_ftrace_graph(&trace_graph_return, 285 &trace_graph_entry); 286 if (ret) 287 return ret; 288 tracing_start_cmdline_record(); 289 290 return 0; 291 } 292 293 void set_graph_array(struct trace_array *tr) 294 { 295 graph_array = tr; 296 } 297 298 static void graph_trace_reset(struct trace_array *tr) 299 { 300 tracing_stop_cmdline_record(); 301 unregister_ftrace_graph(); 302 } 303 304 static int max_bytes_for_cpu; 305 306 static enum print_line_t 307 print_graph_cpu(struct trace_seq *s, int cpu) 308 { 309 int ret; 310 311 /* 312 * Start with a space character - to make it stand out 313 * to the right a bit when trace output is pasted into 314 * email: 315 */ 316 ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); 317 if (!ret) 318 return TRACE_TYPE_PARTIAL_LINE; 319 320 return TRACE_TYPE_HANDLED; 321 } 322 323 #define TRACE_GRAPH_PROCINFO_LENGTH 14 324 325 static enum print_line_t 326 print_graph_proc(struct trace_seq *s, pid_t pid) 327 { 328 char comm[TASK_COMM_LEN]; 329 /* sign + log10(MAX_INT) + '\0' */ 330 char pid_str[11]; 331 int spaces = 0; 332 int ret; 333 int len; 334 int i; 335 336 trace_find_cmdline(pid, comm); 337 comm[7] = '\0'; 338 sprintf(pid_str, "%d", pid); 339 340 /* 1 stands for the "-" character */ 341 len = strlen(comm) + strlen(pid_str) + 1; 342 343 if (len < TRACE_GRAPH_PROCINFO_LENGTH) 344 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; 345 346 /* First spaces to align center */ 347 for (i = 0; i < spaces / 2; i++) { 348 ret = trace_seq_printf(s, " "); 349 if (!ret) 350 return TRACE_TYPE_PARTIAL_LINE; 351 } 352 353 ret = trace_seq_printf(s, "%s-%s", comm, pid_str); 354 if (!ret) 355 return TRACE_TYPE_PARTIAL_LINE; 356 357 /* Last spaces to align center */ 358 for (i = 0; i < spaces - (spaces / 2); i++) { 359 ret = trace_seq_printf(s, " "); 360 if (!ret) 361 return TRACE_TYPE_PARTIAL_LINE; 362 } 363 return TRACE_TYPE_HANDLED; 364 } 365 366 367 /* If the pid changed since the last trace, output this event */ 368 static enum print_line_t 369 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) 370 { 371 pid_t prev_pid; 372 pid_t *last_pid; 373 int ret; 374 375 if (!data) 376 return TRACE_TYPE_HANDLED; 377 378 last_pid = &(per_cpu_ptr(data, cpu)->last_pid); 379 380 if (*last_pid == pid) 381 return TRACE_TYPE_HANDLED; 382 383 prev_pid = *last_pid; 384 *last_pid = pid; 385 386 if (prev_pid == -1) 387 return TRACE_TYPE_HANDLED; 388 /* 389 * Context-switch trace line: 390 391 ------------------------------------------ 392 | 1) migration/0--1 => sshd-1755 393 ------------------------------------------ 394 395 */ 396 ret = trace_seq_printf(s, 397 " ------------------------------------------\n"); 398 if (!ret) 399 return TRACE_TYPE_PARTIAL_LINE; 400 401 ret = print_graph_cpu(s, cpu); 402 if (ret == TRACE_TYPE_PARTIAL_LINE) 403 return TRACE_TYPE_PARTIAL_LINE; 404 405 ret = print_graph_proc(s, prev_pid); 406 if (ret == TRACE_TYPE_PARTIAL_LINE) 407 return TRACE_TYPE_PARTIAL_LINE; 408 409 ret = trace_seq_printf(s, " => "); 410 if (!ret) 411 return TRACE_TYPE_PARTIAL_LINE; 412 413 ret = print_graph_proc(s, pid); 414 if (ret == TRACE_TYPE_PARTIAL_LINE) 415 return TRACE_TYPE_PARTIAL_LINE; 416 417 ret = trace_seq_printf(s, 418 "\n ------------------------------------------\n\n"); 419 if (!ret) 420 return TRACE_TYPE_PARTIAL_LINE; 421 422 return TRACE_TYPE_HANDLED; 423 } 424 425 static struct ftrace_graph_ret_entry * 426 get_return_for_leaf(struct trace_iterator *iter, 427 struct ftrace_graph_ent_entry *curr) 428 { 429 struct ring_buffer_iter *ring_iter; 430 struct ring_buffer_event *event; 431 struct ftrace_graph_ret_entry *next; 432 433 ring_iter = iter->buffer_iter[iter->cpu]; 434 435 /* First peek to compare current entry and the next one */ 436 if (ring_iter) 437 event = ring_buffer_iter_peek(ring_iter, NULL); 438 else { 439 /* We need to consume the current entry to see the next one */ 440 ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); 441 event = ring_buffer_peek(iter->tr->buffer, iter->cpu, 442 NULL); 443 } 444 445 if (!event) 446 return NULL; 447 448 next = ring_buffer_event_data(event); 449 450 if (next->ent.type != TRACE_GRAPH_RET) 451 return NULL; 452 453 if (curr->ent.pid != next->ent.pid || 454 curr->graph_ent.func != next->ret.func) 455 return NULL; 456 457 /* this is a leaf, now advance the iterator */ 458 if (ring_iter) 459 ring_buffer_read(ring_iter, NULL); 460 461 return next; 462 } 463 464 /* Signal a overhead of time execution to the output */ 465 static int 466 print_graph_overhead(unsigned long long duration, struct trace_seq *s) 467 { 468 /* If duration disappear, we don't need anything */ 469 if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)) 470 return 1; 471 472 /* Non nested entry or return */ 473 if (duration == -1) 474 return trace_seq_printf(s, " "); 475 476 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 477 /* Duration exceeded 100 msecs */ 478 if (duration > 100000ULL) 479 return trace_seq_printf(s, "! "); 480 481 /* Duration exceeded 10 msecs */ 482 if (duration > 10000ULL) 483 return trace_seq_printf(s, "+ "); 484 } 485 486 return trace_seq_printf(s, " "); 487 } 488 489 static int print_graph_abs_time(u64 t, struct trace_seq *s) 490 { 491 unsigned long usecs_rem; 492 493 usecs_rem = do_div(t, NSEC_PER_SEC); 494 usecs_rem /= 1000; 495 496 return trace_seq_printf(s, "%5lu.%06lu | ", 497 (unsigned long)t, usecs_rem); 498 } 499 500 static enum print_line_t 501 print_graph_irq(struct trace_iterator *iter, unsigned long addr, 502 enum trace_type type, int cpu, pid_t pid) 503 { 504 int ret; 505 struct trace_seq *s = &iter->seq; 506 507 if (addr < (unsigned long)__irqentry_text_start || 508 addr >= (unsigned long)__irqentry_text_end) 509 return TRACE_TYPE_UNHANDLED; 510 511 /* Absolute time */ 512 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { 513 ret = print_graph_abs_time(iter->ts, s); 514 if (!ret) 515 return TRACE_TYPE_PARTIAL_LINE; 516 } 517 518 /* Cpu */ 519 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 520 ret = print_graph_cpu(s, cpu); 521 if (ret == TRACE_TYPE_PARTIAL_LINE) 522 return TRACE_TYPE_PARTIAL_LINE; 523 } 524 /* Proc */ 525 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { 526 ret = print_graph_proc(s, pid); 527 if (ret == TRACE_TYPE_PARTIAL_LINE) 528 return TRACE_TYPE_PARTIAL_LINE; 529 ret = trace_seq_printf(s, " | "); 530 if (!ret) 531 return TRACE_TYPE_PARTIAL_LINE; 532 } 533 534 /* No overhead */ 535 ret = print_graph_overhead(-1, s); 536 if (!ret) 537 return TRACE_TYPE_PARTIAL_LINE; 538 539 if (type == TRACE_GRAPH_ENT) 540 ret = trace_seq_printf(s, "==========>"); 541 else 542 ret = trace_seq_printf(s, "<=========="); 543 544 if (!ret) 545 return TRACE_TYPE_PARTIAL_LINE; 546 547 /* Don't close the duration column if haven't one */ 548 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) 549 trace_seq_printf(s, " |"); 550 ret = trace_seq_printf(s, "\n"); 551 552 if (!ret) 553 return TRACE_TYPE_PARTIAL_LINE; 554 return TRACE_TYPE_HANDLED; 555 } 556 557 enum print_line_t 558 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) 559 { 560 unsigned long nsecs_rem = do_div(duration, 1000); 561 /* log10(ULONG_MAX) + '\0' */ 562 char msecs_str[21]; 563 char nsecs_str[5]; 564 int ret, len; 565 int i; 566 567 sprintf(msecs_str, "%lu", (unsigned long) duration); 568 569 /* Print msecs */ 570 ret = trace_seq_printf(s, "%s", msecs_str); 571 if (!ret) 572 return TRACE_TYPE_PARTIAL_LINE; 573 574 len = strlen(msecs_str); 575 576 /* Print nsecs (we don't want to exceed 7 numbers) */ 577 if (len < 7) { 578 snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); 579 ret = trace_seq_printf(s, ".%s", nsecs_str); 580 if (!ret) 581 return TRACE_TYPE_PARTIAL_LINE; 582 len += strlen(nsecs_str); 583 } 584 585 ret = trace_seq_printf(s, " us "); 586 if (!ret) 587 return TRACE_TYPE_PARTIAL_LINE; 588 589 /* Print remaining spaces to fit the row's width */ 590 for (i = len; i < 7; i++) { 591 ret = trace_seq_printf(s, " "); 592 if (!ret) 593 return TRACE_TYPE_PARTIAL_LINE; 594 } 595 return TRACE_TYPE_HANDLED; 596 } 597 598 static enum print_line_t 599 print_graph_duration(unsigned long long duration, struct trace_seq *s) 600 { 601 int ret; 602 603 ret = trace_print_graph_duration(duration, s); 604 if (ret != TRACE_TYPE_HANDLED) 605 return ret; 606 607 ret = trace_seq_printf(s, "| "); 608 if (!ret) 609 return TRACE_TYPE_PARTIAL_LINE; 610 611 return TRACE_TYPE_HANDLED; 612 } 613 614 /* Case of a leaf function on its call entry */ 615 static enum print_line_t 616 print_graph_entry_leaf(struct trace_iterator *iter, 617 struct ftrace_graph_ent_entry *entry, 618 struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s) 619 { 620 struct fgraph_data *data = iter->private; 621 struct ftrace_graph_ret *graph_ret; 622 struct ftrace_graph_ent *call; 623 unsigned long long duration; 624 int ret; 625 int i; 626 627 graph_ret = &ret_entry->ret; 628 call = &entry->graph_ent; 629 duration = graph_ret->rettime - graph_ret->calltime; 630 631 if (data) { 632 int cpu = iter->cpu; 633 int *depth = &(per_cpu_ptr(data, cpu)->depth); 634 635 /* 636 * Comments display at + 1 to depth. Since 637 * this is a leaf function, keep the comments 638 * equal to this depth. 639 */ 640 *depth = call->depth - 1; 641 } 642 643 /* Overhead */ 644 ret = print_graph_overhead(duration, s); 645 if (!ret) 646 return TRACE_TYPE_PARTIAL_LINE; 647 648 /* Duration */ 649 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { 650 ret = print_graph_duration(duration, s); 651 if (ret == TRACE_TYPE_PARTIAL_LINE) 652 return TRACE_TYPE_PARTIAL_LINE; 653 } 654 655 /* Function */ 656 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 657 ret = trace_seq_printf(s, " "); 658 if (!ret) 659 return TRACE_TYPE_PARTIAL_LINE; 660 } 661 662 ret = trace_seq_printf(s, "%pf();\n", (void *)call->func); 663 if (!ret) 664 return TRACE_TYPE_PARTIAL_LINE; 665 666 return TRACE_TYPE_HANDLED; 667 } 668 669 static enum print_line_t 670 print_graph_entry_nested(struct trace_iterator *iter, 671 struct ftrace_graph_ent_entry *entry, 672 struct trace_seq *s, int cpu) 673 { 674 struct ftrace_graph_ent *call = &entry->graph_ent; 675 struct fgraph_data *data = iter->private; 676 int ret; 677 int i; 678 679 if (data) { 680 int cpu = iter->cpu; 681 int *depth = &(per_cpu_ptr(data, cpu)->depth); 682 683 *depth = call->depth; 684 } 685 686 /* No overhead */ 687 ret = print_graph_overhead(-1, s); 688 if (!ret) 689 return TRACE_TYPE_PARTIAL_LINE; 690 691 /* No time */ 692 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { 693 ret = trace_seq_printf(s, " | "); 694 if (!ret) 695 return TRACE_TYPE_PARTIAL_LINE; 696 } 697 698 /* Function */ 699 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 700 ret = trace_seq_printf(s, " "); 701 if (!ret) 702 return TRACE_TYPE_PARTIAL_LINE; 703 } 704 705 ret = trace_seq_printf(s, "%pf() {\n", (void *)call->func); 706 if (!ret) 707 return TRACE_TYPE_PARTIAL_LINE; 708 709 /* 710 * we already consumed the current entry to check the next one 711 * and see if this is a leaf. 712 */ 713 return TRACE_TYPE_NO_CONSUME; 714 } 715 716 static enum print_line_t 717 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, 718 int type, unsigned long addr) 719 { 720 struct fgraph_data *data = iter->private; 721 struct trace_entry *ent = iter->ent; 722 int cpu = iter->cpu; 723 int ret; 724 725 /* Pid */ 726 if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) 727 return TRACE_TYPE_PARTIAL_LINE; 728 729 if (type) { 730 /* Interrupt */ 731 ret = print_graph_irq(iter, addr, type, cpu, ent->pid); 732 if (ret == TRACE_TYPE_PARTIAL_LINE) 733 return TRACE_TYPE_PARTIAL_LINE; 734 } 735 736 /* Absolute time */ 737 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { 738 ret = print_graph_abs_time(iter->ts, s); 739 if (!ret) 740 return TRACE_TYPE_PARTIAL_LINE; 741 } 742 743 /* Cpu */ 744 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 745 ret = print_graph_cpu(s, cpu); 746 if (ret == TRACE_TYPE_PARTIAL_LINE) 747 return TRACE_TYPE_PARTIAL_LINE; 748 } 749 750 /* Proc */ 751 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { 752 ret = print_graph_proc(s, ent->pid); 753 if (ret == TRACE_TYPE_PARTIAL_LINE) 754 return TRACE_TYPE_PARTIAL_LINE; 755 756 ret = trace_seq_printf(s, " | "); 757 if (!ret) 758 return TRACE_TYPE_PARTIAL_LINE; 759 } 760 761 return 0; 762 } 763 764 static enum print_line_t 765 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, 766 struct trace_iterator *iter) 767 { 768 int cpu = iter->cpu; 769 struct ftrace_graph_ent *call = &field->graph_ent; 770 struct ftrace_graph_ret_entry *leaf_ret; 771 772 if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func)) 773 return TRACE_TYPE_PARTIAL_LINE; 774 775 leaf_ret = get_return_for_leaf(iter, field); 776 if (leaf_ret) 777 return print_graph_entry_leaf(iter, field, leaf_ret, s); 778 else 779 return print_graph_entry_nested(iter, field, s, cpu); 780 781 } 782 783 static enum print_line_t 784 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, 785 struct trace_entry *ent, struct trace_iterator *iter) 786 { 787 unsigned long long duration = trace->rettime - trace->calltime; 788 struct fgraph_data *data = iter->private; 789 pid_t pid = ent->pid; 790 int cpu = iter->cpu; 791 int ret; 792 int i; 793 794 if (data) { 795 int cpu = iter->cpu; 796 int *depth = &(per_cpu_ptr(data, cpu)->depth); 797 798 /* 799 * Comments display at + 1 to depth. This is the 800 * return from a function, we now want the comments 801 * to display at the same level of the bracket. 802 */ 803 *depth = trace->depth - 1; 804 } 805 806 if (print_graph_prologue(iter, s, 0, 0)) 807 return TRACE_TYPE_PARTIAL_LINE; 808 809 /* Overhead */ 810 ret = print_graph_overhead(duration, s); 811 if (!ret) 812 return TRACE_TYPE_PARTIAL_LINE; 813 814 /* Duration */ 815 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { 816 ret = print_graph_duration(duration, s); 817 if (ret == TRACE_TYPE_PARTIAL_LINE) 818 return TRACE_TYPE_PARTIAL_LINE; 819 } 820 821 /* Closing brace */ 822 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { 823 ret = trace_seq_printf(s, " "); 824 if (!ret) 825 return TRACE_TYPE_PARTIAL_LINE; 826 } 827 828 ret = trace_seq_printf(s, "}\n"); 829 if (!ret) 830 return TRACE_TYPE_PARTIAL_LINE; 831 832 /* Overrun */ 833 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { 834 ret = trace_seq_printf(s, " (Overruns: %lu)\n", 835 trace->overrun); 836 if (!ret) 837 return TRACE_TYPE_PARTIAL_LINE; 838 } 839 840 ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid); 841 if (ret == TRACE_TYPE_PARTIAL_LINE) 842 return TRACE_TYPE_PARTIAL_LINE; 843 844 return TRACE_TYPE_HANDLED; 845 } 846 847 static enum print_line_t 848 print_graph_comment(struct trace_seq *s, struct trace_entry *ent, 849 struct trace_iterator *iter) 850 { 851 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 852 struct fgraph_data *data = iter->private; 853 struct trace_event *event; 854 int depth = 0; 855 int ret; 856 int i; 857 858 if (data) 859 depth = per_cpu_ptr(data, iter->cpu)->depth; 860 861 if (print_graph_prologue(iter, s, 0, 0)) 862 return TRACE_TYPE_PARTIAL_LINE; 863 864 /* No overhead */ 865 ret = print_graph_overhead(-1, s); 866 if (!ret) 867 return TRACE_TYPE_PARTIAL_LINE; 868 869 /* No time */ 870 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { 871 ret = trace_seq_printf(s, " | "); 872 if (!ret) 873 return TRACE_TYPE_PARTIAL_LINE; 874 } 875 876 /* Indentation */ 877 if (depth > 0) 878 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { 879 ret = trace_seq_printf(s, " "); 880 if (!ret) 881 return TRACE_TYPE_PARTIAL_LINE; 882 } 883 884 /* The comment */ 885 ret = trace_seq_printf(s, "/* "); 886 if (!ret) 887 return TRACE_TYPE_PARTIAL_LINE; 888 889 switch (iter->ent->type) { 890 case TRACE_BPRINT: 891 ret = trace_print_bprintk_msg_only(iter); 892 if (ret != TRACE_TYPE_HANDLED) 893 return ret; 894 break; 895 case TRACE_PRINT: 896 ret = trace_print_printk_msg_only(iter); 897 if (ret != TRACE_TYPE_HANDLED) 898 return ret; 899 break; 900 default: 901 event = ftrace_find_event(ent->type); 902 if (!event) 903 return TRACE_TYPE_UNHANDLED; 904 905 ret = event->trace(iter, sym_flags); 906 if (ret != TRACE_TYPE_HANDLED) 907 return ret; 908 } 909 910 /* Strip ending newline */ 911 if (s->buffer[s->len - 1] == '\n') { 912 s->buffer[s->len - 1] = '\0'; 913 s->len--; 914 } 915 916 ret = trace_seq_printf(s, " */\n"); 917 if (!ret) 918 return TRACE_TYPE_PARTIAL_LINE; 919 920 return TRACE_TYPE_HANDLED; 921 } 922 923 924 enum print_line_t 925 print_graph_function(struct trace_iterator *iter) 926 { 927 struct trace_entry *entry = iter->ent; 928 struct trace_seq *s = &iter->seq; 929 930 switch (entry->type) { 931 case TRACE_GRAPH_ENT: { 932 /* 933 * print_graph_entry() may consume the current event, 934 * thus @field may become invalid, so we need to save it. 935 * sizeof(struct ftrace_graph_ent_entry) is very small, 936 * it can be safely saved at the stack. 937 */ 938 struct ftrace_graph_ent_entry *field, saved; 939 trace_assign_type(field, entry); 940 saved = *field; 941 return print_graph_entry(&saved, s, iter); 942 } 943 case TRACE_GRAPH_RET: { 944 struct ftrace_graph_ret_entry *field; 945 trace_assign_type(field, entry); 946 return print_graph_return(&field->ret, s, entry, iter); 947 } 948 default: 949 return print_graph_comment(s, entry, iter); 950 } 951 952 return TRACE_TYPE_HANDLED; 953 } 954 955 static void print_graph_headers(struct seq_file *s) 956 { 957 /* 1st line */ 958 seq_printf(s, "# "); 959 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) 960 seq_printf(s, " TIME "); 961 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 962 seq_printf(s, "CPU"); 963 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 964 seq_printf(s, " TASK/PID "); 965 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) 966 seq_printf(s, " DURATION "); 967 seq_printf(s, " FUNCTION CALLS\n"); 968 969 /* 2nd line */ 970 seq_printf(s, "# "); 971 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) 972 seq_printf(s, " | "); 973 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 974 seq_printf(s, "| "); 975 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 976 seq_printf(s, " | | "); 977 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) 978 seq_printf(s, " | | "); 979 seq_printf(s, " | | | |\n"); 980 } 981 982 static void graph_trace_open(struct trace_iterator *iter) 983 { 984 /* pid and depth on the last trace processed */ 985 struct fgraph_data *data = alloc_percpu(struct fgraph_data); 986 int cpu; 987 988 if (!data) 989 pr_warning("function graph tracer: not enough memory\n"); 990 else 991 for_each_possible_cpu(cpu) { 992 pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid); 993 int *depth = &(per_cpu_ptr(data, cpu)->depth); 994 *pid = -1; 995 *depth = 0; 996 } 997 998 iter->private = data; 999 } 1000 1001 static void graph_trace_close(struct trace_iterator *iter) 1002 { 1003 free_percpu(iter->private); 1004 } 1005 1006 static struct tracer graph_trace __read_mostly = { 1007 .name = "function_graph", 1008 .open = graph_trace_open, 1009 .close = graph_trace_close, 1010 .wait_pipe = poll_wait_pipe, 1011 .init = graph_trace_init, 1012 .reset = graph_trace_reset, 1013 .print_line = print_graph_function, 1014 .print_header = print_graph_headers, 1015 .flags = &tracer_flags, 1016 #ifdef CONFIG_FTRACE_SELFTEST 1017 .selftest = trace_selftest_startup_function_graph, 1018 #endif 1019 }; 1020 1021 static __init int init_graph_trace(void) 1022 { 1023 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); 1024 1025 return register_tracer(&graph_trace); 1026 } 1027 1028 device_initcall(init_graph_trace); 1029