1 /* 2 * 3 * Function graph tracer. 4 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> 5 * Mostly borrowed from function tracer which 6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com> 7 * 8 */ 9 #include <linux/debugfs.h> 10 #include <linux/uaccess.h> 11 #include <linux/ftrace.h> 12 #include <linux/fs.h> 13 14 #include "trace.h" 15 16 #define TRACE_GRAPH_INDENT 2 17 18 /* Flag options */ 19 #define TRACE_GRAPH_PRINT_OVERRUN 0x1 20 #define TRACE_GRAPH_PRINT_CPU 0x2 21 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 22 #define TRACE_GRAPH_PRINT_PROC 0x8 23 24 static struct tracer_opt trace_opts[] = { 25 /* Display overruns ? */ 26 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, 27 /* Display CPU ? */ 28 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, 29 /* Display Overhead ? */ 30 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, 31 /* Display proc name/pid */ 32 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, 33 { } /* Empty entry */ 34 }; 35 36 static struct tracer_flags tracer_flags = { 37 /* Don't display overruns and proc by default */ 38 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD, 39 .opts = trace_opts 40 }; 41 42 /* pid on the last trace processed */ 43 static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 }; 44 45 static int graph_trace_init(struct trace_array *tr) 46 { 47 int cpu, ret; 48 49 for_each_online_cpu(cpu) 50 tracing_reset(tr, cpu); 51 52 ret = register_ftrace_graph(&trace_graph_return, 53 &trace_graph_entry); 54 if (ret) 55 return ret; 56 tracing_start_cmdline_record(); 57 58 return 0; 59 } 60 61 static void graph_trace_reset(struct trace_array *tr) 62 { 63 tracing_stop_cmdline_record(); 64 unregister_ftrace_graph(); 65 } 66 67 static inline int log10_cpu(int nb) 68 { 69 if (nb / 100) 70 return 3; 71 if (nb / 10) 72 return 2; 73 return 1; 74 } 75 76 static enum print_line_t 77 print_graph_cpu(struct trace_seq *s, int cpu) 78 { 79 int i; 80 int ret; 81 int log10_this = log10_cpu(cpu); 82 int log10_all = log10_cpu(cpumask_weight(cpu_online_mask)); 83 84 85 /* 86 * Start with a space character - to make it stand out 87 * to the right a bit when trace output is pasted into 88 * email: 89 */ 90 ret = trace_seq_printf(s, " "); 91 92 /* 93 * Tricky - we space the CPU field according to the max 94 * number of online CPUs. On a 2-cpu system it would take 95 * a maximum of 1 digit - on a 128 cpu system it would 96 * take up to 3 digits: 97 */ 98 for (i = 0; i < log10_all - log10_this; i++) { 99 ret = trace_seq_printf(s, " "); 100 if (!ret) 101 return TRACE_TYPE_PARTIAL_LINE; 102 } 103 ret = trace_seq_printf(s, "%d) ", cpu); 104 if (!ret) 105 return TRACE_TYPE_PARTIAL_LINE; 106 107 return TRACE_TYPE_HANDLED; 108 } 109 110 #define TRACE_GRAPH_PROCINFO_LENGTH 14 111 112 static enum print_line_t 113 print_graph_proc(struct trace_seq *s, pid_t pid) 114 { 115 int i; 116 int ret; 117 int len; 118 char comm[8]; 119 int spaces = 0; 120 /* sign + log10(MAX_INT) + '\0' */ 121 char pid_str[11]; 122 123 strncpy(comm, trace_find_cmdline(pid), 7); 124 comm[7] = '\0'; 125 sprintf(pid_str, "%d", pid); 126 127 /* 1 stands for the "-" character */ 128 len = strlen(comm) + strlen(pid_str) + 1; 129 130 if (len < TRACE_GRAPH_PROCINFO_LENGTH) 131 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; 132 133 /* First spaces to align center */ 134 for (i = 0; i < spaces / 2; i++) { 135 ret = trace_seq_printf(s, " "); 136 if (!ret) 137 return TRACE_TYPE_PARTIAL_LINE; 138 } 139 140 ret = trace_seq_printf(s, "%s-%s", comm, pid_str); 141 if (!ret) 142 return TRACE_TYPE_PARTIAL_LINE; 143 144 /* Last spaces to align center */ 145 for (i = 0; i < spaces - (spaces / 2); i++) { 146 ret = trace_seq_printf(s, " "); 147 if (!ret) 148 return TRACE_TYPE_PARTIAL_LINE; 149 } 150 return TRACE_TYPE_HANDLED; 151 } 152 153 154 /* If the pid changed since the last trace, output this event */ 155 static enum print_line_t 156 verif_pid(struct trace_seq *s, pid_t pid, int cpu) 157 { 158 pid_t prev_pid; 159 int ret; 160 161 if (last_pid[cpu] != -1 && last_pid[cpu] == pid) 162 return TRACE_TYPE_HANDLED; 163 164 prev_pid = last_pid[cpu]; 165 last_pid[cpu] = pid; 166 167 /* 168 * Context-switch trace line: 169 170 ------------------------------------------ 171 | 1) migration/0--1 => sshd-1755 172 ------------------------------------------ 173 174 */ 175 ret = trace_seq_printf(s, 176 " ------------------------------------------\n"); 177 if (!ret) 178 TRACE_TYPE_PARTIAL_LINE; 179 180 ret = print_graph_cpu(s, cpu); 181 if (ret == TRACE_TYPE_PARTIAL_LINE) 182 TRACE_TYPE_PARTIAL_LINE; 183 184 ret = print_graph_proc(s, prev_pid); 185 if (ret == TRACE_TYPE_PARTIAL_LINE) 186 TRACE_TYPE_PARTIAL_LINE; 187 188 ret = trace_seq_printf(s, " => "); 189 if (!ret) 190 TRACE_TYPE_PARTIAL_LINE; 191 192 ret = print_graph_proc(s, pid); 193 if (ret == TRACE_TYPE_PARTIAL_LINE) 194 TRACE_TYPE_PARTIAL_LINE; 195 196 ret = trace_seq_printf(s, 197 "\n ------------------------------------------\n\n"); 198 if (!ret) 199 TRACE_TYPE_PARTIAL_LINE; 200 201 return ret; 202 } 203 204 static bool 205 trace_branch_is_leaf(struct trace_iterator *iter, 206 struct ftrace_graph_ent_entry *curr) 207 { 208 struct ring_buffer_iter *ring_iter; 209 struct ring_buffer_event *event; 210 struct ftrace_graph_ret_entry *next; 211 212 ring_iter = iter->buffer_iter[iter->cpu]; 213 214 if (!ring_iter) 215 return false; 216 217 event = ring_buffer_iter_peek(ring_iter, NULL); 218 219 if (!event) 220 return false; 221 222 next = ring_buffer_event_data(event); 223 224 if (next->ent.type != TRACE_GRAPH_RET) 225 return false; 226 227 if (curr->ent.pid != next->ent.pid || 228 curr->graph_ent.func != next->ret.func) 229 return false; 230 231 return true; 232 } 233 234 static enum print_line_t 235 print_graph_irq(struct trace_seq *s, unsigned long addr, 236 enum trace_type type, int cpu, pid_t pid) 237 { 238 int ret; 239 240 if (addr < (unsigned long)__irqentry_text_start || 241 addr >= (unsigned long)__irqentry_text_end) 242 return TRACE_TYPE_UNHANDLED; 243 244 if (type == TRACE_GRAPH_ENT) { 245 ret = trace_seq_printf(s, "==========> | "); 246 } else { 247 /* Cpu */ 248 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 249 ret = print_graph_cpu(s, cpu); 250 if (ret == TRACE_TYPE_PARTIAL_LINE) 251 return TRACE_TYPE_PARTIAL_LINE; 252 } 253 /* Proc */ 254 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { 255 ret = print_graph_proc(s, pid); 256 if (ret == TRACE_TYPE_PARTIAL_LINE) 257 return TRACE_TYPE_PARTIAL_LINE; 258 259 ret = trace_seq_printf(s, " | "); 260 if (!ret) 261 return TRACE_TYPE_PARTIAL_LINE; 262 } 263 264 /* No overhead */ 265 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 266 ret = trace_seq_printf(s, " "); 267 if (!ret) 268 return TRACE_TYPE_PARTIAL_LINE; 269 } 270 271 ret = trace_seq_printf(s, "<========== |\n"); 272 } 273 if (!ret) 274 return TRACE_TYPE_PARTIAL_LINE; 275 return TRACE_TYPE_HANDLED; 276 } 277 278 static enum print_line_t 279 print_graph_duration(unsigned long long duration, struct trace_seq *s) 280 { 281 unsigned long nsecs_rem = do_div(duration, 1000); 282 /* log10(ULONG_MAX) + '\0' */ 283 char msecs_str[21]; 284 char nsecs_str[5]; 285 int ret, len; 286 int i; 287 288 sprintf(msecs_str, "%lu", (unsigned long) duration); 289 290 /* Print msecs */ 291 ret = trace_seq_printf(s, msecs_str); 292 if (!ret) 293 return TRACE_TYPE_PARTIAL_LINE; 294 295 len = strlen(msecs_str); 296 297 /* Print nsecs (we don't want to exceed 7 numbers) */ 298 if (len < 7) { 299 snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); 300 ret = trace_seq_printf(s, ".%s", nsecs_str); 301 if (!ret) 302 return TRACE_TYPE_PARTIAL_LINE; 303 len += strlen(nsecs_str); 304 } 305 306 ret = trace_seq_printf(s, " us "); 307 if (!ret) 308 return TRACE_TYPE_PARTIAL_LINE; 309 310 /* Print remaining spaces to fit the row's width */ 311 for (i = len; i < 7; i++) { 312 ret = trace_seq_printf(s, " "); 313 if (!ret) 314 return TRACE_TYPE_PARTIAL_LINE; 315 } 316 317 ret = trace_seq_printf(s, "| "); 318 if (!ret) 319 return TRACE_TYPE_PARTIAL_LINE; 320 return TRACE_TYPE_HANDLED; 321 322 } 323 324 /* Signal a overhead of time execution to the output */ 325 static int 326 print_graph_overhead(unsigned long long duration, struct trace_seq *s) 327 { 328 /* Duration exceeded 100 msecs */ 329 if (duration > 100000ULL) 330 return trace_seq_printf(s, "! "); 331 332 /* Duration exceeded 10 msecs */ 333 if (duration > 10000ULL) 334 return trace_seq_printf(s, "+ "); 335 336 return trace_seq_printf(s, " "); 337 } 338 339 /* Case of a leaf function on its call entry */ 340 static enum print_line_t 341 print_graph_entry_leaf(struct trace_iterator *iter, 342 struct ftrace_graph_ent_entry *entry, struct trace_seq *s) 343 { 344 struct ftrace_graph_ret_entry *ret_entry; 345 struct ftrace_graph_ret *graph_ret; 346 struct ring_buffer_event *event; 347 struct ftrace_graph_ent *call; 348 unsigned long long duration; 349 int ret; 350 int i; 351 352 event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); 353 ret_entry = ring_buffer_event_data(event); 354 graph_ret = &ret_entry->ret; 355 call = &entry->graph_ent; 356 duration = graph_ret->rettime - graph_ret->calltime; 357 358 /* Overhead */ 359 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 360 ret = print_graph_overhead(duration, s); 361 if (!ret) 362 return TRACE_TYPE_PARTIAL_LINE; 363 } 364 365 /* Duration */ 366 ret = print_graph_duration(duration, s); 367 if (ret == TRACE_TYPE_PARTIAL_LINE) 368 return TRACE_TYPE_PARTIAL_LINE; 369 370 /* Function */ 371 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 372 ret = trace_seq_printf(s, " "); 373 if (!ret) 374 return TRACE_TYPE_PARTIAL_LINE; 375 } 376 377 ret = seq_print_ip_sym(s, call->func, 0); 378 if (!ret) 379 return TRACE_TYPE_PARTIAL_LINE; 380 381 ret = trace_seq_printf(s, "();\n"); 382 if (!ret) 383 return TRACE_TYPE_PARTIAL_LINE; 384 385 return TRACE_TYPE_HANDLED; 386 } 387 388 static enum print_line_t 389 print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, 390 struct trace_seq *s, pid_t pid, int cpu) 391 { 392 int i; 393 int ret; 394 struct ftrace_graph_ent *call = &entry->graph_ent; 395 396 /* No overhead */ 397 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 398 ret = trace_seq_printf(s, " "); 399 if (!ret) 400 return TRACE_TYPE_PARTIAL_LINE; 401 } 402 403 /* Interrupt */ 404 ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid); 405 if (ret == TRACE_TYPE_UNHANDLED) { 406 /* No time */ 407 ret = trace_seq_printf(s, " | "); 408 if (!ret) 409 return TRACE_TYPE_PARTIAL_LINE; 410 } else { 411 if (ret == TRACE_TYPE_PARTIAL_LINE) 412 return TRACE_TYPE_PARTIAL_LINE; 413 } 414 415 416 /* Function */ 417 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 418 ret = trace_seq_printf(s, " "); 419 if (!ret) 420 return TRACE_TYPE_PARTIAL_LINE; 421 } 422 423 ret = seq_print_ip_sym(s, call->func, 0); 424 if (!ret) 425 return TRACE_TYPE_PARTIAL_LINE; 426 427 ret = trace_seq_printf(s, "() {\n"); 428 if (!ret) 429 return TRACE_TYPE_PARTIAL_LINE; 430 431 return TRACE_TYPE_HANDLED; 432 } 433 434 static enum print_line_t 435 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, 436 struct trace_iterator *iter, int cpu) 437 { 438 int ret; 439 struct trace_entry *ent = iter->ent; 440 441 /* Pid */ 442 if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) 443 return TRACE_TYPE_PARTIAL_LINE; 444 445 /* Cpu */ 446 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 447 ret = print_graph_cpu(s, cpu); 448 if (ret == TRACE_TYPE_PARTIAL_LINE) 449 return TRACE_TYPE_PARTIAL_LINE; 450 } 451 452 /* Proc */ 453 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { 454 ret = print_graph_proc(s, ent->pid); 455 if (ret == TRACE_TYPE_PARTIAL_LINE) 456 return TRACE_TYPE_PARTIAL_LINE; 457 458 ret = trace_seq_printf(s, " | "); 459 if (!ret) 460 return TRACE_TYPE_PARTIAL_LINE; 461 } 462 463 if (trace_branch_is_leaf(iter, field)) 464 return print_graph_entry_leaf(iter, field, s); 465 else 466 return print_graph_entry_nested(field, s, iter->ent->pid, cpu); 467 468 } 469 470 static enum print_line_t 471 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, 472 struct trace_entry *ent, int cpu) 473 { 474 int i; 475 int ret; 476 unsigned long long duration = trace->rettime - trace->calltime; 477 478 /* Pid */ 479 if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) 480 return TRACE_TYPE_PARTIAL_LINE; 481 482 /* Cpu */ 483 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 484 ret = print_graph_cpu(s, cpu); 485 if (ret == TRACE_TYPE_PARTIAL_LINE) 486 return TRACE_TYPE_PARTIAL_LINE; 487 } 488 489 /* Proc */ 490 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { 491 ret = print_graph_proc(s, ent->pid); 492 if (ret == TRACE_TYPE_PARTIAL_LINE) 493 return TRACE_TYPE_PARTIAL_LINE; 494 495 ret = trace_seq_printf(s, " | "); 496 if (!ret) 497 return TRACE_TYPE_PARTIAL_LINE; 498 } 499 500 /* Overhead */ 501 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 502 ret = print_graph_overhead(duration, s); 503 if (!ret) 504 return TRACE_TYPE_PARTIAL_LINE; 505 } 506 507 /* Duration */ 508 ret = print_graph_duration(duration, s); 509 if (ret == TRACE_TYPE_PARTIAL_LINE) 510 return TRACE_TYPE_PARTIAL_LINE; 511 512 /* Closing brace */ 513 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { 514 ret = trace_seq_printf(s, " "); 515 if (!ret) 516 return TRACE_TYPE_PARTIAL_LINE; 517 } 518 519 ret = trace_seq_printf(s, "}\n"); 520 if (!ret) 521 return TRACE_TYPE_PARTIAL_LINE; 522 523 /* Overrun */ 524 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { 525 ret = trace_seq_printf(s, " (Overruns: %lu)\n", 526 trace->overrun); 527 if (!ret) 528 return TRACE_TYPE_PARTIAL_LINE; 529 } 530 531 ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid); 532 if (ret == TRACE_TYPE_PARTIAL_LINE) 533 return TRACE_TYPE_PARTIAL_LINE; 534 535 return TRACE_TYPE_HANDLED; 536 } 537 538 static enum print_line_t 539 print_graph_comment(struct print_entry *trace, struct trace_seq *s, 540 struct trace_entry *ent, struct trace_iterator *iter) 541 { 542 int i; 543 int ret; 544 545 /* Pid */ 546 if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE) 547 return TRACE_TYPE_PARTIAL_LINE; 548 549 /* Cpu */ 550 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 551 ret = print_graph_cpu(s, iter->cpu); 552 if (ret == TRACE_TYPE_PARTIAL_LINE) 553 return TRACE_TYPE_PARTIAL_LINE; 554 } 555 556 /* Proc */ 557 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { 558 ret = print_graph_proc(s, ent->pid); 559 if (ret == TRACE_TYPE_PARTIAL_LINE) 560 return TRACE_TYPE_PARTIAL_LINE; 561 562 ret = trace_seq_printf(s, " | "); 563 if (!ret) 564 return TRACE_TYPE_PARTIAL_LINE; 565 } 566 567 /* No overhead */ 568 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 569 ret = trace_seq_printf(s, " "); 570 if (!ret) 571 return TRACE_TYPE_PARTIAL_LINE; 572 } 573 574 /* No time */ 575 ret = trace_seq_printf(s, " | "); 576 if (!ret) 577 return TRACE_TYPE_PARTIAL_LINE; 578 579 /* Indentation */ 580 if (trace->depth > 0) 581 for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) { 582 ret = trace_seq_printf(s, " "); 583 if (!ret) 584 return TRACE_TYPE_PARTIAL_LINE; 585 } 586 587 /* The comment */ 588 ret = trace_seq_printf(s, "/* %s", trace->buf); 589 if (!ret) 590 return TRACE_TYPE_PARTIAL_LINE; 591 592 if (ent->flags & TRACE_FLAG_CONT) 593 trace_seq_print_cont(s, iter); 594 595 ret = trace_seq_printf(s, " */\n"); 596 if (!ret) 597 return TRACE_TYPE_PARTIAL_LINE; 598 599 return TRACE_TYPE_HANDLED; 600 } 601 602 603 enum print_line_t 604 print_graph_function(struct trace_iterator *iter) 605 { 606 struct trace_seq *s = &iter->seq; 607 struct trace_entry *entry = iter->ent; 608 609 switch (entry->type) { 610 case TRACE_GRAPH_ENT: { 611 struct ftrace_graph_ent_entry *field; 612 trace_assign_type(field, entry); 613 return print_graph_entry(field, s, iter, 614 iter->cpu); 615 } 616 case TRACE_GRAPH_RET: { 617 struct ftrace_graph_ret_entry *field; 618 trace_assign_type(field, entry); 619 return print_graph_return(&field->ret, s, entry, iter->cpu); 620 } 621 case TRACE_PRINT: { 622 struct print_entry *field; 623 trace_assign_type(field, entry); 624 return print_graph_comment(field, s, entry, iter); 625 } 626 default: 627 return TRACE_TYPE_UNHANDLED; 628 } 629 } 630 631 static void print_graph_headers(struct seq_file *s) 632 { 633 /* 1st line */ 634 seq_printf(s, "# "); 635 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 636 seq_printf(s, "CPU "); 637 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 638 seq_printf(s, "TASK/PID "); 639 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) 640 seq_printf(s, "OVERHEAD/"); 641 seq_printf(s, "DURATION FUNCTION CALLS\n"); 642 643 /* 2nd line */ 644 seq_printf(s, "# "); 645 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 646 seq_printf(s, "| "); 647 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 648 seq_printf(s, "| | "); 649 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 650 seq_printf(s, "| "); 651 seq_printf(s, "| | | | |\n"); 652 } else 653 seq_printf(s, " | | | | |\n"); 654 } 655 static struct tracer graph_trace __read_mostly = { 656 .name = "function_graph", 657 .init = graph_trace_init, 658 .reset = graph_trace_reset, 659 .print_line = print_graph_function, 660 .print_header = print_graph_headers, 661 .flags = &tracer_flags, 662 }; 663 664 static __init int init_graph_trace(void) 665 { 666 return register_tracer(&graph_trace); 667 } 668 669 device_initcall(init_graph_trace); 670