// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
        return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};
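/*
 * Usage note (a sketch, not part of the original comments): each tracer_opt
 * above shows up as a boolean option under tracefs once this tracer is
 * selected, so the defaults in tracer_flags can be changed at run time.
 * Assuming tracefs is mounted at /sys/kernel/debug/tracing:
 *
 *	# echo function_graph > current_tracer
 *	# echo funcgraph-proc > trace_options	    (enable proc display)
 *	# echo nofuncgraph-irqs > trace_options	    (hide irq functions)
 */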
static struct trace_array *graph_array;

/*
 * DURATION column is also being used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
                         unsigned long frame_pointer, unsigned long *retp)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        /*
         * The curr_ret_stack is an index into the ftrace return stack of
         * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
         * when the function graph tracer is used. To support filtering out
         * specific functions, it makes the index negative by subtracting a
         * huge value (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative
         * index it will ignore the record. The index gets recovered when
         * returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
         * back, and then it'll continue to record functions normally.
         *
         * The curr_ret_stack is initialized to -1 and gets incremented
         * in this function. So it can be less than -1 only if it was
         * filtered out via ftrace_graph_notrace_addr(), which can be
         * set from the set_graph_notrace file in tracefs by the user.
         */
        if (current->curr_ret_stack < -1)
                return -EBUSY;

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        if (ftrace_graph_notrace_addr(func))
                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        current->ret_stack[index].retp = retp;
#endif
        return 0;
}
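/*
 * Worked example of the index arithmetic above (illustrative only): with
 * curr_ret_stack == 2, pushing a function matched by set_graph_notrace
 * stores its frame at index 3 but leaves curr_ret_stack at
 * 3 - FTRACE_NOTRACE_DEPTH, i.e. deeply negative. Every push below it then
 * bails out early on the "< -1" check, so nothing called inside the
 * filtered function is recorded. When the filtered function returns,
 * ftrace_return_to_handler() adds FTRACE_NOTRACE_DEPTH back, restoring
 * curr_ret_stack to 2, and normal recording resumes.
 */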
int function_graph_enter(unsigned long ret, unsigned long func,
                         unsigned long frame_pointer, unsigned long *retp)
{
        struct ftrace_graph_ent trace;

        trace.func = func;
        trace.depth = ++current->curr_ret_depth;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace))
                goto out;

        if (ftrace_push_return_trace(ret, func,
                                     frame_pointer, retp))
                goto out;

        return 0;
 out:
        current->curr_ret_depth--;
        return -EBUSY;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        /*
         * A negative index here means that it's just returned from a
         * notrace'd function. Recover the index to get the original
         * return address. See ftrace_push_return_trace().
         *
         * TODO: Need to check whether the stack gets corrupted.
         */
        if (index < 0)
                index += FTRACE_NOTRACE_DEPTH;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = current->curr_ret_depth--;
        /*
         * We still want to trace interrupts coming in if
         * max_depth is set to 1. Make sure the decrement is
         * seen before ftrace_graph_return.
         */
        barrier();
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);
        /*
         * The ftrace_graph_return() may still access the current
         * ret_stack structure, we need to make sure the update of
         * curr_ret_stack is after that.
         */
        barrier();
        current->curr_ret_stack--;
        /*
         * The curr_ret_stack can be less than -1 only if it was
         * filtered out and it's about to return from the function.
         * Recover the index and continue to trace normal functions.
         */
        if (current->curr_ret_stack < -1) {
                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
                return ret;
        }

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}
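/*
 * How this pairs with the architecture hooks (a hedged sketch; the names
 * below live in arch code, not in this file, and argument order varies by
 * arch): the mcount/fentry stub typically calls function_graph_enter()
 * from its prepare_ftrace_return() and, on success, rewrites the saved
 * return address to the return_to_handler trampoline, which later calls
 * ftrace_return_to_handler() above to recover the original address:
 *
 *	void prepare_ftrace_return(unsigned long *parent, unsigned long self,
 *				   unsigned long frame_pointer)
 *	{
 *		if (function_graph_enter(*parent, self, frame_pointer,
 *					 parent))
 *			return;
 *		*parent = (unsigned long)&return_to_handler;
 *	}
 */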
/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp)
{
        int index = task->curr_ret_stack;
        int i;

        if (ret != (unsigned long)return_to_handler)
                return ret;

        if (index < -1)
                index += FTRACE_NOTRACE_DEPTH;

        if (index < 0)
                return ret;

        for (i = 0; i <= index; i++)
                if (task->ret_stack[i].retp == retp)
                        return task->ret_stack[i].ret;

        return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp)
{
        int task_idx;

        if (ret != (unsigned long)return_to_handler)
                return ret;

        task_idx = task->curr_ret_stack;

        if (!task->ret_stack || task_idx < *idx)
                return ret;

        task_idx -= *idx;
        (*idx)++;

        return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
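/*
 * Example caller (a sketch of how an unwinder might use the helper above;
 * the surrounding loop and frame accessors are hypothetical, not taken
 * from any one arch):
 *
 *	int graph_idx = 0;
 *	unsigned long addr;
 *
 *	for_each_frame(frame) {
 *		addr = frame->return_address;
 *		addr = ftrace_graph_ret_addr(task, &graph_idx, addr,
 *					     frame->ret_addr_location);
 *		// 'addr' is now the real callee return address even if
 *		// the graph tracer patched in return_to_handler.
 *	}
 */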
int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned long flags,
                        int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(tr))
                return 0;

        if (ftrace_graph_ignore_func(trace))
                return 0;

        if (ftrace_graph_ignore_irqs())
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        /*
         * Stop here if tracing_thresh is set. We only write function return
         * events to the ring buffer.
         */
        if (tracing_thresh)
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
                       unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                     unsigned long ip, unsigned long parent_ip,
                     unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned long flags,
                          int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */

        smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}
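/*
 * Usage note (illustrative, not from the original comments): when a
 * threshold is set before the tracer is enabled, entry events are skipped
 * and only return events of functions that ran longer than the threshold
 * (set in microseconds via the tracing_thresh file) are recorded:
 *
 *	# echo 100 > tracing_thresh
 *	# echo function_graph > current_tracer
 */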
static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}
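/*
 * Worked example (illustrative): for pid 1755 with comm "sshd", the comm
 * is truncated to at most 7 characters, so the field is "sshd-1755"
 * (9 characters). With TRACE_GRAPH_PROCINFO_LENGTH == 14 that leaves
 * 5 spaces of padding: 2 before and 3 after, centering the field.
 */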
static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
        /*
         * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

         */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 numbers) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}
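/*
 * Worked example (illustrative): duration = 1234567 ns. do_div() leaves
 * duration = 1234 us with nsecs_rem = 567, so the output is "1234.567 us "
 * with no extra padding (len is already 8). A shorter duration such as
 * 12040 ns prints "12.040 us " plus two padding spaces, keeping the
 * numeric part of the column 8 characters wide.
 */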
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an overhead of time execution to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry,
                       struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int cpu = iter->cpu;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /* If a graph tracer ignored set_graph_notrace */
                if (call->depth < -1)
                        call->depth += FTRACE_NOTRACE_DEPTH;

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
                        cpu, iter->ent->pid, flags);

        return trace_handle_return(s);
}
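/*
 * Output contrast (illustrative; spacing approximate): a leaf call is
 * printed as a single closed line, while a nested call opens a brace
 * that a later return event closes:
 *
 *	 1)   0.645 us   |          kfree();
 *	 1)              |          vfs_read() {
 *	 1)   ...        |            ...
 *	 1)   5.379 us   |          }
 */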
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                /* If a graph tracer ignored set_graph_notrace */
                if (call->depth < -1)
                        call->depth += FTRACE_NOTRACE_DEPTH;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);

        return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it. Because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(trace->depth < 0)) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}
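// Example of the funcgraph-tail option (illustrative; spacing approximate):
// with the option off, a return event prints a bare closing brace:
//
//	 1)   1.787 us   |          }
//
// With funcgraph-tail on (or when the matching entry was lost), the
// function name is appended as a comment:
//
//	 1)   1.787 us   |          } /* __do_page_fault */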
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPUTS:
                ret = trace_print_bputs_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                " /* 16 spaces */
                "    " /* 4 spaces */
                "                 "; /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "  TASK/PID       ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   |    |        ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   |      ");
        seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}
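/*
 * With the default flags the header printed by the functions above looks
 * roughly like this (illustrative; column widths approximate):
 *
 *	# CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 */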
void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type = TRACE_GRAPH_ENT,
        .funcs = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type = TRACE_GRAPH_RET,
        .funcs = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        fgraph_max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", fgraph_max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};
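/*
 * Usage note (illustrative): fgraph_max_depth limits how deep the call
 * graph is traced, via the max_graph_depth file created below. Assuming
 * tracefs is mounted at /sys/kernel/debug/tracing:
 *
 *	# echo 3 > max_graph_depth	    (trace at most 3 levels deep)
 *	# echo 0 > max_graph_depth	    (remove the depth limit)
 */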
static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);