/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *           incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *           is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(*flags))
		return 0;

	*data = tr->data[cpu];
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	int cpu;

	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tracing_max_latency = 0;
	tracing_reset_online_cpus(irqsoff_trace);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings used for stoppage (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret = 0;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);

	if (start_irqsoff_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr, is_graph());

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);