/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */
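/*
 * Measurement flow, as a summary of the functions below: when irqs or
 * preemption are first disabled, start_critical_timing() stamps
 * data->preempt_timestamp and marks this CPU in tracing_cpu.  When they
 * are re-enabled, stop_critical_timing() calls check_critical_timing(),
 * which computes the delta and asks report_latency() whether it should
 * be recorded as a new maximum (or, if tracing_thresh is set, whether it
 * exceeds that threshold).
 */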
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;

	update_max_tr_single(tr, current, cpu);

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_reset(tr, cpu);
	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;
	tracing_reset(tr, cpu);

	local_save_flags(flags);

	trace_function(tr, data, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
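/*
 * Only the outermost irqs-off/preempt-off section on a CPU is timed:
 * tracing_cpu is set by the first start_critical_timing() and cleared
 * again by the matching stop_critical_timing(), while data->disabled
 * keeps these helpers from nesting through their own trace calls.
 */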
/* start and stop critical timings, used to suspend the measurement in places like the idle loop */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	tracer_enabled = 1;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	if (tr->ctrl)
		start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_irqsoff_tracer(tr);
	else
		stop_irqsoff_tracer(tr);
}
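/*
 * While an iterator has the trace file open, the tracer is stopped so
 * the snapshot being dumped is not overwritten; it is restarted again
 * when the file is closed.
 */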
static void irqsoff_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_irqsoff_tracer(iter->tr);
}

static void irqsoff_tracer_close(struct trace_iterator *iter)
{
	if (iter->tr->ctrl)
		start_irqsoff_tracer(iter->tr);
}

#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);
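/*
 * Rough userspace usage sketch (a sketch only; it assumes debugfs is
 * mounted at /sys/kernel/debug and the tracing file layout of this
 * kernel generation, which this file does not define itself):
 *
 *	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	... run the workload ...
 *	cat /sys/kernel/debug/tracing/latency_trace
 *
 * The names "irqsoff", "preemptoff" and "preemptirqsoff" come from the
 * struct tracer definitions above; writing a non-zero value to
 * tracing_thresh makes report_latency() record every section longer
 * than the threshold instead of only new maxima.
 */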