/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;

static raw_spinlock_t wakeup_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void __wakeup_reset(struct trace_array *tr);

static int save_lat_flag;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, ip, parent_ip, flags, pc);

unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

out:
	atomic_dec(&data->disabled);

	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
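/*
 * A worked example of the policy above, with illustrative numbers that
 * are not taken from any particular system: if tracing_thresh is set to
 * 50000 ns, every latency of 50000 ns or more is recorded, regardless
 * of the current max; if tracing_thresh is 0, only a delta that beats
 * tracing_max_latency (say 60000 ns against a current max of 50000 ns)
 * is recorded, and the caller then makes it the new max.
 */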
static void notrace
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
	struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see tracer_enabled = 1 paired
	 * with a stale wakeup_task, which might actually be the
	 * same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(tr);

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
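/*
 * A minimal sketch of the locking discipline used in this file, shown
 * for reference rather than compiled anywhere: wakeup_lock is a raw
 * spinlock and does not disable interrupts on its own, so every
 * acquisition is either bracketed by local_irq_save()/local_irq_restore()
 * (as in wakeup_reset() above) or relies on the caller already running
 * with interrupts off (as probe_wakeup() below does under
 * try_to_wake_up()):
 *
 *	local_irq_save(flags);
 *	__raw_spin_lock(&wakeup_lock);
 *	... read or write wakeup_task/wakeup_cpu/wakeup_prio ...
 *	__raw_spin_unlock(&wakeup_lock);
 *	local_irq_restore(flags);
 */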
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	wakeup_reset(tr);

	/*
	 * Don't let tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	register_ftrace_function(&trace_ops);

	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	unregister_trace_sched_switch(probe_wakeup_sched_switch);
	unregister_trace_sched_wakeup_new(probe_wakeup);
	unregister_trace_sched_wakeup(probe_wakeup);
}
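/*
 * How the barriers above pair up, as a sketch (this illustrates the
 * intent of the existing code, it adds no guarantee of its own):
 * start_wakeup_tracer() clears wakeup_task inside wakeup_reset() and
 * only then, past the smp_wmb(), sets tracer_enabled = 1, while
 * probe_wakeup_sched_switch() reads tracer_enabled first and issues
 * smp_rmb() before reading wakeup_task. A CPU that observes
 * tracer_enabled == 1 therefore cannot observe the stale pre-reset
 * wakeup_task:
 *
 *	writer (start_wakeup_tracer)	reader (probe_wakeup_sched_switch)
 *	wakeup_task = NULL;		if (!tracer_enabled) return;
 *	smp_wmb();			smp_rmb();
 *	tracer_enabled = 1;		if (next != wakeup_task) return;
 */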
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);
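/*
 * Usage sketch from userspace (assumes debugfs is mounted at
 * /sys/kernel/debug; the mount point varies by distribution):
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	# echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *	... run a real-time workload ...
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * The trace file then shows the worst wakeup latency observed so far,
 * as recorded by update_max_tr() above.
 */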