// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
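
/*
 * Typical usage is via the tracefs interface (paths assume tracefs is
 * mounted at /sys/kernel/tracing), e.g. for the wakeup_rt variant:
 *
 *	# echo 0 > tracing_max_latency
 *	# echo wakeup_rt > current_tracer
 *	# echo 1 > tracing_on
 *	# ... run the workload of interest ...
 *	# echo 0 > tracing_on
 *	# cat tracing_max_latency
 *	# cat trace
 */
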
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array *wakeup_trace;
static int __read_mostly tracer_enabled;

static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static bool wakeup_rt;
static bool wakeup_dl;
static bool tracing_dl;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, with preemption disabled and
 * data->disabled incremented.
 * Returns 0 if the trace is to be ignored; preemption is not disabled
 * and data->disabled is left unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    unsigned int *trace_ctx)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
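
/*
 * A sketch of the pattern the callers below follow on the success path
 * (emit the event, then undo what the prologue did):
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
 *		return;
 *	... record the trace entry ...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */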

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
			      struct fgraph_ops *gops,
			      struct ftrace_regs *fregs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	u64 *calltime;
	int ret = 0;

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return 0;

	calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
	if (!calltime)
		return 0;

	*calltime = trace_clock_local();

	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace,
				struct fgraph_ops *gops,
				struct ftrace_regs *fregs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	u64 *calltime;
	u64 rettime;
	int size;

	ftrace_graph_addr_finish(gops, trace);

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	rettime = trace_clock_local();

	calltime = fgraph_retrieve_data(gops->idx, &size);
	if (!calltime)
		return;

	__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static struct fgraph_ops fgraph_wakeup_ops = {
	.entryfunc = &wakeup_graph_entry,
	.retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
	else
		iter->private = NULL;
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION | \
			    TRACE_GRAPH_PRINT_OVERHEAD | \
			    TRACE_GRAPH_PRINT_IRQS)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, trace_ctx);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_wakeup_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_wakeup_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* else CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 *
 * If tracing_thresh is set, record every latency at or above the
 * threshold; otherwise record only a latency that exceeds the current
 * max_latency.
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned int trace_ctx)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = task_state_index(prev);
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = task_state_index(next);
	entry->next_cpu = task_cpu(next);

	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned int trace_ctx)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = task_state_index(curr);
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = task_state_index(wakee);
	entry->next_cpu = task_cpu(wakee);

	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next,
			  unsigned int prev_state)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 * This smp_rmb() pairs with the smp_wmb() in start_wakeup_tracer().
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);

	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
	tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = false;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->array_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	long disabled;
	unsigned int trace_ctx;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * The semantics are:
	 * - the wakeup tracer handles all tasks in the system, independently
	 *   of their scheduling class;
	 * - the wakeup_rt tracer handles tasks belonging to the sched_dl and
	 *   sched_rt classes;
	 * - the wakeup_dl tracer handles tasks belonging to the sched_dl
	 *   class only.
	 */
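	/*
	 * For example (an illustrative reading of the check below): a woken
	 * SCHED_FIFO task is considered by the wakeup and wakeup_rt tracers
	 * but skipped by wakeup_dl, while a SCHED_DEADLINE task is
	 * considered by all three.
	 */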
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !rt_or_dl_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	trace_ctx = tracing_gen_ctx();

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = true;
	else
		tracing_dl = false;

	wakeup_task = get_task_struct(p);

	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = true;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = true;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name = "wakeup",
	.init = wakeup_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name = "wakeup_rt",
	.init = wakeup_rt_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name = "wakeup_dl",
	.init = wakeup_dl_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);