trace_irqsoff.c: diff between 9d4d8572a539 (old) and 36590c50b2d0 (new)
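Between these two revisions, the tracing callbacks in this file stop passing the saved irqflags and the preempt count as two separate arguments and instead pass a single trace_ctx word, produced by tracing_gen_ctx() (or by tracing_gen_ctx_flags() when the flags were already captured by func_prolog_dec()). The newer revision also sets TRACE_ITER_PAUSE_ON_TRACE while the tracer is active and restores it on reset; per its new comment, "without pause, we will produce garbage if another latency occurs". The sketch below is illustrative only and not part of the file: example_before() and example_after() are hypothetical wrapper names, but the calls inside them are the ones that appear in the diff. The listing that follows is the diff itself in unified form ("-" lines are the old revision, "+" lines the new one).

/*
 * Illustrative sketch only -- not part of trace_irqsoff.c. The wrapper
 * names are hypothetical; the tracing calls are the ones used in the diff.
 */

/* Old convention (9d4d8572a539): irqflags and preempt count passed separately. */
static void example_before(struct trace_array *tr,
			   unsigned long ip, unsigned long parent_ip)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);	/* snapshot the irq-off state */
	pc = preempt_count();		/* snapshot the preempt count */
	trace_function(tr, ip, parent_ip, flags, pc);
}

/* New convention (36590c50b2d0): both packed into one trace_ctx word. */
static void example_after(struct trace_array *tr,
			  unsigned long ip, unsigned long parent_ip)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();	/* or tracing_gen_ctx_flags(flags) if flags are already saved */
	trace_function(tr, ip, parent_ip, trace_ctx);
}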
--- a/trace_irqsoff.c	(9d4d8572a539ef807e21c196f145aa365fd52f0e)
+++ b/trace_irqsoff.c	(36590c50b2d0729952511129916beeea30d31d81)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * trace irqs off critical timings
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  *
  * From code in the latency_tracer, that is:

[... 129 unchanged lines ...]

  */
 static void
 irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		     struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
+	unsigned int trace_ctx;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

-	trace_function(tr, ip, parent_ip, flags, preempt_count());
+	trace_ctx = tracing_gen_ctx_flags(flags);
+
+	trace_function(tr, ip, parent_ip, trace_ctx);

	atomic_dec(&data->disabled);
 }
 #endif /* CONFIG_FUNCTION_TRACER */

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int irqsoff_display_graph(struct trace_array *tr, int set)
 {
	int cpu;

[... 12 unchanged lines ...]

	return start_irqsoff_tracer(irqsoff_trace, set);
 }

 static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
 {
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
+	unsigned int trace_ctx;
	int ret;
-	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

-	pc = preempt_count();
-	ret = __trace_graph_entry(tr, trace, flags, pc);
+	trace_ctx = tracing_gen_ctx_flags(flags);
+	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	return ret;
 }

 static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
 {
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
-	int pc;
+	unsigned int trace_ctx;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

-	pc = preempt_count();
-	__trace_graph_return(tr, trace, flags, pc);
+	trace_ctx = tracing_gen_ctx_flags(flags);
+	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
 }

 static struct fgraph_ops fgraph_ops = {
	.entryfunc = &irqsoff_graph_entry,
	.retfunc = &irqsoff_graph_return,
 };

[... 35 unchanged lines ...]

		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
 }

 static void
 __trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
-		 unsigned long flags, int pc)
+		 unsigned int trace_ctx)
 {
	if (is_graph(tr))
-		trace_graph_function(tr, ip, parent_ip, flags, pc);
+		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
-		trace_function(tr, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, trace_ctx);
 }

 #else
 #define __trace_function trace_function

 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 {
	return TRACE_TYPE_UNHANDLED;

[... 33 unchanged lines ...]

 static void
 check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
 {
	u64 T0, T1, delta;
	unsigned long flags;
-	int pc;
+	unsigned int trace_ctx;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

-	local_save_flags(flags);
-
-	pc = preempt_count();
+	trace_ctx = tracing_gen_ctx();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

-	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
	/* Skip 5 functions to get to the irq/preempt enable function */
-	__trace_stack(tr, flags, 5, pc);
+	__trace_stack(tr, trace_ctx, 5);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

 out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

 out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
-	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
 }

 static nokprobe_inline void
-start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+start_critical_timing(unsigned long ip, unsigned long parent_ip)
 {
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
-	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

[... 4 unchanged lines ...]

		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

-	local_save_flags(flags);
-
-	__trace_function(tr, ip, parent_ip, flags, pc);
+	__trace_function(tr, ip, parent_ip, tracing_gen_ctx());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
 }

 static nokprobe_inline void
-stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 {
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
-	unsigned long flags;
+	unsigned int trace_ctx;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

-	local_save_flags(flags);
-	__trace_function(tr, ip, parent_ip, flags, pc);
+	trace_ctx = tracing_gen_ctx();
+	__trace_function(tr, ip, parent_ip, trace_ctx);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
 }

 /* start and stop critical timings used to for stoppage (in idle) */
 void start_critical_timings(void)
 {
-	int pc = preempt_count();
-
-	if (preempt_trace(pc) || irq_trace())
-		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
+	if (preempt_trace(preempt_count()) || irq_trace())
+		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
 NOKPROBE_SYMBOL(start_critical_timings);

 void stop_critical_timings(void)
 {
-	int pc = preempt_count();
-
-	if (preempt_trace(pc) || irq_trace())
-		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
+	if (preempt_trace(preempt_count()) || irq_trace())
+		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL_GPL(stop_critical_timings);
 NOKPROBE_SYMBOL(stop_critical_timings);

 #ifdef CONFIG_FUNCTION_TRACER
 static bool function_enabled;

 static int register_irqsoff_function(struct trace_array *tr, int graph, int set)

[... 94 unchanged lines ...]

	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+	/* without pause, we will produce garbage if another latency occurs */
+	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

[... 5 unchanged lines ...]

	irqsoff_busy = true;
	return 0;
 }

 static void __irqsoff_tracer_reset(struct trace_array *tr)
 {
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
 }

 static void irqsoff_tracer_start(struct trace_array *tr)
 {
	tracer_enabled = 1;

[... 5 unchanged lines ...]

 }

 #ifdef CONFIG_IRQSOFF_TRACER
 /*
  * We are only interested in hardirq on/off events:
  */
 void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
 {
-	unsigned int pc = preempt_count();
-
-	if (!preempt_trace(pc) && irq_trace())
-		stop_critical_timing(a0, a1, pc);
+	if (!preempt_trace(preempt_count()) && irq_trace())
+		stop_critical_timing(a0, a1);
 }
 NOKPROBE_SYMBOL(tracer_hardirqs_on);

 void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 {
-	unsigned int pc = preempt_count();
-
-	if (!preempt_trace(pc) && irq_trace())
-		start_critical_timing(a0, a1, pc);
+	if (!preempt_trace(preempt_count()) && irq_trace())
+		start_critical_timing(a0, a1);
 }
 NOKPROBE_SYMBOL(tracer_hardirqs_off);

 static int irqsoff_tracer_init(struct trace_array *tr)
 {
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);

[... 23 unchanged lines ...]

	.allow_instances = true,
	.use_max_tr = true,
 };
 #endif /* CONFIG_IRQSOFF_TRACER */

 #ifdef CONFIG_PREEMPT_TRACER
 void tracer_preempt_on(unsigned long a0, unsigned long a1)
 {
-	int pc = preempt_count();
-
-	if (preempt_trace(pc) && !irq_trace())
-		stop_critical_timing(a0, a1, pc);
+	if (preempt_trace(preempt_count()) && !irq_trace())
+		stop_critical_timing(a0, a1);
 }

 void tracer_preempt_off(unsigned long a0, unsigned long a1)
 {
-	int pc = preempt_count();
-
-	if (preempt_trace(pc) && !irq_trace())
-		start_critical_timing(a0, a1, pc);
+	if (preempt_trace(preempt_count()) && !irq_trace())
+		start_critical_timing(a0, a1);
 }

 static int preemptoff_tracer_init(struct trace_array *tr)
 {
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
 }

[... 78 unchanged lines ...]