xref: /linux/kernel/trace/trace_kdb.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * kdb helper for dumping the ftrace buffer
 *
 * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
 *
 * ftrace_dump_buf based on ftrace_dump:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 */
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/ftrace.h>

#include "trace.h"
#include "trace_output.h"

static void ftrace_dump_buf(int skip_lines, long cpu_file)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
	unsigned int old_userobj;
	int cnt = 0, cpu;

	trace_init_global_iter(&iter);
	iter.buffer_iter = buffer_iter;

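	/* stop tracing on each CPU so the buffers stay stable while we read them */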
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = trace_flags;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	kdb_printf("Dumping ftrace buffer:\n");

	/* reset all but tr, trace, and overruns */
	memset(&iter.seq, 0,
		   sizeof(struct trace_iterator) -
		   offsetof(struct trace_iterator, seq));
	iter.iter_flags |= TRACE_FILE_LAT_FMT;
	iter.pos = -1;

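	/* set up a read iterator for every tracing CPU, or just the one requested */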
	if (cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter.buffer_iter[cpu] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
			ring_buffer_read_start(iter.buffer_iter[cpu]);
			tracing_iter_reset(&iter, cpu);
		}
	} else {
		iter.cpu_file = cpu_file;
		iter.buffer_iter[cpu_file] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
		tracing_iter_reset(&iter, cpu_file);
	}

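	/* dump the entries one line at a time, honoring skip_lines and kdb interrupts */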
	while (trace_find_next_entry_inc(&iter)) {
		if (!cnt)
			kdb_printf("---------------------------------\n");
		cnt++;

		if (!skip_lines) {
			print_trace_line(&iter);
			trace_printk_seq(&iter.seq);
		} else {
			skip_lines--;
		}

		if (KDB_FLAG(CMD_INTERRUPT))
			goto out;
	}

	if (!cnt)
		kdb_printf("   (ftrace buffer empty)\n");
	else
		kdb_printf("---------------------------------\n");

out:
	trace_flags = old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

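	/* release the per-cpu ring buffer iterators now that the dump is done */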
	for_each_tracing_cpu(cpu) {
		if (iter.buffer_iter[cpu]) {
			ring_buffer_read_finish(iter.buffer_iter[cpu]);
			iter.buffer_iter[cpu] = NULL;
		}
	}
}

/*
 * kdb_ftdump - Dump the ftrace log buffer
 *
 * Usage: ftdump [skip_#lines] [cpu]
 */
static int kdb_ftdump(int argc, const char **argv)
{
	int skip_lines = 0;
	long cpu_file;
	char *cp;

	if (argc > 2)
		return KDB_ARGCOUNT;

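	/* optional first argument: number of leading trace lines to skip */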
	if (argc) {
		skip_lines = simple_strtol(argv[1], &cp, 0);
		if (*cp)
			skip_lines = 0;
	}

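	/* optional second argument: restrict the dump to a single online CPU */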
	if (argc == 2) {
		cpu_file = simple_strtol(argv[2], &cp, 0);
		if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
		    !cpu_online(cpu_file))
			return KDB_BADINT;
	} else {
		cpu_file = RING_BUFFER_ALL_CPUS;
	}

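	/* route printk output through kdb while the buffer is being dumped */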
	kdb_trap_printk++;
	ftrace_dump_buf(skip_lines, cpu_file);
	kdb_trap_printk--;

	return 0;
}

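/* register the "ftdump" command with kdb at late init time */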
static __init int kdb_ftrace_register(void)
{
	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
			    "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
	return 0;
}

late_initcall(kdb_ftrace_register);