// SPDX-License-Identifier: GPL-2.0
/*
 * kdb helper for dumping the ftrace buffer
 *
 * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
 *
 * ftrace_dump_buf based on ftrace_dump:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 */
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/ftrace.h>

#include "trace.h"
#include "trace_output.h"

/*
 * A single file-scope iterator (plus its per-cpu ring buffer iterators)
 * is reused by every ftdump invocation; kdb_ftdump() reinitializes it
 * with trace_init_global_iter() on each run.
 */
static struct trace_iterator iter;
static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];

/*
 * ftrace_dump_buf - print the ftrace ring buffer through kdb_printf()
 * @skip_entries: number of leading entries to consume without printing
 * @cpu_file: one cpu number, or RING_BUFFER_ALL_CPUS for every cpu
 *
 * Expects the file-scope 'iter' to have been set up by the caller
 * (kdb_ftdump() runs trace_init_global_iter() first).  Temporarily
 * clears TRACE_ITER_SYM_USEROBJ so no user memory is dereferenced
 * while dumping, and restores the saved flags on exit.
 */
static void ftrace_dump_buf(int skip_entries, long cpu_file)
{
	struct trace_array *tr;
	unsigned int old_userobj;
	int cnt = 0, cpu;

	tr = iter.tr;

	old_userobj = tr->trace_flags;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	kdb_printf("Dumping ftrace buffer:\n");
	if (skip_entries)
		kdb_printf("(skipping %d entries)\n", skip_entries);

	trace_iterator_reset(&iter);
	iter.iter_flags |= TRACE_FILE_LAT_FMT;

	/*
	 * Prepare and start a ring buffer iterator for each cpu being
	 * dumped.  GFP_ATOMIC because this runs from the kdb context
	 * where sleeping is not possible.
	 */
	if (cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter.buffer_iter[cpu] =
			ring_buffer_read_prepare(iter.array_buffer->buffer,
						 cpu, GFP_ATOMIC);
			ring_buffer_read_start(iter.buffer_iter[cpu]);
			tracing_iter_reset(&iter, cpu);
		}
	} else {
		iter.cpu_file = cpu_file;
		iter.buffer_iter[cpu_file] =
			ring_buffer_read_prepare(iter.array_buffer->buffer,
						 cpu_file, GFP_ATOMIC);
		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
		tracing_iter_reset(&iter, cpu_file);
	}

	while (trace_find_next_entry_inc(&iter)) {
		/* Open the banner only once we know there is output */
		if (!cnt)
			kdb_printf("---------------------------------\n");
		cnt++;

		if (!skip_entries) {
			print_trace_line(&iter);
			trace_printk_seq(&iter.seq);
		} else {
			/* counted but suppressed */
			skip_entries--;
		}

		/* Let the operator break out of a long dump */
		if (KDB_FLAG(CMD_INTERRUPT))
			goto out;
	}

	if (!cnt)
		kdb_printf(" (ftrace buffer empty)\n");
	else
		kdb_printf("---------------------------------\n");

out:
	/* Restore the trace flags saved (and masked) above */
	tr->trace_flags = old_userobj;

	/*
	 * Release every per-cpu iterator that was started, including on
	 * the interrupted path; entries are NULLed so a later run sees
	 * a clean slate.
	 */
	for_each_tracing_cpu(cpu) {
		if (iter.buffer_iter[cpu]) {
			ring_buffer_read_finish(iter.buffer_iter[cpu]);
			iter.buffer_iter[cpu] = NULL;
		}
	}
}

/*
 * kdb_ftdump - Dump the ftrace log buffer
 *
 * argv[1] (optional): number of entries to skip; a negative value
 *                     means dump only the last |n| entries.
 * argv[2] (optional): cpu to dump; defaults to all cpus.
 *
 * Per-cpu tracing is disabled (via the 'disabled' counter) for the
 * duration of the dump and re-enabled afterwards.  Returns 0 on
 * success or a KDB_* diagnostic code on bad arguments.
 */
static int kdb_ftdump(int argc, const char **argv)
{
	int skip_entries = 0;
	long cpu_file;
	int err;
	int cnt;
	int cpu;

	if (argc > 2)
		return KDB_ARGCOUNT;

	if (argc && kstrtoint(argv[1], 0, &skip_entries))
		return KDB_BADINT;

	/* A cpu argument must name an online cpu within range */
	if (argc == 2) {
		err = kstrtol(argv[2], 0, &cpu_file);
		if (err || cpu_file >= NR_CPUS || cpu_file < 0 ||
		    !cpu_online(cpu_file))
			return KDB_BADINT;
	} else {
		cpu_file = RING_BUFFER_ALL_CPUS;
	}

	/* Route trace_printk()/printk output through kdb while dumping */
	kdb_trap_printk++;

	trace_init_global_iter(&iter);
	iter.buffer_iter = buffer_iter;

	/* Stop new entries from being recorded while we walk the buffer */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	/* A negative skip_entries means skip all but the last entries */
	if (skip_entries < 0) {
		if (cpu_file == RING_BUFFER_ALL_CPUS)
			cnt = trace_total_entries(NULL);
		else
			cnt = trace_total_entries_cpu(NULL, cpu_file);
		skip_entries = max(cnt + skip_entries, 0);
	}

	ftrace_dump_buf(skip_entries, cpu_file);

	/* Re-enable recording on every cpu we disabled above */
	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	kdb_trap_printk--;

	return 0;
}

/* kdb command table entry for "ftdump" */
static kdbtab_t ftdump_cmd = {
	.name = "ftdump",
	.func = kdb_ftdump,
	.usage = "[skip_#entries] [cpu]",
	.help = "Dump ftrace log; -skip dumps last #entries",
	.flags = KDB_ENABLE_ALWAYS_SAFE,
};

/* Register the ftdump command with kdb at late init */
static __init int kdb_ftrace_register(void)
{
	kdb_register(&ftdump_cmd);
	return 0;
}

late_initcall(kdb_ftrace_register);