/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static raw_spinlock_t max_stack_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);

/*
 * Measure how much of the current thread's stack is in use and,
 * if it is deeper than any depth seen so far, record the new
 * maximum and save a stack trace describing it.
 */
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				/* Start the search from here */
				start = p + 1;
			}
		}

		i++;
	}

 out:
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);
}

/* ftrace callback, invoked on function entry while the tracer is registered */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu, resched;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);
	*ptr = val;
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);

	return count;
}

static struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
};

/* seq_file iterators for the stack_trace debugfs file */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	long i = (long)m->private;

	(*pos)++;

	i++;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return NULL;

	m->private = (void *)i;

	return &m->private;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	void *t = &m->private;
	loff_t l = 0;

	local_irq_disable();
	__raw_spin_lock(&max_stack_lock);

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	__raw_spin_unlock(&max_stack_lock);
	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, addr);

	return seq_printf(m, "%s\n", str);
#else
	return seq_printf(m, "%p\n", (void *)addr);
#endif
}

static int t_show(struct seq_file *m, void *v)
{
	long i = *(long *)v;
	int size;

	if (i < 0) {
		seq_printf(m, "        Depth   Size      Location"
			   "    (%d entries)\n"
			   "        -----   ----      --------\n",
			   max_stack_trace.nr_entries);
		return 0;
	}

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &stack_trace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = (void *)-1;
	}

	return ret;
}

static struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
};

/* create the debugfs control files and hook the callback into ftrace */
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
				    &max_stack_size, &stack_max_size_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_max_size' entry\n");

	entry = debugfs_create_file("stack_trace", 0444, d_tracer,
				    NULL, &stack_trace_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_trace' entry\n");

	register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);