/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>
#include "trace.h"

#ifdef CONFIG_BRANCH_TRACER

static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);
static struct trace_array *branch_tracer;

static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
        struct trace_array *tr = branch_tracer;
        struct ring_buffer_event *event;
        struct trace_branch *entry;
        unsigned long flags, irq_flags;
        int cpu, pc;
        const char *p;

        /*
         * I would love to save just the ftrace_likely_data pointer, but
         * this code can also be used by modules. Ugly things can happen
         * if the module is unloaded, and then we go and read the
         * pointer. This is slower, but much safer.
         */

        if (unlikely(!tr))
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                goto out;

        pc = preempt_count();
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type = TRACE_BRANCH;

        /* Strip off the path, only save the file */
        p = f->file + strlen(f->file);
        while (p >= f->file && *p != '/')
                p--;
        p++;

        strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
        strncpy(entry->file, p, TRACE_FILE_SIZE);
        entry->func[TRACE_FUNC_SIZE] = 0;
        entry->file[TRACE_FILE_SIZE] = 0;
        entry->line = f->line;
        entry->correct = val == expect;

        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

 out:
        atomic_dec(&tr->data[cpu]->disabled);
        local_irq_restore(flags);
}

static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
        if (!branch_tracing_enabled)
                return;

        probe_likely_condition(f, val, expect);
}

int enable_branch_tracing(struct trace_array *tr)
{
        int ret = 0;

        mutex_lock(&branch_tracing_mutex);
        branch_tracer = tr;
        /*
         * branch_tracer must be visible before branch_tracing_enabled is
         * set: the reader tests the enabled flag first, so no matching
         * rmb() is needed.
         */
        smp_wmb();
        branch_tracing_enabled++;
        mutex_unlock(&branch_tracing_mutex);

        return ret;
}

void disable_branch_tracing(void)
{
        mutex_lock(&branch_tracing_mutex);

        if (!branch_tracing_enabled)
                goto out_unlock;

        branch_tracing_enabled--;

 out_unlock:
        mutex_unlock(&branch_tracing_mutex);
}

static void start_branch_trace(struct trace_array *tr)
{
        enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
        disable_branch_tracing();
}

static int branch_trace_init(struct trace_array *tr)
{
        int cpu;

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);

        start_branch_trace(tr);
        return 0;
}

static void branch_trace_reset(struct trace_array *tr)
{
        stop_branch_trace(tr);
}

struct tracer branch_trace __read_mostly =
{
        .name           = "branch",
        .init           = branch_trace_init,
        .reset          = branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_branch,
#endif
};

__init static int init_branch_trace(void)
{
        return register_tracer(&branch_trace);
}

device_initcall(init_branch_trace);
#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */

void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
        /*
         * I would love to have a trace point here instead, but the
         * trace point code is so inundated with unlikely and likely
         * conditions that the recursive nightmare that exists is too
         * much to try to get working. At least for now.
         */
        trace_likely_condition(f, val, expect);

        /* FIXME: Make this atomic! */
        if (val == expect)
                f->correct++;
        else
                f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);

/*
 * Bounds of one branch-profile section. "hit" is set for the
 * all-branches profile, which reports hit/miss instead of
 * correct/incorrect.
 */
struct ftrace_pointer {
        void    *start;
        void    *stop;
        int     hit;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        const struct ftrace_pointer *f = m->private;
        struct ftrace_branch_data *p = v;

        (*pos)++;

        /* (void *)1 is the header token handed out by t_start() */
        if (v == (void *)1)
                return f->start;

        ++p;

        if ((void *)p >= (void *)f->stop)
                return NULL;

        return p;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        void *t = (void *)1;
        loff_t l = 0;

        for (; t && l < *pos; t = t_next(m, t, &l))
                ;

        return t;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        const struct ftrace_pointer *fp = m->private;
        struct ftrace_branch_data *p = v;
        const char *f;
        long percent;

        if (v == (void *)1) {
                if (fp->hit)
                        seq_printf(m, "   miss      hit    %% ");
                else
                        seq_printf(m, " correct incorrect  %% ");
                seq_printf(m, "       Function                "
                              "  File              Line\n"
                              " ------- ---------  - "
                              "       --------                "
                              "  ----              ----\n");
                return 0;
        }

        /* Only print the file, not the path */
        f = p->file + strlen(p->file);
        while (f >= p->file && *f != '/')
                f--;
        f++;

        /*
         * The miss is overlaid on correct, and hit on incorrect.
         */
        if (p->correct) {
                percent = p->incorrect * 100;
                percent /= p->correct + p->incorrect;
        } else
                percent = p->incorrect ? 100 : -1;
        seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
        if (percent < 0)
                seq_printf(m, "  X ");
        else
                seq_printf(m, "%3ld ", percent);
        seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
        return 0;
}

static struct seq_operations tracing_likely_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int tracing_branch_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = seq_open(file, &tracing_likely_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = (void *)inode->i_private;
        }

        return ret;
}

static const struct file_operations tracing_branch_fops = {
        .open           = tracing_branch_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
};

#ifdef CONFIG_PROFILE_ALL_BRANCHES
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

static const struct ftrace_pointer ftrace_branch_pos = {
        .start          = __start_branch_profile,
        .stop           = __stop_branch_profile,
        .hit            = 1,
};

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

static const struct ftrace_pointer ftrace_annotated_branch_pos = {
        .start          = __start_annotated_branch_profile,
        .stop           = __stop_annotated_branch_profile,
};

static __init int ftrace_branch_init(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
                                    (void *)&ftrace_annotated_branch_pos,
                                    &tracing_branch_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'profile_annotated_branch' entry\n");

#ifdef CONFIG_PROFILE_ALL_BRANCHES
        entry = debugfs_create_file("profile_branch", 0444, d_tracer,
                                    (void *)&ftrace_branch_pos,
                                    &tracing_branch_fops);
        if (!entry)
                pr_warning("Could not create debugfs"
                           " 'profile_branch' entry\n");
#endif

        return 0;
}

device_initcall(ftrace_branch_init);
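
/*
 * Usage sketch (illustrative, not part of the original file): the exact
 * debugfs mount point is assumed to be /sys/kernel/debug, and the
 * profile_branch file only exists when CONFIG_PROFILE_ALL_BRANCHES is set.
 *
 *   echo branch > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 *   cat /sys/kernel/debug/tracing/profile_annotated_branch
 *
 * The "branch" tracer records each annotated likely()/unlikely() test in
 * the ring buffer via probe_likely_condition(), while the profile_* files
 * dump the aggregate correct/incorrect (or hit/miss) counters formatted
 * by t_show() above.
 */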