/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>

#include "trace.h"
#include "trace_stat.h"
#include "trace_output.h"

#ifdef CONFIG_BRANCH_TRACER

static struct tracer branch_trace;
static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);

static struct trace_array *branch_tracer;

static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct ftrace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
	struct trace_array_cpu *data;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	struct ring_buffer *buffer;
	unsigned long flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (atomic_inc_return(&data->disabled) != 1)
		goto out;

	pc = preempt_count();
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

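/*
 * Worked example for the basename-stripping loop above: the pointer walks
 * backward from the terminating NUL, so for f->file == "kernel/sched/core.c"
 * it stops at the last '/' and the final p++ leaves p pointing at "core.c".
 * If the path contains no '/' at all, p steps one byte before the buffer,
 * the loop exits, and p++ restores it to f->file, keeping the whole string.
 */
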
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	if (!branch_tracing_enabled)
		return;

	probe_likely_condition(f, val, expect);
}

int enable_branch_tracing(struct trace_array *tr)
{
	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * branch_tracer must be seen before branch_tracing_enabled is
	 * set. The reader only checks the flag as a condition, so a
	 * missed update just means a missed event; no matching rmb()
	 * is needed.
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return 0;
}

void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);

	if (!branch_tracing_enabled)
		goto out_unlock;

	branch_tracing_enabled--;

 out_unlock:
	mutex_unlock(&branch_tracing_mutex);
}

static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}

static int branch_trace_init(struct trace_array *tr)
{
	start_branch_trace(tr);
	return 0;
}

static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}

static enum print_line_t trace_branch_print(struct trace_iterator *iter,
					    int flags, struct trace_event *event)
{
	struct trace_branch *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
			 field->correct ? "  ok  " : " MISS ",
			 field->func,
			 field->file,
			 field->line);

	return trace_handle_return(&iter->seq);
}

static void branch_print_header(struct seq_file *s)
{
	seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
		    "  FUNC:FILE:LINE\n"
		    "#              | |       |          |         |"
		    "    |\n");
}

static struct trace_event_functions trace_branch_funcs = {
	.trace		= trace_branch_print,
};

static struct trace_event trace_branch_event = {
	.type		= TRACE_BRANCH,
	.funcs		= &trace_branch_funcs,
};

static struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif /* CONFIG_FTRACE_SELFTEST */
	.print_header	= branch_print_header,
};

__init static int init_branch_tracer(void)
{
	int ret;

	/* register_ftrace_event() returns 0 on failure */
	ret = register_ftrace_event(&trace_branch_event);
	if (!ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "branch events\n");
		return 1;
	}
	return register_tracer(&branch_trace);
}
core_initcall(init_branch_tracer);

#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */

void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);

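/*
 * For reference, a rough sketch of the caller side. The exact macros live
 * in include/linux/compiler.h and vary by kernel version, but with
 * CONFIG_TRACE_BRANCH_PROFILING each likely()/unlikely() expands to
 * something like:
 *
 *	({
 *		static struct ftrace_branch_data ______f
 *			__attribute__((section("_ftrace_annotated_branch"))) = {
 *			.func = __func__,
 *			.file = __FILE__,
 *			.line = __LINE__,
 *		};
 *		int ______r = !!(cond);
 *		ftrace_likely_update(&______f, ______r, expect);
 *		______r;
 *	})
 *
 * with expect == 1 for likely() and expect == 0 for unlikely(). The
 * section placement is what makes the linker-generated
 * __start/__stop_annotated_branch_profile markers below delimit all of
 * these per-site records.
 */
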
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

static int annotated_branch_stat_headers(struct seq_file *m)
{
	seq_puts(m, " correct incorrect  %        Function                "
		    "  File              Line\n"
		    " ------- ---------  -        --------                "
		    "  ----              ----\n");
	return 0;
}

static inline long get_incorrect_percent(struct ftrace_branch_data *p)
{
	long percent;

	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : -1;

	return percent;
}

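/*
 * Worked example: correct = 3, incorrect = 1 gives 1 * 100 / (3 + 1) = 25,
 * i.e. the annotation was wrong 25% of the time. The correct == 0 case is
 * handled separately to avoid a division by zero when incorrect is also 0:
 * a site that was never hit returns -1, which branch_stat_show() below
 * renders as 'X'.
 */
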
static int branch_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	/*
	 * The miss is overlayed on correct, and hit on incorrect.
	 */
	percent = get_incorrect_percent(p);

	seq_printf(m, "%8lu %8lu ",  p->correct, p->incorrect);
	if (percent < 0)
		seq_puts(m, "  X ");
	else
		seq_printf(m, "%3ld ", percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}

static void *annotated_branch_stat_start(struct tracer_stat *trace)
{
	return __start_annotated_branch_profile;
}

static void *
annotated_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_annotated_branch_profile)
		return NULL;

	return p;
}

static int annotated_branch_stat_cmp(void *p1, void *p2)
{
	struct ftrace_branch_data *a = p1;
	struct ftrace_branch_data *b = p2;

	long percent_a, percent_b;

	percent_a = get_incorrect_percent(a);
	percent_b = get_incorrect_percent(b);

	if (percent_a < percent_b)
		return -1;
	if (percent_a > percent_b)
		return 1;

	if (a->incorrect < b->incorrect)
		return -1;
	if (a->incorrect > b->incorrect)
		return 1;

	/*
	 * Since the above shows worse (incorrect) cases
	 * first, we continue that by showing best (correct)
	 * cases last.
	 */
	if (a->correct > b->correct)
		return -1;
	if (a->correct < b->correct)
		return 1;

	return 0;
}

static struct tracer_stat annotated_branch_stats = {
	.name = "branch_annotated",
	.stat_start = annotated_branch_stat_start,
	.stat_next = annotated_branch_stat_next,
	.stat_cmp = annotated_branch_stat_cmp,
	.stat_headers = annotated_branch_stat_headers,
	.stat_show = branch_stat_show
};

__init static int init_annotated_branch_stats(void)
{
	int ret;

	/* register_stat_tracer() returns non-zero on failure */
	ret = register_stat_tracer(&annotated_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "annotated branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(init_annotated_branch_stats);

#ifdef CONFIG_PROFILE_ALL_BRANCHES

extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

static int all_branch_stat_headers(struct seq_file *m)
{
	seq_puts(m, "   miss      hit    %        Function                "
		    "  File              Line\n"
		    " ------- ---------  -        --------                "
		    "  ----              ----\n");
	return 0;
}

static void *all_branch_stat_start(struct tracer_stat *trace)
{
	return __start_branch_profile;
}

static void *
all_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_branch_profile)
		return NULL;

	return p;
}

static struct tracer_stat all_branch_stats = {
	.name = "branch_all",
	.stat_start = all_branch_stat_start,
	.stat_next = all_branch_stat_next,
	.stat_headers = all_branch_stat_headers,
	.stat_show = branch_stat_show
};

__init static int all_annotated_branch_stats(void)
{
	int ret;

	ret = register_stat_tracer(&all_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "all branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(all_annotated_branch_stats);
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
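
/*
 * Usage sketch (assuming tracefs/debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo branch > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/trace
 *	# cat /sys/kernel/debug/tracing/trace_stat/branch_annotated
 *
 * A typical branch_annotated line, matching branch_stat_show() above
 * (the values here are illustrative only):
 *
 *	 correct incorrect  %        Function                  File              Line
 *	 ------- ---------  -        --------                  ----              ----
 *	       0       163 100 mutex_spin_on_owner            mutex.c             142
 */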