#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BOOT,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot {
	struct trace_entry	ent;
	struct boot_trace	initcall;
};

/*
 * trace_flag_type is an enumeration that holds the different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - a reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 *  CONT	   - multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};
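
/*
 * Illustrative sketch (not part of this header): the flag bits above are
 * stored in trace_entry->flags by tracing_generic_entry_update(), so an
 * output routine can decode them with plain bitwise tests, e.g.:
 *
 *	int hardirq  = entry->flags & TRACE_FLAG_HARDIRQ;
 *	int softirq  = entry->flags & TRACE_FLAG_SOFTIRQ;
 *	char irqs_off = (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.';
 */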

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example which task started
 * the trace, etc.):
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	long			ctrl;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type() macro is a verifier that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 *	IF_ASSIGN(var, ent, type, id);
 *
 * where "type" is the trace structure that includes the trace_entry
 * as its "ent" member, and "id" is the trace identifier used in the
 * trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT);	\
		__ftrace_bad_type();					\
	} while (0)
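
/*
 * Illustrative sketch (not part of this header): a printout routine
 * typically checks iter->ent->type and then uses trace_assign_type() to
 * get at the type-specific fields, e.g.:
 *
 *	struct ftrace_entry *field;
 *
 *	if (iter->ent->type == TRACE_FN) {
 *		trace_assign_type(field, iter->ent);
 *		trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *				 field->ip, field->parent_ip);
 *	}
 */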

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	void			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*start)(struct trace_iterator *iter);
	void			(*stop)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	void			(*ctrl_update)(struct trace_array *tr);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	struct tracer		*next;
	int			print_max;
};
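
/*
 * Illustrative sketch (not part of this header): a minimal tracer fills in
 * a name plus the callbacks it needs and announces itself to the tracing
 * core with register_tracer() (declared below); "foo" is hypothetical:
 *
 *	static struct tracer foo_tracer __read_mostly = {
 *		.name		= "foo",
 *		.init		= foo_trace_init,
 *		.reset		= foo_trace_reset,
 *		.ctrl_update	= foo_trace_ctrl_update,
 *	};
 *
 *	static int __init init_foo_tracer(void)
 *	{
 *		return register_tracer(&foo_tracer);
 *	}
 *	device_initcall(init_foo_tracer);
 */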

struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;
};

void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()		do { } while (0)
# define tracing_stop_function_trace()		do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t	func;
	void			*private;
	struct tracer_switch_ops *next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *	 trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
	TRACE_ITER_RAW		= 0x10,
	TRACE_ITER_HEX		= 0x20,
	TRACE_ITER_BIN		= 0x40,
	TRACE_ITER_BLOCK	= 0x80,
	TRACE_ITER_STACKTRACE	= 0x100,
	TRACE_ITER_SCHED_TREE	= 0x200,
	TRACE_ITER_PRINTK	= 0x400,
};
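
/*
 * Illustrative sketch (not part of this header): the printout code tests
 * these bits against the global trace_flags word declared above, e.g.:
 *
 *	int sym_offset = trace_flags & TRACE_ITER_SYM_OFFSET;
 *	int verbose    = trace_flags & TRACE_ITER_VERBOSE;
 *
 * and picks the corresponding output format; the bits are toggled at run
 * time via the options exposed in the tracing debugfs directory.
 */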

extern struct tracer nop_trace;

#endif /* _LINUX_KERNEL_TRACE_H */