
#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_RAW_DATA,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
			    filter)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter) __packed

#include "trace_entries.h"

/*
 * syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};
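
/*
 * Illustration only (not compiled): with the __field()/F_STRUCT()/
 * FTRACE_ENTRY() definitions above, an entry description such as the
 * function-trace entry in trace_entries.h roughly expands to a plain
 * struct, e.g.:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 *
 * The FTRACE_ENTRY_REG()/FTRACE_ENTRY_PACKED() variants only add a
 * register function or __packed on top of that expansion.
 */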

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

	bool			ignore_pid;
#ifdef CONFIG_FUNCTION_TRACER
	bool			ftrace_ignore_pid;
#endif
};

struct tracer;
struct trace_option_dentry;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
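
/*
 * Hedged usage sketch (illustrative, not part of this header): an output
 * handler typically downcasts the generic iterator entry with
 * trace_assign_type() before touching type-specific fields, roughly:
 *
 *	static enum print_line_t my_fn_output(struct trace_iterator *iter)
 *	{
 *		struct ftrace_entry *field;
 *
 *		trace_assign_type(field, iter->ent);
 *		trace_seq_printf(&iter->seq, "ip=%lx parent=%lx\n",
 *				 field->ip, field->parent_ip);
 *		return trace_handle_return(&iter->seq);
 *	}
 *
 * my_fn_output() is a made-up name. The WARN_ON() in IF_ASSIGN() fires if
 * the entry's type id does not match the struct being assigned.
 */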

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
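
/*
 * Hedged sketch (illustrative, not from this header): a tracer that wants
 * private options typically builds an empty-terminated tracer_opt array
 * with TRACER_OPT() and points a tracer_flags at it, roughly:
 *
 *	#define MY_OPT_VERBOSE	0x1		// made-up option bit
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my-verbose, MY_OPT_VERBOSE) },
 *		{ }				// terminator
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,			// initial option state
 *		.opts = my_opts,
 *	};
 *
 * The tracer then sets its ->flags member to &my_flags and reacts to bit
 * changes in its ->set_flag() callback.
 */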

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};


/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function
 * graph in irq, because we want to trace a particular function that
 * was called in irq context while we have irq tracing off. Since this
 * can only be modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
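
/*
 * Hedged usage sketch (illustrative, not from this header): a function
 * trace callback guards itself with the helpers above, roughly:
 *
 *	static void my_func_call(unsigned long ip, unsigned long parent_ip,
 *				 struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		int bit;
 *
 *		bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *						   TRACE_FTRACE_MAX);
 *		if (bit < 0)
 *			return;		// recursing in this context
 *
 *		// ... record the function event ...
 *
 *		trace_clear_recursion(bit);
 *	}
 *
 * my_func_call() is a made-up name. A return of -1 means this context
 * (NMI/irq/softirq/normal) already holds its recursion bit; 0 means a
 * caller higher in the chain already did the check, and
 * trace_clear_recursion(0) is then a no-op.
 */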

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
int tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
					struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			enable;
};

enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	unsigned long		flags;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40
#define TRACE_GRAPH_PRINT_TAIL		0x80
#define TRACE_GRAPH_SLEEP_TIME		0x100
#define TRACE_GRAPH_GRAPH_TIME		0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);
extern void ftrace_graph_graph_time_control(bool enable);
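
/*
 * Hedged sketch (illustrative): tracers that reuse the function-graph
 * output select which columns to emit by OR-ing the flags above and
 * handing the result to the print helpers declared below, roughly:
 *
 *	#define MY_GRAPH_FLAGS	(TRACE_GRAPH_PRINT_CPU |	\
 *				 TRACE_GRAPH_PRINT_PROC |	\
 *				 TRACE_GRAPH_PRINT_DURATION)
 *
 *	return print_graph_function_flags(iter, MY_GRAPH_FLAGS);
 *
 * MY_GRAPH_FLAGS is a made-up name; the latency tracers define their own
 * combinations of these bits.
 */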

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);

#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash *ftrace_graph_hash;
extern struct ftrace_hash *ftrace_graph_notrace_hash;

static inline int ftrace_graph_addr(unsigned long addr)
{
	int ret = 0;

	preempt_disable_notrace();

	if (ftrace_hash_empty(ftrace_graph_hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_irq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;

	preempt_disable_notrace();

	if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;

static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* Trace it when it is nested in, or is, an enabled function. */
	return !(trace->depth || ftrace_graph_addr(trace->func)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);

extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
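
/*
 * Hedged usage sketch (illustrative, not from this header): write()
 * handlers that accept space-separated tokens from user space typically
 * drive the parser like this (ubuf/cnt/ppos come from the write callback):
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, 256))	// 256: example size
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		// parser.buffer now holds one NUL-terminated word
 *	}
 *
 *	trace_parser_put(&parser);
 *
 * trace_parser_cont() is true when the write ended without a trailing
 * space or newline, i.e. the token may continue in the next write.
 */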

/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
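
/*
 * Illustration only (not compiled): for a single entry such as
 * C(PRINT_PARENT, "print-parent"), the two expansions of C() above
 * produce, respectively:
 *
 *	TRACE_ITER_PRINT_PARENT_BIT,		// in enum trace_iterator_bits
 *	TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT)
 *						// in enum trace_iterator_flags
 *
 * so every name in TRACE_FLAGS automatically gets both a bit index and a
 * mask, and trace.c reuses the same list (with yet another definition of
 * C()) to build the trace_options strings.
 */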

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred __rcu	*preds;
	struct filter_pred __rcu	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}

/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}
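
/*
 * Hedged usage sketch (illustrative, not from this header): event probes
 * that reserve their own ring buffer space typically pair a reserve from
 * linux/trace_events.h with one of the commit helpers above; trace_file,
 * call, irq_flags and pc are assumed context here, and my_entry is a
 * made-up entry type:
 *
 *	struct ring_buffer *buffer;
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *						call->event.type,
 *						sizeof(*entry), irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	// ... fill in *entry ...
 *	event_trigger_unlock_commit(trace_file, buffer, event, entry,
 *				    irq_flags, pc);
 *
 * event_trigger_unlock_commit() then runs any attached triggers and either
 * commits or discards the event as described above.
 */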

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
	MATCH_GLOB,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t 	fn;
	u64 			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int 			offset;
	int 			not;
	int 			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING ||
	       field->filter_type == FILTER_COMM;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);

extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
extern int register_trigger_hist_enable_disable_cmds(void);

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @reg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below). @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};

/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif

extern struct trace_iterator *tracepoint_print_iter;

#endif /* _LINUX_KERNEL_TRACE_H */