// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/trace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>
#include <linux/once_lite.h>
#include <linux/ftrace_regs.h>

#include "pid_list.h"

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_syscalls	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

#define TRACE_MODE_WRITE	0640
#define TRACE_MODE_READ		0440

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_GRAPH_RETADDR_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_OSNOISE,
	TRACE_TIMERLAT,
	TRACE_RAW_DATA,
	TRACE_FUNC_REPEATS,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_fn
#define __field_fn(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __field_packed
#define __field_packed(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

/*
 * For backward compatibility, older user space expects to see the
 * kernel_stack event with a fixed size caller field. But today the fixed
 * size is ignored by the kernel, and the real structure is dynamic.
 * Expose to user space: "unsigned long caller[8];" but the real structure
 * will be "unsigned long caller[] __counted_by(size)"
 */
#undef __stack_array
#define __stack_array(type, item, size, field)	type item[] __counted_by(field);

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef __rel_dynamic_array
#define __rel_dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
	struct struct_name {					\
		struct trace_entry	ent;			\
		tstruct						\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed

#include "trace_entries.h"
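/*
 * Illustrative sketch, modeled on the function entry in
 * trace_entries.h (the print argument is elided): an invocation of
 * the form
 *
 *	FTRACE_ENTRY(function, ftrace_entry,
 *		TRACE_FN,
 *		F_STRUCT(
 *			__field_fn(	unsigned long,	ip		)
 *			__field_fn(	unsigned long,	parent_ip	)
 *		),
 *		...
 *	);
 *
 * expands, with the macro definitions above, into:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */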
/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...)					\
	DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)

#define FAULT_STRING "(fault)"

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct eprobe_trace_entry_head {
	struct trace_entry	ent;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

struct fentry_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct fexit_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
	int			ftrace_ignore_pid;
#endif
	bool			ignore_pid;
};

struct tracer;
struct trace_option_dentry;

struct array_buffer {
	struct trace_array		*tr;
	struct trace_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list *trace_pid_list_alloc(void);
void trace_pid_list_free(struct trace_pid_list *pid_list);
bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
			unsigned int *next);

enum {
	TRACE_PIDS		= BIT(0),
	TRACE_NO_PIDS		= BIT(1),
};

static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
				    struct trace_pid_list *no_pid_list)
{
	/* Return true if the pid list in type has pids */
	return ((type & TRACE_PIDS) && pid_list) ||
		((type & TRACE_NO_PIDS) && no_pid_list);
}

static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
					 struct trace_pid_list *no_pid_list)
{
	/*
	 * Turning off what is in @type, return true if the "other"
	 * pid list still has pids in it.
	 */
	return (!(type & TRACE_PIDS) && pid_list) ||
		(!(type & TRACE_NO_PIDS) && no_pid_list);
}
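/*
 * Hedged usage sketch for the trace_pid_list API declared above
 * (error handling abbreviated; "my_pid" is a made-up example value):
 *
 *	struct trace_pid_list *pid_list = trace_pid_list_alloc();
 *
 *	if (!pid_list)
 *		return -ENOMEM;
 *	if (trace_pid_list_set(pid_list, my_pid) < 0)
 *		...				// out of memory
 *	if (trace_pid_list_is_set(pid_list, my_pid))
 *		...				// my_pid is in the list
 *	trace_pid_list_clear(pid_list, my_pid);
 *	trace_pid_list_free(pid_list);
 *
 * trace_pid_list_first()/trace_pid_list_next() walk the set pids in
 * ascending order, returning non-zero once no pid is left.
 */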
typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function.  That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array.  Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_snapshot_cond_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_snapshot_cond_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held.  The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken.  Because the max_lock is held for
 *	the duration of update(), the implementation is safe to
 *	directly retrieve and save any implementation data it needs
 *	to in association with the snapshot.
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};

/*
 * struct trace_func_repeats - used to keep track of the consecutive
 * (on the same CPU) calls of a single function.
 */
struct trace_func_repeats {
	unsigned long	ip;
	unsigned long	parent_ip;
	unsigned long	count;
	u64		ts_last_call;
};
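/*
 * Hedged sketch of a cond_snapshot.update() callback; the names
 * my_cond_update() and "threshold" are hypothetical.  It is called
 * with tr->max_lock held, so it may look at the saved cond_data
 * directly:
 *
 *	static bool my_cond_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *threshold = tracing_cond_snapshot_data(tr);
 *		unsigned long *val = cond_data;
 *
 *		return *val > *threshold;	// true: take the snapshot
 *	}
 *
 * registered via tracing_snapshot_cond_enable(tr, &threshold,
 * my_cond_update) and triggered via tracing_snapshot_cond(tr, &val).
 */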
/*
 * The trace array - an array of per-CPU trace arrays.  This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct array_buffer	array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * array_buffer.  When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the array_buffer
	 * and the buffers are reset for the array_buffer so the
	 * tracing can continue.
	 */
	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
	spinlock_t		snapshot_trigger_lock;
	unsigned int		snapshot;
	unsigned long		max_latency;
#ifdef CONFIG_FSNOTIFY
	struct dentry		*d_max_latency;
	struct work_struct	fsnotify_work;
	struct irq_work		fsnotify_irqwork;
#endif
#endif
	/* The below is for memory mapped ring buffer */
	unsigned int		mapped;
	unsigned long		range_addr_start;
	unsigned long		range_addr_size;
	long			text_delta;
	long			data_delta;

	struct trace_pid_list	__rcu *filtered_pids;
	struct trace_pid_list	__rcu *filtered_no_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot.  The buffers themselves are
	 * protected by per_cpu spinlocks.  But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	const char		*system_names;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct eventfs_inode	*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	/* one per_cpu trace_pipe can be opened by only one user */
	cpumask_var_t		pipe_cpumask;
	int			ref;
	int			trace_ref;
#ifdef CONFIG_MODULES
	struct list_head	mod_events;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	struct trace_pid_list	__rcu *function_no_pids;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct fgraph_ops	*gops;
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			no_filter_buffering_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
	struct trace_func_repeats	__percpu *last_func_repeats;
	/*
	 * On boot up, the ring buffer is set to the minimum size, so that
	 * we do not waste memory on systems that are not using tracing.
	 */
	bool ring_buffer_expanded;
};
enum {
	TRACE_ARRAY_FL_GLOBAL	= BIT(0),
	TRACE_ARRAY_FL_BOOT	= BIT(1),
	TRACE_ARRAY_FL_MOD_INIT	= BIT(2),
};

#ifdef CONFIG_MODULES
bool module_exists(const char *module);
#else
static inline bool module_exists(const char *module)
{
	return false;
}
#endif

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);

extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)			\
	if (FTRACE_CMP_TYPE(var, etype)) {			\
		var = (typeof(var))(entry);			\
		WARN_ON(id != 0 && (entry)->type != id);	\
		break;						\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned.  To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item.  And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
		IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct fgraph_retaddr_ent_entry,	\
			  TRACE_GRAPH_RETADDR_ENT);			\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct func_repeats_entry,		\
			  TRACE_FUNC_REPEATS);				\
		__ftrace_bad_type();					\
	} while (0)

/*
 * An option specific to a tracer.  This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};
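/*
 * Hedged usage sketch for trace_assign_type(): a print callback that
 * needs the concrete entry type typically does (field names follow
 * struct print_entry; the surrounding function is hypothetical):
 *
 *	struct trace_entry *ent = iter->ent;
 *	struct print_entry *field;
 *
 *	trace_assign_type(field, ent);
 *	trace_seq_printf(&iter->seq, "%s", field->buf);
 *
 * If "field" were declared with a type whose id does not match
 * ent->type, the WARN_ON() in IF_ASSIGN() fires at runtime, and a
 * type not listed above fails to link via __ftrace_bad_type().
 */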
/*
 * The set of specific options for a tracer.  Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer option */
#define TRACER_OPT(s, b)	.name = #s, .bit = b


struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};
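/*
 * Hedged sketch of a minimal tracer definition using the structures
 * above; the "example" names are hypothetical, and a real tracer is
 * registered from an initcall via register_tracer():
 *
 *	static struct tracer_opt example_opts[] = {
 *		{ TRACER_OPT(example_verbose, 0x1) },
 *		{ }				// terminator
 *	};
 *
 *	static struct tracer_flags example_flags = {
 *		.val  = 0,			// initial option bits
 *		.opts = example_opts,
 *	};
 *
 *	static struct tracer example_tracer __tracer_data = {
 *		.name	= "example",
 *		.init	= example_init,
 *		.reset	= example_reset,
 *		.flags	= &example_flags,
 *	};
 */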
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_all_online_cpus(void);
void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
int tracing_release_generic_tr(struct inode *inode, struct file *file);
int tracing_open_file_tr(struct inode *inode, struct file *filp);
int tracing_release_file_tr(struct inode *inode, struct file *filp);
int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

int tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx);

int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
					struct ring_buffer_event *event);

bool trace_is_tracepoint_string(const char *str);
const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
char *trace_iter_expand_format(struct trace_iterator *iter);
bool ignore_event(struct trace_iterator *iter);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
unsigned long trace_total_entries(struct trace_array *tr);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned int trace_ctx,
		    struct ftrace_regs *regs);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned int trace_ctx);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops,
			struct ftrace_regs *fregs);
int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops,
		      struct ftrace_regs *fregs);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
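/*
 * Hedged usage sketch: for_each_tracing_cpu() is a plain iterator
 * over the cpumask above, e.g. to sum per-CPU entry counts (the
 * "total" variable is made up for illustration):
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		total += trace_total_entries_cpu(tr, cpu);
 */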
extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct trace_pid_list *filtered_no_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		   void *cond_data);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

#ifdef CONFIG_FSNOTIFY
#define LATENCY_FS_NOTIFY
#endif
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef LATENCY_FS_NOTIFY
void latency_fsnotify(struct trace_array *tr);
#else
static inline void latency_fsnotify(struct trace_array *tr) { }
#endif

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
#else
static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
				 int skip)
{
}
#endif /* CONFIG_STACKTRACE */

void trace_last_func_repeats(struct trace_array *tr,
			     struct trace_func_repeats *last_info,
			     unsigned int trace_ctx);

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
extern unsigned long ftrace_number_of_pages;
extern unsigned long ftrace_number_of_groups;
extern u64 ftrace_update_time;
extern u64 ftrace_total_mod_time;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern void trace_set_ring_buffer_expanded(struct trace_array *tr);
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern void __init disable_tracing_selftest(const char *reason);

extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up.  These can be __init functions.  Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
static inline void __init disable_tracing_selftest(const char *reason)
{
}
/* Tracers are seldom changed.  Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			enable;
};

enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	unsigned long		flags;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_REL_TIME	0x40
#define TRACE_GRAPH_PRINT_IRQS		0x80
#define TRACE_GRAPH_PRINT_TAIL		0x100
#define TRACE_GRAPH_SLEEP_TIME		0x200
#define TRACE_GRAPH_GRAPH_TIME		0x400
#define TRACE_GRAPH_PRINT_RETVAL	0x800
#define TRACE_GRAPH_PRINT_RETVAL_HEX	0x1000
#define TRACE_GRAPH_PRINT_RETADDR	0x2000
#define TRACE_GRAPH_ARGS		0x4000
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);

#ifdef CONFIG_FUNCTION_PROFILER
extern void ftrace_graph_graph_time_control(bool enable);
#else
static inline void ftrace_graph_graph_time_control(bool enable) { }
#endif

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
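/*
 * Hedged usage sketch: the TRACE_GRAPH_PRINT_* values above are OR-ed
 * into a single flags word before being handed to the print helpers:
 *
 *	u32 flags = TRACE_GRAPH_PRINT_CPU |
 *		    TRACE_GRAPH_PRINT_PROC |
 *		    TRACE_GRAPH_PRINT_DURATION;
 *
 *	print_graph_headers_flags(s, flags);
 *	print_graph_function_flags(iter, flags);
 *
 * Note that the two FILL bits (see the mask above) carry a small enum
 * rather than a simple on/off option.
 */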
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned int trace_ctx);
extern int __trace_graph_retaddr_entry(struct trace_array *tr,
				       struct ftrace_graph_ent *trace,
				       unsigned int trace_ctx,
				       unsigned long retaddr);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned int trace_ctx,
				 u64 calltime, u64 rettime);

extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern void free_fgraph_ops(struct trace_array *tr);

enum {
	TRACE_GRAPH_FL		= 1,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace.  In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!!  Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_FL.
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};

#define TRACE_GRAPH_NOTRACE	(1 << TRACE_GRAPH_NOTRACE_BIT)

static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
{
	return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
}

static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
{
	*task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
	*task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
}
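/*
 * Worked example of the bit layout above: TRACE_GRAPH_FL is value 1
 * (bit 0) and TRACE_GRAPH_DEPTH_START_BIT is 2, so the depth lives in
 * bits 2-3 of the per-task word.  After
 *
 *	unsigned long task_var = 0;
 *
 *	task_var |= TRACE_GRAPH_FL;
 *	ftrace_graph_set_depth(&task_var, 2);
 *
 * task_var is 0x9 (binary 1001: flag set, depth 2) and
 * ftrace_graph_depth(&task_var) returns 2.  The "& 3" mask limits the
 * stored depth to two bits, matching the "can't be greater than 3"
 * guarantee in the comment above.
 */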
#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;

static inline int
ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {
		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		*task_var |= TRACE_GRAPH_FL;
		ftrace_graph_set_depth(task_var, trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_hardirq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline void
ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
{
	unsigned long *task_var = fgraph_get_task_var(gops);

	if ((*task_var & TRACE_GRAPH_FL) &&
	    trace->depth == ftrace_graph_depth(task_var))
		*task_var &= ~TRACE_GRAPH_FL;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;
	struct ftrace_hash *notrace_hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
						 !preemptible());

	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;
extern bool fgraph_sleep_time;
static inline bool
ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
{
	unsigned long *task_var = fgraph_get_task_var(gops);

	/* Trace it when it is nested in an enabled function or is itself an enabled function. */
	return !((*task_var & TRACE_GRAPH_FL) ||
		 ftrace_graph_addr(task_var, trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

void fgraph_init_ops(struct ftrace_ops *dst_ops,
		     struct ftrace_ops *src_ops);

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
static inline void free_fgraph_ops(struct trace_array *tr) { }
/* ftrace_ops may not be defined */
#define init_array_fgraph_ops(tr, ops) do { } while (0)
#define allocate_fgraph_ops(tr, ops) ({ 0; })
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER

#define FTRACE_PID_IGNORE	-1
#define FTRACE_PID_TRACE	-2

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
		FTRACE_PID_IGNORE;
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
int ftrace_allocate_ftrace_ops(struct trace_array *tr);
void ftrace_free_ftrace_ops(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
struct trace_array *trace_get_global_array(void);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	return 0;
}
static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* The ftrace_func_t type is not defined; use a macro instead of a static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);

extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);

extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
			     int len, int reset);
extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			      int len, int reset);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
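/*
 * Hedged usage sketch for the trace_parser API above, as a write()
 * handler might use it (error handling abbreviated; the buffer size
 * is an arbitrary example):
 *
 *	struct trace_parser parser;
 *	int ret;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser))
 *		...			// parser.buffer holds one token
 *
 *	trace_parser_put(&parser);
 *
 * trace_parser_cont() reports that the current token continues into
 * the next write() chunk, i.e. no terminating space was seen yet.
 */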
/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(FIELDS,		"fields"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		C(TRACE_PRINTK,		"trace_printk_dest"),	\
		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
		C(HASH_PTR,		"hash-ptr"),	/* Print hashed pointer */ \
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
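/*
 * Worked expansion of the C() trick above, using the first list entry
 * as an example: with C(a, b) defined to TRACE_ITER_##a##_BIT, the
 * TRACE_FLAGS list contributes
 *
 *	TRACE_ITER_PRINT_PARENT_BIT,	// == 0, the first enumerator
 *
 * to enum trace_iterator_bits, and after C() is redefined it becomes
 *
 *	TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT),
 *
 * in enum trace_iterator_flags.  Because both enums are generated
 * from the same TRACE_FLAGS list, the bit names, their masks, and the
 * trace_options strings in trace.c can never get out of sync.
 */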
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(struct trace_array *tr);

union trace_synth_field {
	u8				as_u8;
	u16				as_u16;
	u32				as_u32;
	u64				as_u64;
	struct trace_dynamic_info	as_dynamic;
};

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	unsigned int		is_signed:1;
	unsigned int		needs_test:1;
	int			len;
};

struct prog_entry;

struct event_filter {
	struct prog_entry	__rcu *prog;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct eventfs_inode		*ei;
	int				ref_count;
	int				nr_events;
};

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned int trace_ctx,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned int trace_ctx)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
}

DECLARE_PER_CPU(bool, trace_taskinfo_save);
int trace_save_cmdline(struct task_struct *tsk);
int trace_create_savedcmd(void);
int trace_alloc_tgid_map(void);
void trace_free_saved_cmdlines_buffer(void);

extern const struct file_operations tracing_saved_cmdlines_fops;
extern const struct file_operations tracing_saved_tgids_fops;
extern const struct file_operations tracing_saved_cmdlines_size_fops;

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

void early_enable_events(struct trace_array *tr, char *buf, bool disable_first);

static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer and enable preemption */
		this_cpu_dec(trace_buffered_event_cnt);
		preempt_enable_notrace();
		return;
	}
	/* ring_buffer_discard_commit() enables preemption */
	ring_buffer_discard_commit(buffer, event);
}
/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, buffer, entry, event);

	if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
				    EVENT_FILE_FL_FILTERED |
				    EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
		goto discard;

	if (file->flags & EVENT_FILE_FL_FILTERED &&
	    !filter_match_preds(file->filter, entry))
		goto discard;

	if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(file))
		goto discard;

	return false;
discard:
	__trace_event_discard_commit(buffer, event);
	return true;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @trace_ctx: The tracing context flags.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself.  It also tests the event against filters
 * and checks whether the event is soft disabled and should be
 * discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned int trace_ctx)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);

	if (tt)
		event_triggers_post_call(file, tt);
}

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs.  One bit is used for both the IS_RIGHT
 * and FOLD flags.  The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED	16384
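/*
 * Worked check of the arithmetic above: predicate indexes are stored
 * in an unsigned short (16 bits); bit 15 carries the shared
 * IS_RIGHT/FOLD flag and bit 14 is reserved, which leaves
 * 16 - 2 = 14 index bits, i.e. 2^14 = 16384 == MAX_FILTER_PRED.
 */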
struct filter_pred;
struct regex;

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
	MATCH_GLOB,
	MATCH_INDEX,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_RDYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING ||
	       field->filter_type == FILTER_COMM;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_array *tr,
			       struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);

extern int event_trace_init(void);
extern int init_events(void);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);
extern void __trace_early_add_events(struct trace_array *tr);

extern struct trace_event_file *__find_event_file(struct trace_array *tr,
						  const char *system,
						  const char *event);
extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return READ_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

/*
 * When the trace_event_file is the filp->i_private pointer,
 * it must be taken under the event_mutex lock, and then checked
 * if the EVENT_FILE_FL_FREED flag is set.  If it is, then the
 * data pointed to by the trace_event_file cannot be trusted.
 *
 * Use event_file_file() to access the trace_event_file from
 * the filp the first time under the event_mutex and check for
 * NULL.  If it needs to be retrieved again and the event_mutex
 * is still held, then event_file_data() can be used and it
 * is guaranteed to be valid.
 */
static inline struct trace_event_file *event_file_file(struct file *filp)
{
	struct trace_event_file *file;

	lockdep_assert_held(&event_mutex);
	file = READ_ONCE(file_inode(filp)->i_private);
	if (!file || file->flags & EVENT_FILE_FL_FREED)
		return NULL;
	return file;
}
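/*
 * Hedged usage sketch of the locking pattern described above (the
 * surrounding file-op function is hypothetical):
 *
 *	struct trace_event_file *file;
 *
 *	mutex_lock(&event_mutex);
 *	file = event_file_file(filp);
 *	if (!file) {
 *		mutex_unlock(&event_mutex);
 *		return -ENODEV;		// freed or never set
 *	}
 *	...				// use "file" while the mutex is held
 *	mutex_unlock(&event_mutex);
 *
 * Any later lookup in the same critical section may use
 * event_file_data() directly, since the FREED check was already done.
 */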
 */
static inline struct trace_event_file *event_file_file(struct file *filp)
{
	struct trace_event_file *file;

	lockdep_assert_held(&event_mutex);
	file = READ_ONCE(file_inode(filp)->i_private);
	if (!file || file->flags & EVENT_FILE_FL_FREED)
		return NULL;
	return file;
}

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;
extern const struct file_operations event_hist_debug_fops;
extern const struct file_operations event_inject_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

enum {
	EVENT_TRIGGER_FL_PROBE	= BIT(0),
};

struct event_trigger_data {
	unsigned long count;
	int ref;
	int flags;
	const struct event_trigger_ops *ops;
	struct event_command *cmd_ops;
	struct event_filter __rcu *filter;
	char *filter_str;
	void *private_data;
	bool paused;
	bool paused_tmp;
	struct list_head list;
	char *name;
	struct list_head named_list;
	struct event_trigger_data *named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file *file;
	bool enable;
	bool hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_data *data);
extern int event_enable_trigger_parse(struct event_command *cmd_ops,
				      struct trace_event_file *file,
				      char *glob, char *cmd,
				      char *param_and_filter);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
extern int register_trigger_hist_enable_disable_cmds(void);
extern bool event_trigger_check_remove(const char *glob);
extern bool event_trigger_empty_param(const char *param);
extern int event_trigger_separate_filter(char *param_and_filter, char **param,
					 char **filter, bool param_required);
extern struct event_trigger_data *
event_trigger_alloc(struct event_command *cmd_ops,
		    char *cmd,
		    char *param,
		    void *private_data);
extern int event_trigger_parse_num(char *trigger,
				   struct event_trigger_data *trigger_data);
extern int event_trigger_set_filter(struct event_command *cmd_ops,
				    struct trace_event_file *file,
				    char *param,
				    struct event_trigger_data *trigger_data);
extern void event_trigger_reset_filter(struct event_command *cmd_ops,
				       struct event_trigger_data *trigger_data);
extern int event_trigger_register(struct event_command *cmd_ops,
				  struct trace_event_file *file,
				  char *glob,
				  struct event_trigger_data *trigger_data);
extern void event_trigger_unregister(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob,
				     struct event_trigger_data *trigger_data);

extern void event_file_get(struct trace_event_file *file);
extern void event_file_put(struct trace_event_file *file);

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * The @init and @free methods are used during trigger setup and
 * teardown, typically called from an event_command's @parse()
 * function implementation.
 *
 * The @print method is used to print the trigger spec.
 *
 * The @trigger method is the function that actually implements the
 * trigger and is called in the context of the triggering event
 * whenever that event occurs.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @trigger: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command @reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void (*trigger)(struct event_trigger_data *data,
			struct trace_buffer *buffer,
			void *rec,
			struct ring_buffer_event *rbe);
	int (*init)(struct event_trigger_data *data);
	void (*free)(struct event_trigger_data *data);
	int (*print)(struct seq_file *m,
		     struct event_trigger_data *data);
};

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @flags, must be set for
 * each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes: the first is to ensure
 *	that only one trigger of the same type can be set at a given
 *	time for a particular event; e.g. it doesn't make sense to
 *	have both a traceon and traceoff trigger attached to a single
 *	event at the same time, so traceon and traceoff have the same
 *	type though they have different names. The @trigger_type
 *	value is also used as a bit value for deferring the actual
 *	trigger action until after the current event is finished.
 *	Some commands need to do this if they themselves log to the
 *	trace buffer (see the EVENT_CMD_FL_POST_TRIGGER flag below).
 *	@trigger_type values are defined by adding new values to the
 *	event_trigger_type enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @parse: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free()
 *	function). This is usually implemented by the generic utility
 *	function @unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 *	This callback function allows a single event_command to
 *	support multiple trigger implementations via different sets of
 *	event_trigger_ops, depending on the value of the @param
 *	string.
 */
struct event_command {
	struct list_head list;
	char *name;
	enum event_trigger_type trigger_type;
	int flags;
	int (*parse)(struct event_command *cmd_ops,
		     struct trace_event_file *file,
		     char *glob, char *cmd,
		     char *param_and_filter);
	int (*reg)(char *glob,
		   struct event_trigger_data *data,
		   struct trace_event_file *file);
	void (*unreg)(char *glob,
		      struct event_trigger_data *data,
		      struct trace_event_file *file);
	void (*unreg_all)(struct trace_event_file *file);
	int (*set_filter)(char *filter_str,
			  struct event_trigger_data *data,
			  struct trace_event_file *file);
	const struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};

/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have either
 *	been invoked or have set their return flag, the current
 *	record is either committed or discarded. At that point, if
 *	any commands have deferred their triggers, those commands are
 *	finally invoked following the close of the current event. In
 *	other words, if the event_trigger_ops @trigger() probe
 *	implementation itself logs to the trace buffer, this flag
 *	should be set, otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);
extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
					cond_update_fn_t update);
extern int tracing_snapshot_cond_disable(struct trace_array *tr);
extern void *tracing_cond_snapshot_data(struct trace_array *tr);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/* Used from boot time tracer */
extern int trace_set_options(struct trace_array *tr, char *option);
extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id);
extern int tracing_set_cpumask(struct trace_array *tr,
			       cpumask_var_t tracing_cpumask_new);

#define MAX_EVENT_NAME_LEN	64

extern ssize_t trace_parse_run_command(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos,
		int (*createfn)(const char *));

extern unsigned int err_pos(char *cmd, const char *str);
extern void tracing_log_err(struct trace_array *tr,
			    const char *loc, const char *cmd,
			    const char **errs, u8 type, u16 pos);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections for display. But the trace infrastructure wants
 * to use these without the added overhead, at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). internal_trace_puts() exists for such
 * a purpose.
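 *
 * A typical (purely illustrative) use is a warning emitted into the
 * trace itself:
 *
 *	internal_trace_puts("*** tracing disabled due to error ***\n");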
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)	\
	extern struct trace_event_call				\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))

#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
/* Used from boot time tracer */
extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
extern int trigger_process_regex(struct trace_event_file *file, char *buff);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
int tracing_arm_snapshot(struct trace_array *tr);
void tracing_disarm_snapshot(struct trace_array *tr);
#else
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	return 0;
}
static inline int tracing_arm_snapshot(struct trace_array *tr) { return 0; }
static inline void tracing_disarm_snapshot(struct trace_array *tr) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1);
void tracer_preempt_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_IRQSOFF_TRACER
void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

/*
 * Reset the state of the trace_iterator so that it can read consumed data.
 * Normally, the trace_iterator is used for reading the data when it is not
 * consumed, and must retain state.
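 *
 * A consuming-read path would therefore do something like this before
 * each read (an illustrative sketch of such a caller):
 *
 *	trace_iterator_reset(iter);
 *	cpumask_clear(iter->started);
 *	trace_seq_init(&iter->seq);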
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	memset_startat(iter, 0, seq);
	iter->pos = -1;
}

/* Check that the name is good for an event/group/field */
static inline bool __is_good_name(const char *name, bool hash_ok)
{
	if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
		return false;
	while (*++name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
		    (!hash_ok || *name != '-'))
			return false;
	}
	return true;
}

/* Check that the name is good for an event/group/field */
static inline bool is_good_name(const char *name)
{
	return __is_good_name(name, false);
}

/* Check that the name is good for a system */
static inline bool is_good_system_name(const char *name)
{
	return __is_good_name(name, true);
}

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}

/*
 * This is a generic way to read and write a u64 value through a file
 * in tracefs.
 *
 * The value is stored in the variable pointed to by *val. It must be
 * at least *min and at most *max. Writes are protected by an existing
 * *lock.
 */
struct trace_min_max_param {
	struct mutex	*lock;
	u64		*val;
	u64		*min;
	u64		*max;
};

#define U64_STR_SIZE	24	/* 20 digits max */

extern const struct file_operations trace_min_max_fops;

#ifdef CONFIG_RV
extern int rv_init_interface(void);
#else
static inline int rv_init_interface(void)
{
	return 0;
}
#endif

/*
 * This is used only to distinguish a function address from trampoline
 * code, so the value itself has no meaning.
 */
#define FTRACE_TRAMPOLINE_MARKER	((unsigned long) INT_MAX)

#endif /* _LINUX_KERNEL_TRACE_H */
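
/*
 * Example wiring for trace_min_max_param / trace_min_max_fops above
 * (an illustrative sketch only; the "my_threshold" names and the
 * "parent" dentry are hypothetical):
 *
 *	static u64 my_threshold;
 *	static u64 my_threshold_max = 1000000;
 *	static DEFINE_MUTEX(my_threshold_lock);
 *
 *	static struct trace_min_max_param my_threshold_param = {
 *		.lock	= &my_threshold_lock,
 *		.val	= &my_threshold,
 *		.min	= NULL,		// NULL means no lower bound
 *		.max	= &my_threshold_max,
 *	};
 *
 *	trace_create_file("my_threshold", TRACE_MODE_WRITE, parent,
 *			  &my_threshold_param, &trace_min_max_fops);
 */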