// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/trace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_RAW_DATA,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)				\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
			    filter)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter) __packed

#include "trace_entries.h"

/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...) ({			\
	static bool __section(.data.once) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("ERROR: " fmt, ##__VA_ARGS__);		\
	}							\
	unlikely(__ret_warn_once);				\
})
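/*
 * Example (illustrative sketch only): MEM_FAIL evaluates to the truth
 * value of the condition, so it can gate an error path while warning
 * exactly once per boot:
 *
 *	buf = kzalloc(size, GFP_KERNEL);
 *	if (MEM_FAIL(!buf, "failed to allocate trace buffer\n"))
 *		return -ENOMEM;
 */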
/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

	bool			ignore_pid;
#ifdef CONFIG_FUNCTION_TRACER
	bool			ftrace_ignore_pid;
#endif
};

struct tracer;
struct trace_option_dentry;

struct array_buffer {
	struct trace_array		*tr;
	struct trace_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};

typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function.  That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array.  Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_cond_snapshot_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_cond_snapshot_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held.  The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken.  Because the max_lock is held for
 *	the duration of update(), the implementation can safely
 *	retrieve and save any implementation data it needs to in
 *	association with the snapshot.
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};
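/*
 * Example (illustrative sketch, hypothetical names): an update()
 * callback that only allows the snapshot when the instance latency
 * exceeds a threshold stashed in cond_data (note that max_latency is
 * only present with CONFIG_TRACER_MAX_TRACE or CONFIG_HWLAT_TRACER):
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *threshold = cond_data;
 *
 *		return tr->max_latency > *threshold;
 *	}
 *
 * The callback would be wired up via tracing_cond_snapshot_enable()
 * and exercised by tracing_snapshot_cond(tr, cond_data).
 */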
/*
 * The trace array - an array of per-CPU trace arrays.  This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct array_buffer	array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * they continue examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * array_buffer.  When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the array_buffer
	 * and the buffers are reset for the array_buffer so the
	 * tracing can continue.
	 */
	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
#ifdef CONFIG_FSNOTIFY
	struct dentry		*d_max_latency;
	struct work_struct	fsnotify_work;
	struct irq_work		fsnotify_irqwork;
#endif
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot.  The buffers themselves are
	 * protected by per_cpu spinlocks.  But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE block.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			time_stamp_abs_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);

extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)			\
	if (FTRACE_CMP_TYPE(var, etype)) {			\
		var = (typeof(var))(entry);			\
		WARN_ON(id != 0 && (entry)->type != id);	\
		break;						\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned.  To add new types, simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item, and "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
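/*
 * Example (illustrative sketch): a print callback typically declares a
 * pointer of the expected entry type and lets trace_assign_type()
 * verify the entry's type id before dereferencing it:
 *
 *	struct print_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%s", field->buf);
 */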
/*
 * An option specific to a tracer.  This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer.  Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
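/*
 * Example (illustrative sketch, hypothetical option name): defining a
 * single private "foo" option for a tracer would look like:
 *
 *	#define MY_OPT_FOO	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(foo, MY_OPT_FOO) },
 *		{ }	// terminator
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,	// initial value of the flags
 *		.opts = my_opts,
 *	};
 */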
struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};
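/*
 * Example (illustrative sketch, hypothetical tracer): only @name,
 * @init and @reset are needed before handing the tracer to
 * register_tracer(), typically from an __init function:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	// in an __init function: register_tracer(&my_tracer);
 */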
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support an ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support an ftrace feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did.  The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
	/*
	 * Abuse of the trace_recursion.
	 * We need a way to maintain state if we are tracing the function
	 * graph in irq, because we want to trace a particular function that
	 * was called in irq context but we have irq tracing off.  Since this
	 * can only be modified by current, we can reuse trace_recursion.
	 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace.  In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!  Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_BIT.
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)
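/*
 * Example (illustrative sketch): because only current touches
 * trace_recursion, these helpers need no atomics.  A typical pattern
 * sets a bit, does the work, and clears it again:
 *
 *	if (trace_recursion_test(TRACE_BRANCH_BIT))
 *		return;		// already inside, avoid recursing
 *	trace_recursion_set(TRACE_BRANCH_BIT);
 *	// ... record the event ...
 *	trace_recursion_clear(TRACE_BRANCH_BIT);
 */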
#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
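/*
 * Example (illustrative sketch): a function trace callback guards
 * itself against recursion per context (NMI/irq/softirq/normal):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *					   TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;		// recursion detected in this context
 *	// ... do the tracing work ...
 *	trace_clear_recursion(bit);
 */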
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
					struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
unsigned long trace_total_entries(struct trace_array *tr);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);
#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		   void *cond_data);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
	defined(CONFIG_FSNOTIFY)

void latency_fsnotify(struct trace_array *tr);

#else

static inline void latency_fsnotify(struct trace_array *tr) { }

#endif

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
extern unsigned long ftrace_number_of_pages;
extern unsigned long ftrace_number_of_groups;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up.  These can be __init functions.  Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed.  Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			enable;
};

enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	unsigned long		flags;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_REL_TIME	0x40
#define TRACE_GRAPH_PRINT_IRQS		0x80
#define TRACE_GRAPH_PRINT_TAIL		0x100
#define TRACE_GRAPH_SLEEP_TIME		0x200
#define TRACE_GRAPH_GRAPH_TIME		0x400
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);

#ifdef CONFIG_FUNCTION_PROFILER
extern void ftrace_graph_graph_time_control(bool enable);
#else
static inline void ftrace_graph_graph_time_control(bool enable) { }
#endif

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);
#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash *ftrace_graph_hash;
extern struct ftrace_hash *ftrace_graph_notrace_hash;

static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;

	preempt_disable_notrace();

	if (ftrace_hash_empty(ftrace_graph_hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {

		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		trace_recursion_set(TRACE_GRAPH_BIT);
		trace_recursion_set_depth(trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_irq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

 out:
	preempt_enable_notrace();
	return ret;
}

static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{
	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
	    trace->depth == trace_recursion_depth())
		trace_recursion_clear(TRACE_GRAPH_BIT);
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;

	preempt_disable_notrace();

	if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;

static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* Trace it when it is nested in an enabled function or is itself enabled. */
	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
		 ftrace_graph_addr(trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* The ftrace_func_t type is not defined; use a macro instead of a static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);
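/*
 * Example (illustrative sketch, hypothetical names): a probe's init()
 * callback commonly uses a func_mapper to associate per-ip data with
 * each traced ip, and free() tears it down again:
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	if (!mapper)
 *		return -ENOMEM;
 *	ret = ftrace_func_mapper_add_ip(mapper, ip, my_data);
 *	...
 *	void **val = ftrace_func_mapper_find_ip(mapper, ip);
 *	...
 *	free_ftrace_func_mapper(mapper, my_free_func);
 *
 * Here my_data and my_free_func stand in for probe-specific storage.
 */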
extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
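/*
 * Example (illustrative sketch): a write() handler that parses
 * space-separated tokens from user space typically loops like:
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		// parser.buffer now holds one NUL-terminated token
 *		...
 *	trace_parser_put(&parser);
 */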
/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *	 trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
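/*
 * Example (illustrative expansion): with the two C() definitions
 * above, the first entry of TRACE_FLAGS expands roughly to:
 *
 *	enum trace_iterator_bits  { TRACE_ITER_PRINT_PARENT_BIT, ... };
 *	enum trace_iterator_flags { TRACE_ITER_PRINT_PARENT =
 *			(1 << TRACE_ITER_PRINT_PARENT_BIT), ... };
 *
 * so a flag test looks like:
 *
 *	if (tr->trace_flags & TRACE_ITER_PRINT_PARENT)
 *		...
 */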
extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct prog_entry;

struct event_filter {
	struct prog_entry	__rcu *prog;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry, event);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself.  It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt);
}
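/*
 * Example (illustrative sketch): an event probe reserves a ring
 * buffer event, fills in the entry, and finishes with the helper so
 * that triggers and filters are honored:
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, file, type,
 *						sizeof(*entry),
 *						irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	// ... fill in entry fields ...
 *	event_trigger_unlock_commit(file, buffer, event, entry,
 *				    irq_flags, pc);
 */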
/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself.  It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct trace_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt);
}

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs.  One bit is used for both the IS_RIGHT
 * and FOLD flags.  The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
	MATCH_GLOB,
	MATCH_INDEX,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING ||
	       field->filter_type == FILTER_COMM;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
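/*
 * Example (illustrative): filter_parse_regex() classifies a pattern by
 * the position of '*' wildcards, so a filter string maps to a
 * regex_type roughly as follows:
 *
 *	"func"   -> MATCH_FULL
 *	"func*"  -> MATCH_FRONT_ONLY
 *	"*func"  -> MATCH_END_ONLY
 *	"*func*" -> MATCH_MIDDLE_ONLY
 *
 * with *search pointing at the pattern stripped of its wildcards.
 */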
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_array *tr,
			       struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);

extern int event_trace_init(void);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct trace_event_file *__find_event_file(struct trace_array *tr,
						  const char *system,
						  const char *event);
extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return READ_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;
extern const struct file_operations event_inject_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs.  The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function).  This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function).  This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself.  This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec,
					struct ring_buffer_event *rbe);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};
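/*
 * Example (illustrative sketch, hypothetical names): a minimal ops
 * instance only needs a probe function; the init/free/print hooks can
 * be the generic helpers referenced in the comment above:
 *
 *	static void my_trigger(struct event_trigger_data *data,
 *			       void *rec, struct ring_buffer_event *rbe)
 *	{
 *		// act on the event, e.g. tracing_off()
 *	}
 *
 *	static struct event_trigger_ops my_trigger_ops = {
 *		.func = my_trigger,
 *		.init = event_trigger_init,
 *		// .free and .print filled in similarly
 *	};
 */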

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event.  The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event.  When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @flags, must be set for
 * each event command.
 *
 * @name: The unique name that identifies the event command.  This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'.  This value has two purposes: the first is to ensure
 *	that only one trigger of the same type can be set at a given
 *	time for a particular event; e.g. it doesn't make sense to
 *	have both a traceon and traceoff trigger attached to a single
 *	event at the same time, so traceon and traceoff have the same
 *	type though they have different names.  The @trigger_type
 *	value is also used as a bit value for deferring the actual
 *	trigger action until after the current event is finished.
 *	Some commands need to do this if they themselves log to the
 *	trace buffer (see the EVENT_CMD_FL_POST_TRIGGER flag below).
 *	@trigger_type values are defined by adding new values to the
 *	trigger_type enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user.  It allocates the trigger instance and registers it with
 *	the appropriate trace event.  It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_events_trigger.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed.  This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_events_trigger.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free()
 *	function).  This is usually implemented by the generic utility
 *	function @unregister_trigger() (see trace_events_trigger.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event.  Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger.  If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored.  This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_events_trigger.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
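
/*
 * Illustrative sketch (hypothetical, not compiled): defining and
 * registering a command.  The "my_*" names are made up;
 * register_event_command() and set_trigger_filter() are declared in
 * this header, and ETT_TRACE_ONOFF stands in for whatever
 * trigger_type enum value (include/linux/trace_events.h) the command
 * would actually use.
 *
 *	static struct event_command my_cmd = {
 *		.name			= "mycmd",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.func			= my_event_trigger_func,
 *		.reg			= my_register_trigger,
 *		.unreg			= my_unregister_trigger,
 *		.get_trigger_ops	= my_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	static __init int my_cmd_init(void)
 *	{
 *		return register_event_command(&my_cmd);
 *	}
 */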

/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed.  Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer.  Thus
 *	we make sure the current event is committed before invoking
 *	those triggers.  To do that, the trigger invocation is split
 *	in two: the first part checks the filter using the current
 *	trace record; if a command has the @POST_TRIGGER flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger.  Once all commands have been
 *	either invoked or have set their return flag, the current
 *	record is either committed or discarded.  At that point, if
 *	any commands have deferred their triggers, those commands are
 *	finally invoked following the close of the current event.  In
 *	other words, if the event_trigger_ops @func() probe
 *	implementation itself logs to the trace buffer, this flag
 *	should be set, otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}
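
/*
 * Simplified sketch of how the trigger dispatch path consults these
 * flags (loosely modeled on event_triggers_call() in
 * trace_events_trigger.c; the variable names here are made up):
 *
 *	list_for_each_entry_rcu(data, &file->triggers, list) {
 *		if (event_command_post_trigger(data->cmd_ops)) {
 *			// Defer: record the type bit and fire this
 *			// trigger only after the event is committed.
 *			tt |= data->cmd_ops->trigger_type;
 *			continue;
 *		}
 *		data->ops->func(data, rec, rbe);	// fire now
 *	}
 */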

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);
extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
extern int tracing_snapshot_cond_enable(struct trace_array *tr,
					void *cond_data,
					cond_update_fn_t update);
extern int tracing_snapshot_cond_disable(struct trace_array *tr);
extern void *tracing_cond_snapshot_data(struct trace_array *tr);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

#define MAX_EVENT_NAME_LEN	64

extern int trace_run_command(const char *buf, int (*createfn)(int, char **));
extern ssize_t trace_parse_run_command(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos,
		int (*createfn)(int, char **));

extern unsigned int err_pos(char *cmd, const char *str);
extern void tracing_log_err(struct trace_array *tr,
			    const char *loc, const char *cmd,
			    const char **errs, u8 type, u8 pos);
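
/*
 * Illustrative sketch (hypothetical, not compiled): wiring a write
 * handler to trace_parse_run_command() above, which splits the user
 * buffer into lines and hands each one to the createfn callback as an
 * argc/argv pair (the dynamic-event files, e.g. kprobe_events, follow
 * this pattern).  The "my_*" names are made up.
 *
 *	static int my_create(int argc, char **argv)
 *	{
 *		// argv[0] is the command, argv[1..argc-1] its args
 *		...
 *	}
 *
 *	static ssize_t my_write(struct file *file, const char __user *buf,
 *				size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buf, count, ppos,
 *					       my_create);
 *	}
 */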

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display.  But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance).  internal_trace_puts() exists for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
#else
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	return 0;
}
#endif

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1);
void tracer_preempt_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_IRQSOFF_TRACER
void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

extern struct trace_iterator *tracepoint_print_iter;

/*
 * Reset the state of the trace_iterator so that it can read consumed data.
 * Normally, the trace_iterator is used for reading the data when it is not
 * consumed, and must retain state.
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	const size_t offset = offsetof(struct trace_iterator, seq);

	/*
	 * Keep gcc from complaining about overwriting more than just one
	 * member in the structure.
	 */
	memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);

	iter->pos = -1;
}

#endif /* _LINUX_KERNEL_TRACE_H */