// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

static LIST_HEAD(module_strings);

struct module_string {
	struct list_head	next;
	struct module		*module;
	char			*str;
};

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

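/*
 * Example pairing of the double-loop helpers above (see
 * trace_event_enable_cmd_record() below for a real user):
 *
 *	do_for_each_event_file(tr, file) {
 *		...
 *	} while_for_each_event_file();
 */
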
static struct ftrace_event_field *
__find_event_field(struct list_head *head, const char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}
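/*
 * Lookup order above: the event's own fields first, then the generic
 * pseudo fields (CPU, comm, stacktrace), and finally the common_*
 * fields shared by every event. The first match wins, so an event
 * field shadows a generic or common field of the same name.
 */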

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type, int len,
				int need_test)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->needs_test = need_test;
	field->len = len;

	list_add(&field->link, head);

	return 0;
}
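/*
 * Note that list_add() inserts at the head, so the fields end up in
 * reverse definition order: the most recently defined field (the one
 * with the largest offset) is always first. trace_event_get_offsets()
 * below depends on this ordering.
 */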

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, 0, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);

static int trace_define_field_ext(struct trace_event_call *call, const char *type,
				  const char *name, int offset, int size, int is_signed,
				  int filter_type, int len, int need_test)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, len, need_test);
}

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type, 0, 0);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER,	\
				   0, 0);				\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(int, common_cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);
	__generic_field(char *, stacktrace, FILTER_STACKTRACE);
	__generic_field(char *, STACKTRACE, FILTER_STACKTRACE);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	/* Holds both preempt_count and migrate_disable */
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}
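/*
 * Worked example: with struct trace_entry laid out as above, the
 * common fields resolve to roughly
 *
 *	common_type		offset:0  size:2
 *	common_flags		offset:2  size:1
 *	common_preempt_count	offset:3  size:1
 *	common_pid		offset:4  size:4
 *
 * which is what shows up at the top of every event's "format" file
 * (see f_show() below).
 */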

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}

static struct trace_event_fields *find_event_field(const char *fmt,
						   struct trace_event_call *call)
{
	struct trace_event_fields *field = call->class->fields_array;
	const char *p = fmt;
	int len;

	if (!(len = str_has_prefix(fmt, "REC->")))
		return NULL;
	fmt += len;
	for (p = fmt; *p; p++) {
		if (!isalnum(*p) && *p != '_')
			break;
	}
	len = p - fmt;

	for (; field->type; field++) {
		if (strncmp(field->name, fmt, len) || field->name[len])
			continue;

		return field;
	}
	return NULL;
}
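/*
 * Example: for an argument string "REC->prev_pid & 0xff", the code
 * above skips the "REC->" prefix, takes the identifier "prev_pid"
 * (alphanumerics and '_' only), and matches it against the event's
 * fields_array by exact name.
 */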

/*
 * Check if the referenced field is an array and return true,
 * as arrays are OK to dereference.
 */
static bool test_field(const char *fmt, struct trace_event_call *call)
{
	struct trace_event_fields *field;

	field = find_event_field(fmt, call);
	if (!field)
		return false;

	/* This is an array and is OK to dereference. */
	return strchr(field->type, '[') != NULL;
}

/* Look for a string within an argument */
static bool find_print_string(const char *arg, const char *str, const char *end)
{
	const char *r;

	r = strstr(arg, str);
	return r && r < end;
}

/* Return true if the argument pointer is safe */
static bool process_pointer(const char *fmt, int len, struct trace_event_call *call)
{
	const char *r, *e, *a;

	e = fmt + len;

	/* Find the REC-> in the argument */
	r = strstr(fmt, "REC->");
	if (r && r < e) {
		/*
		 * Addresses of events on the buffer, or arrays on the buffer,
		 * are OK to dereference. There are ways to fool this, but
		 * this is to catch common mistakes, not malicious code.
		 */
		a = strchr(fmt, '&');
		if ((a && (a < r)) || test_field(r, call))
			return true;
	} else if (find_print_string(fmt, "__get_dynamic_array(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_dynamic_array(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_dynamic_array_len(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_dynamic_array_len(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_sockaddr(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_sockaddr(", e)) {
		return true;
	}
	return false;
}
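/*
 * Summary of what process_pointer() accepts as safe:
 *
 *	&REC->field			address of data in the event itself
 *	REC->array_field		arrays are stored in the event
 *	__get_dynamic_array(field)	data copied into the event, plus
 *					the __rel_, ..._len and sockaddr
 *					variants of the same helpers
 *
 * Anything else that dereferences a recorded pointer is rejected.
 */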

/* Return true if the string is safe */
static bool process_string(const char *fmt, int len, struct trace_event_call *call)
{
	struct trace_event_fields *field;
	const char *r, *e, *s;

	e = fmt + len;

	/*
	 * There are several helper functions that return strings.
	 * If the argument contains a function, then assume its field is valid.
	 * It is considered that the argument has a function if it has:
	 *   alphanumeric or '_' before a parenthesis.
	 */
	s = fmt;
	do {
		r = strstr(s, "(");
		if (!r || r >= e)
			break;
		for (int i = 1; r - i >= s; i++) {
			char ch = *(r - i);
			if (isspace(ch))
				continue;
			if (isalnum(ch) || ch == '_')
				return true;
			/* Anything else, this isn't a function */
			break;
		}
		/* A function could be wrapped in parentheses, try the next one */
		s = r + 1;
	} while (s < e);

	/*
	 * Check for arrays. If the argument has: foo[REC->val]
	 * then it is very likely that foo is an array of strings
	 * that are safe to use.
	 */
	r = strstr(s, "[");
	if (r && r < e) {
		r = strstr(r, "REC->");
		if (r && r < e)
			return true;
	}

	/*
	 * If there are any string literals in the argument, consider it OK,
	 * as it could be: REC->field ? "foo" : "bar" and we don't want to
	 * get into verifying that logic here.
	 */
	if (find_print_string(fmt, "\"", e))
		return true;

	/* Dereferenced strings are also valid like any other pointer */
	if (process_pointer(fmt, len, call))
		return true;

	/* Make sure the field is found */
	field = find_event_field(fmt, call);
	if (!field)
		return false;

	/* Test this field's string before printing the event */
	call->flags |= TRACE_EVENT_FL_TEST_STR;
	field->needs_test = 1;

	return true;
}
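/*
 * Examples of %s arguments that pass the checks above:
 *
 *	__get_str(msg)			contains a function call
 *	names[REC->idx]			array indexed by an event field
 *	REC->flag ? "on" : "off"	contains a string literal
 *	__get_dynamic_array(buf)	safe pointer per process_pointer()
 *
 * A bare "REC->name" only passes if "name" resolves to an actual
 * field, in which case the string is verified at print time via
 * TRACE_EVENT_FL_TEST_STR.
 */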

/*
 * Examine the print fmt of the event looking for unsafe dereference
 * pointers using %p* that could be recorded in the trace event and
 * much later referenced after the pointer was freed. Dereferencing
 * pointers are OK, if it is dereferenced into the event itself.
 */
static void test_event_printk(struct trace_event_call *call)
{
	u64 dereference_flags = 0;
	u64 string_flags = 0;
	bool first = true;
	const char *fmt;
	int parens = 0;
	char in_quote = 0;
	int start_arg = 0;
	int arg = 0;
	int i, e;

	fmt = call->print_fmt;

	if (!fmt)
		return;

	for (i = 0; fmt[i]; i++) {
		switch (fmt[i]) {
		case '\\':
			i++;
			if (!fmt[i])
				return;
			continue;
		case '"':
		case '\'':
			/*
			 * The print fmt starts with a string that
			 * is processed first to find %p* usage,
			 * then after the first string, the print fmt
			 * contains arguments that are used to check
			 * if the dereferenced %p* usage is safe.
			 */
			if (first) {
				if (fmt[i] == '\'')
					continue;
				if (in_quote) {
					arg = 0;
					first = false;
					/*
					 * If there were no %p* uses,
					 * the fmt is OK.
					 */
					if (!dereference_flags)
						return;
				}
			}
			if (in_quote) {
				if (in_quote == fmt[i])
					in_quote = 0;
			} else {
				in_quote = fmt[i];
			}
			continue;
		case '%':
			if (!first || !in_quote)
				continue;
			i++;
			if (!fmt[i])
				return;
			switch (fmt[i]) {
			case '%':
				continue;
			case 'p':
				/* Find dereferencing fields */
				switch (fmt[i + 1]) {
				case 'B': case 'R': case 'r':
				case 'b': case 'M': case 'm':
				case 'I': case 'i': case 'E':
				case 'U': case 'V': case 'N':
				case 'a': case 'd': case 'D':
				case 'g': case 't': case 'C':
				case 'O': case 'f':
					if (WARN_ONCE(arg == 63,
						      "Too many args for event: %s",
						      trace_event_name(call)))
						return;
					dereference_flags |= 1ULL << arg;
				}
				break;
			default:
			{
				bool star = false;
				int j;

				/* Increment arg if %*s exists. */
				for (j = 0; fmt[i + j]; j++) {
					if (isdigit(fmt[i + j]) ||
					    fmt[i + j] == '.')
						continue;
					if (fmt[i + j] == '*') {
						star = true;
						continue;
					}
					if ((fmt[i + j] == 's')) {
						if (star)
							arg++;
						if (WARN_ONCE(arg == 63,
							      "Too many args for event: %s",
							      trace_event_name(call)))
							return;
						dereference_flags |= 1ULL << arg;
						string_flags |= 1ULL << arg;
					}
					break;
				}
				break;
			} /* default */

			} /* switch */
			arg++;
			continue;
		case '(':
			if (in_quote)
				continue;
			parens++;
			continue;
		case ')':
			if (in_quote)
				continue;
			parens--;
			if (WARN_ONCE(parens < 0,
				      "Paren mismatch for event: %s\narg='%s'\n%*s",
				      trace_event_name(call),
				      fmt + start_arg,
				      (i - start_arg) + 5, "^"))
				return;
			continue;
		case ',':
			if (in_quote || parens)
				continue;
			e = i;
			i++;
			while (isspace(fmt[i]))
				i++;

			/*
			 * If start_arg is zero, then this is the start of the
			 * first argument. The processing of the argument happens
			 * when the end of the argument is found, as it needs to
			 * handle parentheses and such.
			 */
			if (!start_arg) {
				start_arg = i;
				/* Balance out the i++ in the for loop */
				i--;
				continue;
			}

			if (dereference_flags & (1ULL << arg)) {
				if (string_flags & (1ULL << arg)) {
					if (process_string(fmt + start_arg, e - start_arg, call))
						dereference_flags &= ~(1ULL << arg);
				} else if (process_pointer(fmt + start_arg, e - start_arg, call))
					dereference_flags &= ~(1ULL << arg);
			}

			start_arg = i;
			arg++;
			/* Balance out the i++ in the for loop */
			i--;
		}
	}

	if (dereference_flags & (1ULL << arg)) {
		if (string_flags & (1ULL << arg)) {
			if (process_string(fmt + start_arg, i - start_arg, call))
				dereference_flags &= ~(1ULL << arg);
		} else if (process_pointer(fmt + start_arg, i - start_arg, call))
			dereference_flags &= ~(1ULL << arg);
	}

	/*
	 * If you triggered the below warning, the trace event reported
	 * uses an unsafe dereference pointer %p*. As the data stored
	 * at the trace event time may no longer exist when the trace
	 * event is printed, dereferencing to the original source is
	 * unsafe. The source of the dereference must be copied into the
	 * event itself, and the dereference must access the copy instead.
	 */
	if (WARN_ON_ONCE(dereference_flags)) {
		arg = 1;
		while (!(dereference_flags & 1)) {
			dereference_flags >>= 1;
			arg++;
		}
		pr_warn("event %s has unsafe dereference of argument %d\n",
			trace_event_name(call), arg);
		pr_warn("print_fmt: %s\n", fmt);
	}
}
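/*
 * Worked example (hypothetical event where "ip" is declared as a
 * fixed array field). For a print_fmt of
 *
 *	"ip=%pI4 name=%s", REC->ip, __get_str(name)
 *
 * the first pass over the format string marks arg 0 (%pI4) and arg 1
 * (%s) in dereference_flags. The second pass clears arg 0 because
 * "REC->ip" is an array stored in the event, and clears arg 1 because
 * "__get_str(name)" contains a function call. Had an argument been a
 * bare pointer, e.g. "%s", REC->name with name declared as char *,
 * the flag would survive and the WARN above would fire when the event
 * is registered.
 */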

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	test_event_printk(call);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	data = this_cpu_ptr(tr->array_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	fbuffer->trace_ctx = tracing_gen_ctx_dec();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->trace_ctx);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
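/*
 * Typical caller pattern (a sketch; the TRACE_EVENT-generated class
 * probes follow this shape): reserve an entry sized for the event,
 * fill in the returned struct, then commit it:
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry) + extra_len);
 *	if (!entry)
 *		return;
 *	entry->field = value;
 *	trace_event_buffer_commit(&fbuffer);
 */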

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
			/* Disable use of trace_buffered_event */
			trace_buffered_event_disable();
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
			/* Enable use of trace_buffered_event */
			trace_buffered_event_enable();
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	return ret;
}
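/*
 * Summary of the soft-disable state machine above: "soft" users (such
 * as event triggers) keep the tracepoint registered via the sm_ref
 * count while SOFT_DISABLED suppresses output, so the event appears
 * disabled to readers of the "enable" file yet can be flipped on
 * cheaply by just clearing the SOFT_DISABLED bit.
 */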

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev,
					struct task_struct *next,
					unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
	      trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev,
					 struct task_struct *next,
					 unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}

static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
						lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_pid_list_free(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_pid_list_free(no_pid_list);
}
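/*
 * Teardown order above matters: the sched probes are unregistered and
 * the list pointers set to NULL first, tracepoint_synchronize_unregister()
 * then waits out any probe still dereferencing the old lists, and only
 * after that are the pid lists actually freed.
 */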

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		eventfs_remove_dir(dir->ei);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

void event_file_get(struct trace_event_file *file)
{
	refcount_inc(&file->ref);
}

void event_file_put(struct trace_event_file *file)
{
	if (WARN_ON_ONCE(!refcount_read(&file->ref))) {
		if (file->flags & EVENT_FILE_FL_FREED)
			kmem_cache_free(file_cachep, file);
		return;
	}

	if (refcount_dec_and_test(&file->ref)) {
		/* Count should only go to zero when it is freed */
		if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
			return;
		kmem_cache_free(file_cachep, file);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	eventfs_remove_dir(file->ei);
	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	file->flags |= EVENT_FILE_FL_FREED;
	event_file_put(file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	if (!tr)
		return -ENOENT;
	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}
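/*
 * Examples of strings accepted by ftrace_set_clr_event():
 *
 *	"sched:sched_switch"	one event in the sched subsystem
 *	"sched:" or "sched:*"	every event in the sched subsystem
 *	":sched_switch"		any event named sched_switch
 *	"sched_switch"		matched against both event and
 *				subsystem names
 */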

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers(tr);
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *
__next(struct seq_file *m, void *v, loff_t *pos, int type)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list;

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_PIDS);
}

static void *
np_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_NO_PIDS);
}

static void *__start(struct seq_file *m, loff_t *pos, int type)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_PIDS);
}

static void *np_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_NO_PIDS);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_file(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}
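/*
 * The "enable" file thus reads back one of:
 *
 *	"0"	event disabled
 *	"1"	event enabled
 *	"0*"	soft disabled (e.g. held by a trigger)
 *	"1*"	enabled while in soft mode
 */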

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_file(filp);
		if (likely(file)) {
			ret = tracing_update_buffers(file->tr);
			if (ret < 0) {
				mutex_unlock(&event_mutex);
				return ret;
			}
			ret = ftrace_event_enable_disable(file, val);
		}
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
		    !trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers(dir->tr);
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};
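/*
 * The three sentinel values above stand in for positions in the
 * "format" file that are not backed by a field list node: f_next()
 * below treats FORMAT_HEADER and FORMAT_FIELD_SEPERATOR as entry
 * points into the common and event field lists respectively, and
 * FORMAT_PRINTFMT as the final record.
 */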

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = event_file_data(m->private);
	struct trace_event_call *call = file->event_call;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = event_file_data(m->private);
	struct trace_event_call *call = file->event_call;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic arrays).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (str_has_prefix(field->type, "__data_loc"))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else if (field->len)
		seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   field->len, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   field->offset, field->size, !!field->is_signed);

	return 0;
}
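/*
 * Illustrative output from f_show() for a scalar field and a
 * fixed-size array field (the offsets and sizes here are made up):
 *
 *	field:int pid;	offset:8;	size:4;	signed:1;
 *	field:char comm[16];	offset:12;	size:16;	signed:0;
 *
 * Note that f_next() walks the field lists via node->prev, so fields
 * print in definition order even though __trace_define_field() stores
 * them newest-first.
 */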
1768
f_start(struct seq_file * m,loff_t * pos)1769 static void *f_start(struct seq_file *m, loff_t *pos)
1770 {
1771 struct trace_event_file *file;
1772 void *p = (void *)FORMAT_HEADER;
1773 loff_t l = 0;
1774
1775 /* ->stop() is called even if ->start() fails */
1776 mutex_lock(&event_mutex);
1777 file = event_file_file(m->private);
1778 if (!file)
1779 return ERR_PTR(-ENODEV);
1780
1781 while (l < *pos && p)
1782 p = f_next(m, p, &l);
1783
1784 return p;
1785 }
1786
f_stop(struct seq_file * m,void * p)1787 static void f_stop(struct seq_file *m, void *p)
1788 {
1789 mutex_unlock(&event_mutex);
1790 }
1791
1792 static const struct seq_operations trace_format_seq_ops = {
1793 .start = f_start,
1794 .next = f_next,
1795 .stop = f_stop,
1796 .show = f_show,
1797 };
1798
trace_format_open(struct inode * inode,struct file * file)1799 static int trace_format_open(struct inode *inode, struct file *file)
1800 {
1801 struct seq_file *m;
1802 int ret;
1803
1804 /* Do we want to hide event format files on tracefs lockdown? */
1805
1806 ret = seq_open(file, &trace_format_seq_ops);
1807 if (ret < 0)
1808 return ret;
1809
1810 m = file->private_data;
1811 m->private = file;
1812
1813 return 0;
1814 }
1815
1816 #ifdef CONFIG_PERF_EVENTS
1817 static ssize_t
event_id_read(struct file * filp,char __user * ubuf,size_t cnt,loff_t * ppos)1818 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1819 {
1820 int id = (long)event_file_data(filp);
1821 char buf[32];
1822 int len;
1823
1824 if (unlikely(!id))
1825 return -ENODEV;
1826
1827 len = sprintf(buf, "%d\n", id);
1828
1829 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
1830 }
1831 #endif
1832
1833 static ssize_t
event_filter_read(struct file * filp,char __user * ubuf,size_t cnt,loff_t * ppos)1834 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1835 loff_t *ppos)
1836 {
1837 struct trace_event_file *file;
1838 struct trace_seq *s;
1839 int r = -ENODEV;
1840
1841 if (*ppos)
1842 return 0;
1843
1844 s = kmalloc(sizeof(*s), GFP_KERNEL);
1845
1846 if (!s)
1847 return -ENOMEM;
1848
1849 trace_seq_init(s);
1850
1851 mutex_lock(&event_mutex);
1852 file = event_file_file(filp);
1853 if (file)
1854 print_event_filter(file, s);
1855 mutex_unlock(&event_mutex);
1856
1857 if (file)
1858 r = simple_read_from_buffer(ubuf, cnt, ppos,
1859 s->buffer, trace_seq_used(s));
1860
1861 kfree(s);
1862
1863 return r;
1864 }
1865
1866 static ssize_t
event_filter_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)1867 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1868 loff_t *ppos)
1869 {
1870 struct trace_event_file *file;
1871 char *buf;
1872 int err = -ENODEV;
1873
1874 if (cnt >= PAGE_SIZE)
1875 return -EINVAL;
1876
1877 buf = memdup_user_nul(ubuf, cnt);
1878 if (IS_ERR(buf))
1879 return PTR_ERR(buf);
1880
1881 mutex_lock(&event_mutex);
1882 file = event_file_file(filp);
1883 if (file) {
1884 if (file->flags & EVENT_FILE_FL_FREED)
1885 err = -ENODEV;
1886 else
1887 err = apply_event_filter(file, buf);
1888 }
1889 mutex_unlock(&event_mutex);
1890
1891 kfree(buf);
1892 if (err < 0)
1893 return err;
1894
1895 *ppos += cnt;
1896
1897 return cnt;
1898 }
1899
1900 static LIST_HEAD(event_subsystems);
1901
subsystem_open(struct inode * inode,struct file * filp)1902 static int subsystem_open(struct inode *inode, struct file *filp)
1903 {
1904 struct trace_subsystem_dir *dir = NULL, *iter_dir;
1905 struct trace_array *tr = NULL, *iter_tr;
1906 struct event_subsystem *system = NULL;
1907 int ret;
1908
1909 if (tracing_is_disabled())
1910 return -ENODEV;
1911
1912 /* Make sure the system still exists */
1913 mutex_lock(&event_mutex);
1914 mutex_lock(&trace_types_lock);
1915 list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) {
1916 list_for_each_entry(iter_dir, &iter_tr->systems, list) {
1917 if (iter_dir == inode->i_private) {
1918 /* Don't open systems with no events */
1919 tr = iter_tr;
1920 dir = iter_dir;
1921 if (dir->nr_events) {
1922 __get_system_dir(dir);
1923 system = dir->subsystem;
1924 }
1925 goto exit_loop;
1926 }
1927 }
1928 }
1929 exit_loop:
1930 mutex_unlock(&trace_types_lock);
1931 mutex_unlock(&event_mutex);
1932
1933 if (!system)
1934 return -ENODEV;
1935
1936 	/* Still need to increment the ref count of the trace array */
1937 if (trace_array_get(tr) < 0) {
1938 put_system(dir);
1939 return -ENODEV;
1940 }
1941
1942 ret = tracing_open_generic(inode, filp);
1943 if (ret < 0) {
1944 trace_array_put(tr);
1945 put_system(dir);
1946 }
1947
1948 return ret;
1949 }
1950
1951 static int system_tr_open(struct inode *inode, struct file *filp)
1952 {
1953 struct trace_subsystem_dir *dir;
1954 struct trace_array *tr = inode->i_private;
1955 int ret;
1956
1957 /* Make a temporary dir that has no system but points to tr */
1958 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1959 if (!dir)
1960 return -ENOMEM;
1961
1962 ret = tracing_open_generic_tr(inode, filp);
1963 if (ret < 0) {
1964 kfree(dir);
1965 return ret;
1966 }
1967 dir->tr = tr;
1968 filp->private_data = dir;
1969
1970 return 0;
1971 }
1972
1973 static int subsystem_release(struct inode *inode, struct file *file)
1974 {
1975 struct trace_subsystem_dir *dir = file->private_data;
1976
1977 trace_array_put(dir->tr);
1978
1979 /*
1980 * If dir->subsystem is NULL, then this is a temporary
1981 * descriptor that was made for a trace_array to enable
1982 * all subsystems.
1983 */
1984 if (dir->subsystem)
1985 put_system(dir);
1986 else
1987 kfree(dir);
1988
1989 return 0;
1990 }
1991
1992 static ssize_t
1993 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1994 loff_t *ppos)
1995 {
1996 struct trace_subsystem_dir *dir = filp->private_data;
1997 struct event_subsystem *system = dir->subsystem;
1998 struct trace_seq *s;
1999 int r;
2000
2001 if (*ppos)
2002 return 0;
2003
2004 s = kmalloc(sizeof(*s), GFP_KERNEL);
2005 if (!s)
2006 return -ENOMEM;
2007
2008 trace_seq_init(s);
2009
2010 print_subsystem_event_filter(system, s);
2011 r = simple_read_from_buffer(ubuf, cnt, ppos,
2012 s->buffer, trace_seq_used(s));
2013
2014 kfree(s);
2015
2016 return r;
2017 }
2018
2019 static ssize_t
2020 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
2021 loff_t *ppos)
2022 {
2023 struct trace_subsystem_dir *dir = filp->private_data;
2024 char *buf;
2025 int err;
2026
2027 if (cnt >= PAGE_SIZE)
2028 return -EINVAL;
2029
2030 buf = memdup_user_nul(ubuf, cnt);
2031 if (IS_ERR(buf))
2032 return PTR_ERR(buf);
2033
2034 err = apply_subsystem_event_filter(dir, buf);
2035 kfree(buf);
2036 if (err < 0)
2037 return err;
2038
2039 *ppos += cnt;
2040
2041 return cnt;
2042 }
2043
2044 static ssize_t
2045 show_header_page_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2046 {
2047 struct trace_array *tr = filp->private_data;
2048 struct trace_seq *s;
2049 int r;
2050
2051 if (*ppos)
2052 return 0;
2053
2054 s = kmalloc(sizeof(*s), GFP_KERNEL);
2055 if (!s)
2056 return -ENOMEM;
2057
2058 trace_seq_init(s);
2059
2060 ring_buffer_print_page_header(tr->array_buffer.buffer, s);
2061 r = simple_read_from_buffer(ubuf, cnt, ppos,
2062 s->buffer, trace_seq_used(s));
2063
2064 kfree(s);
2065
2066 return r;
2067 }
2068
2069 static ssize_t
2070 show_header_event_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2071 {
2072 struct trace_seq *s;
2073 int r;
2074
2075 if (*ppos)
2076 return 0;
2077
2078 s = kmalloc(sizeof(*s), GFP_KERNEL);
2079 if (!s)
2080 return -ENOMEM;
2081
2082 trace_seq_init(s);
2083
2084 ring_buffer_print_entry_header(s);
2085 r = simple_read_from_buffer(ubuf, cnt, ppos,
2086 s->buffer, trace_seq_used(s));
2087
2088 kfree(s);
2089
2090 return r;
2091 }
2092
2093 static void ignore_task_cpu(void *data)
2094 {
2095 struct trace_array *tr = data;
2096 struct trace_pid_list *pid_list;
2097 struct trace_pid_list *no_pid_list;
2098
2099 /*
2100 * This function is called by on_each_cpu() while the
2101 * event_mutex is held.
2102 */
2103 pid_list = rcu_dereference_protected(tr->filtered_pids,
2104 mutex_is_locked(&event_mutex));
2105 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
2106 mutex_is_locked(&event_mutex));
2107
2108 this_cpu_write(tr->array_buffer.data->ignore_pid,
2109 trace_ignore_this_task(pid_list, no_pid_list, current));
2110 }
2111
2112 static void register_pid_events(struct trace_array *tr)
2113 {
2114 /*
2115 * Register a probe that is called before all other probes
2116 * to set ignore_pid if next or prev do not match.
2117 	 * Register a probe that is called after all other probes
2118 * to only keep ignore_pid set if next pid matches.
2119 */
2120 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
2121 tr, INT_MAX);
2122 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
2123 tr, 0);
2124
2125 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
2126 tr, INT_MAX);
2127 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
2128 tr, 0);
2129
2130 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
2131 tr, INT_MAX);
2132 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
2133 tr, 0);
2134
2135 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
2136 tr, INT_MAX);
2137 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
2138 tr, 0);
2139 }
2140
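/*
 * Update a trace instance's pid filters. @type selects which list is
 * written: TRACE_PIDS backs "set_event_pid" (trace only these pids)
 * and TRACE_NO_PIDS backs "set_event_notrace_pid" (never trace them).
 */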
2141 static ssize_t
2142 event_pid_write(struct file *filp, const char __user *ubuf,
2143 size_t cnt, loff_t *ppos, int type)
2144 {
2145 struct seq_file *m = filp->private_data;
2146 struct trace_array *tr = m->private;
2147 struct trace_pid_list *filtered_pids = NULL;
2148 struct trace_pid_list *other_pids = NULL;
2149 struct trace_pid_list *pid_list;
2150 struct trace_event_file *file;
2151 ssize_t ret;
2152
2153 if (!cnt)
2154 return 0;
2155
2156 ret = tracing_update_buffers(tr);
2157 if (ret < 0)
2158 return ret;
2159
2160 mutex_lock(&event_mutex);
2161
2162 if (type == TRACE_PIDS) {
2163 filtered_pids = rcu_dereference_protected(tr->filtered_pids,
2164 lockdep_is_held(&event_mutex));
2165 other_pids = rcu_dereference_protected(tr->filtered_no_pids,
2166 lockdep_is_held(&event_mutex));
2167 } else {
2168 filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
2169 lockdep_is_held(&event_mutex));
2170 other_pids = rcu_dereference_protected(tr->filtered_pids,
2171 lockdep_is_held(&event_mutex));
2172 }
2173
2174 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
2175 if (ret < 0)
2176 goto out;
2177
2178 if (type == TRACE_PIDS)
2179 rcu_assign_pointer(tr->filtered_pids, pid_list);
2180 else
2181 rcu_assign_pointer(tr->filtered_no_pids, pid_list);
2182
2183 list_for_each_entry(file, &tr->events, list) {
2184 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
2185 }
2186
2187 if (filtered_pids) {
2188 tracepoint_synchronize_unregister();
2189 trace_pid_list_free(filtered_pids);
2190 } else if (pid_list && !other_pids) {
2191 register_pid_events(tr);
2192 }
2193
2194 /*
2195 * Ignoring of pids is done at task switch. But we have to
2196 * check for those tasks that are currently running.
2197 * Always do this in case a pid was appended or removed.
2198 */
2199 on_each_cpu(ignore_task_cpu, tr, 1);
2200
2201 out:
2202 mutex_unlock(&event_mutex);
2203
2204 if (ret > 0)
2205 *ppos += ret;
2206
2207 return ret;
2208 }
2209
2210 static ssize_t
2211 ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
2212 size_t cnt, loff_t *ppos)
2213 {
2214 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
2215 }
2216
2217 static ssize_t
2218 ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
2219 size_t cnt, loff_t *ppos)
2220 {
2221 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
2222 }
2223
2224 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
2225 static int ftrace_event_set_open(struct inode *inode, struct file *file);
2226 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
2227 static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
2228 static int ftrace_event_release(struct inode *inode, struct file *file);
2229
2230 static const struct seq_operations show_event_seq_ops = {
2231 .start = t_start,
2232 .next = t_next,
2233 .show = t_show,
2234 .stop = t_stop,
2235 };
2236
2237 static const struct seq_operations show_set_event_seq_ops = {
2238 .start = s_start,
2239 .next = s_next,
2240 .show = t_show,
2241 .stop = t_stop,
2242 };
2243
2244 static const struct seq_operations show_set_pid_seq_ops = {
2245 .start = p_start,
2246 .next = p_next,
2247 .show = trace_pid_show,
2248 .stop = p_stop,
2249 };
2250
2251 static const struct seq_operations show_set_no_pid_seq_ops = {
2252 .start = np_start,
2253 .next = np_next,
2254 .show = trace_pid_show,
2255 .stop = p_stop,
2256 };
2257
2258 static const struct file_operations ftrace_avail_fops = {
2259 .open = ftrace_event_avail_open,
2260 .read = seq_read,
2261 .llseek = seq_lseek,
2262 .release = seq_release,
2263 };
2264
2265 static const struct file_operations ftrace_set_event_fops = {
2266 .open = ftrace_event_set_open,
2267 .read = seq_read,
2268 .write = ftrace_event_write,
2269 .llseek = seq_lseek,
2270 .release = ftrace_event_release,
2271 };
2272
2273 static const struct file_operations ftrace_set_event_pid_fops = {
2274 .open = ftrace_event_set_pid_open,
2275 .read = seq_read,
2276 .write = ftrace_event_pid_write,
2277 .llseek = seq_lseek,
2278 .release = ftrace_event_release,
2279 };
2280
2281 static const struct file_operations ftrace_set_event_notrace_pid_fops = {
2282 .open = ftrace_event_set_npid_open,
2283 .read = seq_read,
2284 .write = ftrace_event_npid_write,
2285 .llseek = seq_lseek,
2286 .release = ftrace_event_release,
2287 };
2288
2289 static const struct file_operations ftrace_enable_fops = {
2290 .open = tracing_open_file_tr,
2291 .read = event_enable_read,
2292 .write = event_enable_write,
2293 .release = tracing_release_file_tr,
2294 .llseek = default_llseek,
2295 };
2296
2297 static const struct file_operations ftrace_event_format_fops = {
2298 .open = trace_format_open,
2299 .read = seq_read,
2300 .llseek = seq_lseek,
2301 .release = seq_release,
2302 };
2303
2304 #ifdef CONFIG_PERF_EVENTS
2305 static const struct file_operations ftrace_event_id_fops = {
2306 .read = event_id_read,
2307 .llseek = default_llseek,
2308 };
2309 #endif
2310
2311 static const struct file_operations ftrace_event_filter_fops = {
2312 .open = tracing_open_file_tr,
2313 .read = event_filter_read,
2314 .write = event_filter_write,
2315 .release = tracing_release_file_tr,
2316 .llseek = default_llseek,
2317 };
2318
2319 static const struct file_operations ftrace_subsystem_filter_fops = {
2320 .open = subsystem_open,
2321 .read = subsystem_filter_read,
2322 .write = subsystem_filter_write,
2323 .llseek = default_llseek,
2324 .release = subsystem_release,
2325 };
2326
2327 static const struct file_operations ftrace_system_enable_fops = {
2328 .open = subsystem_open,
2329 .read = system_enable_read,
2330 .write = system_enable_write,
2331 .llseek = default_llseek,
2332 .release = subsystem_release,
2333 };
2334
2335 static const struct file_operations ftrace_tr_enable_fops = {
2336 .open = system_tr_open,
2337 .read = system_enable_read,
2338 .write = system_enable_write,
2339 .llseek = default_llseek,
2340 .release = subsystem_release,
2341 };
2342
2343 static const struct file_operations ftrace_show_header_page_fops = {
2344 .open = tracing_open_generic_tr,
2345 .read = show_header_page_file,
2346 .llseek = default_llseek,
2347 .release = tracing_release_generic_tr,
2348 };
2349
2350 static const struct file_operations ftrace_show_header_event_fops = {
2351 .open = tracing_open_generic_tr,
2352 .read = show_header_event_file,
2353 .llseek = default_llseek,
2354 .release = tracing_release_generic_tr,
2355 };
2356
2357 static int
2358 ftrace_event_open(struct inode *inode, struct file *file,
2359 const struct seq_operations *seq_ops)
2360 {
2361 struct seq_file *m;
2362 int ret;
2363
2364 ret = security_locked_down(LOCKDOWN_TRACEFS);
2365 if (ret)
2366 return ret;
2367
2368 ret = seq_open(file, seq_ops);
2369 if (ret < 0)
2370 return ret;
2371 m = file->private_data;
2372 /* copy tr over to seq ops */
2373 m->private = inode->i_private;
2374
2375 return ret;
2376 }
2377
2378 static int ftrace_event_release(struct inode *inode, struct file *file)
2379 {
2380 struct trace_array *tr = inode->i_private;
2381
2382 trace_array_put(tr);
2383
2384 return seq_release(inode, file);
2385 }
2386
2387 static int
2388 ftrace_event_avail_open(struct inode *inode, struct file *file)
2389 {
2390 const struct seq_operations *seq_ops = &show_event_seq_ops;
2391
2392 /* Checks for tracefs lockdown */
2393 return ftrace_event_open(inode, file, seq_ops);
2394 }
2395
2396 static int
2397 ftrace_event_set_open(struct inode *inode, struct file *file)
2398 {
2399 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
2400 struct trace_array *tr = inode->i_private;
2401 int ret;
2402
2403 ret = tracing_check_open_get_tr(tr);
2404 if (ret)
2405 return ret;
2406
2407 if ((file->f_mode & FMODE_WRITE) &&
2408 (file->f_flags & O_TRUNC))
2409 ftrace_clear_events(tr);
2410
2411 ret = ftrace_event_open(inode, file, seq_ops);
2412 if (ret < 0)
2413 trace_array_put(tr);
2414 return ret;
2415 }
2416
2417 static int
2418 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
2419 {
2420 const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
2421 struct trace_array *tr = inode->i_private;
2422 int ret;
2423
2424 ret = tracing_check_open_get_tr(tr);
2425 if (ret)
2426 return ret;
2427
2428 if ((file->f_mode & FMODE_WRITE) &&
2429 (file->f_flags & O_TRUNC))
2430 ftrace_clear_event_pids(tr, TRACE_PIDS);
2431
2432 ret = ftrace_event_open(inode, file, seq_ops);
2433 if (ret < 0)
2434 trace_array_put(tr);
2435 return ret;
2436 }
2437
2438 static int
2439 ftrace_event_set_npid_open(struct inode *inode, struct file *file)
2440 {
2441 const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
2442 struct trace_array *tr = inode->i_private;
2443 int ret;
2444
2445 ret = tracing_check_open_get_tr(tr);
2446 if (ret)
2447 return ret;
2448
2449 if ((file->f_mode & FMODE_WRITE) &&
2450 (file->f_flags & O_TRUNC))
2451 ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
2452
2453 ret = ftrace_event_open(inode, file, seq_ops);
2454 if (ret < 0)
2455 trace_array_put(tr);
2456 return ret;
2457 }
2458
2459 static struct event_subsystem *
2460 create_new_subsystem(const char *name)
2461 {
2462 struct event_subsystem *system;
2463
2464 /* need to create new entry */
2465 system = kmalloc(sizeof(*system), GFP_KERNEL);
2466 if (!system)
2467 return NULL;
2468
2469 system->ref_count = 1;
2470
2471 	/* kstrdup_const() only duplicates the name if it is dynamic (kprobes and modules) */
2472 system->name = kstrdup_const(name, GFP_KERNEL);
2473 if (!system->name)
2474 goto out_free;
2475
2476 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
2477 if (!system->filter)
2478 goto out_free;
2479
2480 list_add(&system->list, &event_subsystems);
2481
2482 return system;
2483
2484 out_free:
2485 kfree_const(system->name);
2486 kfree(system);
2487 return NULL;
2488 }
2489
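/*
 * Callback used by eventfs to lazily instantiate a subsystem directory's
 * control files; both "filter" and "enable" are created writable.
 */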
2490 static int system_callback(const char *name, umode_t *mode, void **data,
2491 const struct file_operations **fops)
2492 {
2493 if (strcmp(name, "filter") == 0)
2494 *fops = &ftrace_subsystem_filter_fops;
2495
2496 else if (strcmp(name, "enable") == 0)
2497 *fops = &ftrace_system_enable_fops;
2498
2499 else
2500 return 0;
2501
2502 *mode = TRACE_MODE_WRITE;
2503 return 1;
2504 }
2505
2506 static struct eventfs_inode *
2507 event_subsystem_dir(struct trace_array *tr, const char *name,
2508 struct trace_event_file *file, struct eventfs_inode *parent)
2509 {
2510 struct event_subsystem *system, *iter;
2511 struct trace_subsystem_dir *dir;
2512 struct eventfs_inode *ei;
2513 int nr_entries;
2514 static struct eventfs_entry system_entries[] = {
2515 {
2516 .name = "filter",
2517 .callback = system_callback,
2518 },
2519 {
2520 .name = "enable",
2521 .callback = system_callback,
2522 }
2523 };
2524
2525 	/* First see if we already created this dir */
2526 list_for_each_entry(dir, &tr->systems, list) {
2527 system = dir->subsystem;
2528 if (strcmp(system->name, name) == 0) {
2529 dir->nr_events++;
2530 file->system = dir;
2531 return dir->ei;
2532 }
2533 }
2534
2535 /* Now see if the system itself exists. */
2536 system = NULL;
2537 list_for_each_entry(iter, &event_subsystems, list) {
2538 if (strcmp(iter->name, name) == 0) {
2539 system = iter;
2540 break;
2541 }
2542 }
2543
2544 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
2545 if (!dir)
2546 goto out_fail;
2547
2548 if (!system) {
2549 system = create_new_subsystem(name);
2550 if (!system)
2551 goto out_free;
2552 } else
2553 __get_system(system);
2554
2555 	/* ftrace only has directories, no files */
2556 if (strcmp(name, "ftrace") == 0)
2557 nr_entries = 0;
2558 else
2559 nr_entries = ARRAY_SIZE(system_entries);
2560
2561 ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir);
2562 if (IS_ERR(ei)) {
2563 pr_warn("Failed to create system directory %s\n", name);
2564 __put_system(system);
2565 goto out_free;
2566 }
2567
2568 dir->ei = ei;
2569 dir->tr = tr;
2570 dir->ref_count = 1;
2571 dir->nr_events = 1;
2572 dir->subsystem = system;
2573 file->system = dir;
2574
2575 list_add(&dir->list, &tr->systems);
2576
2577 return dir->ei;
2578
2579 out_free:
2580 kfree(dir);
2581 out_fail:
2582 /* Only print this message if failed on memory allocation */
2583 if (!dir || !system)
2584 pr_warn("No memory to create event subsystem %s\n", name);
2585 return NULL;
2586 }
2587
2588 static int
2589 event_define_fields(struct trace_event_call *call)
2590 {
2591 struct list_head *head;
2592 int ret = 0;
2593
2594 /*
2595 * Other events may have the same class. Only update
2596 * the fields if they are not already defined.
2597 */
2598 head = trace_get_fields(call);
2599 if (list_empty(head)) {
2600 struct trace_event_fields *field = call->class->fields_array;
2601 unsigned int offset = sizeof(struct trace_entry);
2602
2603 for (; field->type; field++) {
2604 if (field->type == TRACE_FUNCTION_TYPE) {
2605 field->define_fields(call);
2606 break;
2607 }
2608
2609 offset = ALIGN(offset, field->align);
2610 ret = trace_define_field_ext(call, field->type, field->name,
2611 offset, field->size,
2612 field->is_signed, field->filter_type,
2613 field->len, field->needs_test);
2614 if (WARN_ON_ONCE(ret)) {
2615 pr_err("error code is %d\n", ret);
2616 break;
2617 }
2618
2619 offset += field->size;
2620 }
2621 }
2622
2623 return ret;
2624 }
2625
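/*
 * Callback used by eventfs to lazily instantiate the files of an event
 * directory. Returns 1 and fills in @mode, @data and @fops if @name is a
 * file this event should have, 0 otherwise.
 */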
2626 static int event_callback(const char *name, umode_t *mode, void **data,
2627 const struct file_operations **fops)
2628 {
2629 struct trace_event_file *file = *data;
2630 struct trace_event_call *call = file->event_call;
2631
2632 if (strcmp(name, "format") == 0) {
2633 *mode = TRACE_MODE_READ;
2634 *fops = &ftrace_event_format_fops;
2635 return 1;
2636 }
2637
2638 /*
2639 * Only event directories that can be enabled should have
2640 * triggers or filters, with the exception of the "print"
2641 * event that can have a "trigger" file.
2642 */
2643 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
2644 if (call->class->reg && strcmp(name, "enable") == 0) {
2645 *mode = TRACE_MODE_WRITE;
2646 *fops = &ftrace_enable_fops;
2647 return 1;
2648 }
2649
2650 if (strcmp(name, "filter") == 0) {
2651 *mode = TRACE_MODE_WRITE;
2652 *fops = &ftrace_event_filter_fops;
2653 return 1;
2654 }
2655 }
2656
2657 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
2658 strcmp(trace_event_name(call), "print") == 0) {
2659 if (strcmp(name, "trigger") == 0) {
2660 *mode = TRACE_MODE_WRITE;
2661 *fops = &event_trigger_fops;
2662 return 1;
2663 }
2664 }
2665
2666 #ifdef CONFIG_PERF_EVENTS
2667 if (call->event.type && call->class->reg &&
2668 strcmp(name, "id") == 0) {
2669 *mode = TRACE_MODE_READ;
2670 *data = (void *)(long)call->event.type;
2671 *fops = &ftrace_event_id_fops;
2672 return 1;
2673 }
2674 #endif
2675
2676 #ifdef CONFIG_HIST_TRIGGERS
2677 if (strcmp(name, "hist") == 0) {
2678 *mode = TRACE_MODE_READ;
2679 *fops = &event_hist_fops;
2680 return 1;
2681 }
2682 #endif
2683 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
2684 if (strcmp(name, "hist_debug") == 0) {
2685 *mode = TRACE_MODE_READ;
2686 *fops = &event_hist_debug_fops;
2687 return 1;
2688 }
2689 #endif
2690 #ifdef CONFIG_TRACE_EVENT_INJECT
2691 if (call->event.type && call->class->reg &&
2692 strcmp(name, "inject") == 0) {
2693 *mode = 0200;
2694 *fops = &event_inject_fops;
2695 return 1;
2696 }
2697 #endif
2698 return 0;
2699 }
2700
2701 /* The file's ref count is incremented on creation; freeing the "enable" file decrements it */
2702 static void event_release(const char *name, void *data)
2703 {
2704 struct trace_event_file *file = data;
2705
2706 event_file_put(file);
2707 }
2708
2709 static int
2710 event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file)
2711 {
2712 struct trace_event_call *call = file->event_call;
2713 struct trace_array *tr = file->tr;
2714 struct eventfs_inode *e_events;
2715 struct eventfs_inode *ei;
2716 const char *name;
2717 int nr_entries;
2718 int ret;
2719 static struct eventfs_entry event_entries[] = {
2720 {
2721 .name = "enable",
2722 .callback = event_callback,
2723 .release = event_release,
2724 },
2725 {
2726 .name = "filter",
2727 .callback = event_callback,
2728 },
2729 {
2730 .name = "trigger",
2731 .callback = event_callback,
2732 },
2733 {
2734 .name = "format",
2735 .callback = event_callback,
2736 },
2737 #ifdef CONFIG_PERF_EVENTS
2738 {
2739 .name = "id",
2740 .callback = event_callback,
2741 },
2742 #endif
2743 #ifdef CONFIG_HIST_TRIGGERS
2744 {
2745 .name = "hist",
2746 .callback = event_callback,
2747 },
2748 #endif
2749 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
2750 {
2751 .name = "hist_debug",
2752 .callback = event_callback,
2753 },
2754 #endif
2755 #ifdef CONFIG_TRACE_EVENT_INJECT
2756 {
2757 .name = "inject",
2758 .callback = event_callback,
2759 },
2760 #endif
2761 };
2762
2763 /*
2764 * If the trace point header did not define TRACE_SYSTEM
2765 * then the system would be called "TRACE_SYSTEM". This should
2766 * never happen.
2767 */
2768 if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0))
2769 return -ENODEV;
2770
2771 e_events = event_subsystem_dir(tr, call->class->system, file, parent);
2772 if (!e_events)
2773 return -ENOMEM;
2774
2775 nr_entries = ARRAY_SIZE(event_entries);
2776
2777 name = trace_event_name(call);
2778 ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file);
2779 if (IS_ERR(ei)) {
2780 pr_warn("Could not create tracefs '%s' directory\n", name);
2781 return -1;
2782 }
2783
2784 file->ei = ei;
2785
2786 ret = event_define_fields(call);
2787 if (ret < 0) {
2788 pr_warn("Could not initialize trace point events/%s\n", name);
2789 return ret;
2790 }
2791
2792 /* Gets decremented on freeing of the "enable" file */
2793 event_file_get(file);
2794
2795 return 0;
2796 }
2797
2798 static void remove_event_from_tracers(struct trace_event_call *call)
2799 {
2800 struct trace_event_file *file;
2801 struct trace_array *tr;
2802
2803 do_for_each_event_file_safe(tr, file) {
2804 if (file->event_call != call)
2805 continue;
2806
2807 remove_event_file_dir(file);
2808 /*
2809 * The do_for_each_event_file_safe() is
2810 * a double loop. After finding the call for this
2811 * trace_array, we use break to jump to the next
2812 * trace_array.
2813 */
2814 break;
2815 } while_for_each_event_file();
2816 }
2817
2818 static void event_remove(struct trace_event_call *call)
2819 {
2820 struct trace_array *tr;
2821 struct trace_event_file *file;
2822
2823 do_for_each_event_file(tr, file) {
2824 if (file->event_call != call)
2825 continue;
2826
2827 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2828 tr->clear_trace = true;
2829
2830 ftrace_event_enable_disable(file, 0);
2831 /*
2832 * The do_for_each_event_file() is
2833 * a double loop. After finding the call for this
2834 * trace_array, we use break to jump to the next
2835 * trace_array.
2836 */
2837 break;
2838 } while_for_each_event_file();
2839
2840 if (call->event.funcs)
2841 __unregister_trace_event(&call->event);
2842 remove_event_from_tracers(call);
2843 list_del(&call->list);
2844 }
2845
2846 static int event_init(struct trace_event_call *call)
2847 {
2848 int ret = 0;
2849 const char *name;
2850
2851 name = trace_event_name(call);
2852 if (WARN_ON(!name))
2853 return -EINVAL;
2854
2855 if (call->class->raw_init) {
2856 ret = call->class->raw_init(call);
2857 if (ret < 0 && ret != -ENOSYS)
2858 pr_warn("Could not initialize trace events/%s\n", name);
2859 }
2860
2861 return ret;
2862 }
2863
2864 static int
2865 __register_event(struct trace_event_call *call, struct module *mod)
2866 {
2867 int ret;
2868
2869 ret = event_init(call);
2870 if (ret < 0)
2871 return ret;
2872
2873 list_add(&call->list, &ftrace_events);
2874 if (call->flags & TRACE_EVENT_FL_DYNAMIC)
2875 atomic_set(&call->refcnt, 0);
2876 else
2877 call->module = mod;
2878
2879 return 0;
2880 }
2881
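/*
 * Replace the eval (enum/sizeof) name at @ptr with its numeric value, in
 * place, shifting the rest of the string down. Returns a pointer just past
 * the inserted number, or NULL if the name is shorter than the value.
 */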
2882 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
2883 {
2884 int rlen;
2885 int elen;
2886
2887 /* Find the length of the eval value as a string */
2888 elen = snprintf(ptr, 0, "%ld", map->eval_value);
2889 /* Make sure there's enough room to replace the string with the value */
2890 if (len < elen)
2891 return NULL;
2892
2893 snprintf(ptr, elen + 1, "%ld", map->eval_value);
2894
2895 /* Get the rest of the string of ptr */
2896 rlen = strlen(ptr + len);
2897 memmove(ptr + elen, ptr + len, rlen);
2898 /* Make sure we end the new string */
2899 ptr[elen + rlen] = 0;
2900
2901 return ptr + elen;
2902 }
2903
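/*
 * Scan the event's print_fmt and replace each standalone use of the eval
 * name in @map with its numeric value, skipping quoted strings, numeric
 * literals and struct member accesses.
 */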
2904 static void update_event_printk(struct trace_event_call *call,
2905 struct trace_eval_map *map)
2906 {
2907 char *ptr;
2908 int quote = 0;
2909 int len = strlen(map->eval_string);
2910
2911 for (ptr = call->print_fmt; *ptr; ptr++) {
2912 if (*ptr == '\\') {
2913 ptr++;
2914 /* paranoid */
2915 if (!*ptr)
2916 break;
2917 continue;
2918 }
2919 if (*ptr == '"') {
2920 quote ^= 1;
2921 continue;
2922 }
2923 if (quote)
2924 continue;
2925 if (isdigit(*ptr)) {
2926 /* skip numbers */
2927 do {
2928 ptr++;
2929 /* Check for alpha chars like ULL */
2930 } while (isalnum(*ptr));
2931 if (!*ptr)
2932 break;
2933 /*
2934 * A number must have some kind of delimiter after
2935 * it, and we can ignore that too.
2936 */
2937 continue;
2938 }
2939 if (isalpha(*ptr) || *ptr == '_') {
2940 if (strncmp(map->eval_string, ptr, len) == 0 &&
2941 !isalnum(ptr[len]) && ptr[len] != '_') {
2942 ptr = eval_replace(ptr, map, len);
2943 /* enum/sizeof string smaller than value */
2944 if (WARN_ON_ONCE(!ptr))
2945 return;
2946 /*
2947 * No need to decrement here, as eval_replace()
2948 				 * returns the pointer to the character past
2949 				 * the eval, and two evals cannot be placed
2950 * back to back without something in between.
2951 * We can skip that something in between.
2952 */
2953 continue;
2954 }
2955 skip_more:
2956 do {
2957 ptr++;
2958 } while (isalnum(*ptr) || *ptr == '_');
2959 if (!*ptr)
2960 break;
2961 /*
2962 * If what comes after this variable is a '.' or
2963 * '->' then we can continue to ignore that string.
2964 */
2965 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2966 ptr += *ptr == '.' ? 1 : 2;
2967 if (!*ptr)
2968 break;
2969 goto skip_more;
2970 }
2971 /*
2972 * Once again, we can skip the delimiter that came
2973 * after the string.
2974 */
2975 continue;
2976 }
2977 }
2978 }
2979
2980 static void add_str_to_module(struct module *module, char *str)
2981 {
2982 struct module_string *modstr;
2983
2984 modstr = kmalloc(sizeof(*modstr), GFP_KERNEL);
2985
2986 /*
2987 * If we failed to allocate memory here, then we'll just
2988 * let the str memory leak when the module is removed.
2989 	 * If this fails to allocate, there are worse problems than
2990 * a leaked string on module removal.
2991 */
2992 if (WARN_ON_ONCE(!modstr))
2993 return;
2994
2995 modstr->module = module;
2996 modstr->str = str;
2997
2998 list_add(&modstr->next, &module_strings);
2999 }
3000
3001 static void update_event_fields(struct trace_event_call *call,
3002 struct trace_eval_map *map)
3003 {
3004 struct ftrace_event_field *field;
3005 struct list_head *head;
3006 char *ptr;
3007 char *str;
3008 int len = strlen(map->eval_string);
3009
3010 /* Dynamic events should never have field maps */
3011 if (WARN_ON_ONCE(call->flags & TRACE_EVENT_FL_DYNAMIC))
3012 return;
3013
3014 head = trace_get_fields(call);
3015 list_for_each_entry(field, head, link) {
3016 ptr = strchr(field->type, '[');
3017 if (!ptr)
3018 continue;
3019 ptr++;
3020
3021 if (!isalpha(*ptr) && *ptr != '_')
3022 continue;
3023
3024 if (strncmp(map->eval_string, ptr, len) != 0)
3025 continue;
3026
3027 str = kstrdup(field->type, GFP_KERNEL);
3028 if (WARN_ON_ONCE(!str))
3029 return;
3030 ptr = str + (ptr - field->type);
3031 ptr = eval_replace(ptr, map, len);
3032 /* enum/sizeof string smaller than value */
3033 if (WARN_ON_ONCE(!ptr)) {
3034 kfree(str);
3035 continue;
3036 }
3037
3038 /*
3039 * If the event is part of a module, then we need to free the string
3040 * when the module is removed. Otherwise, it will stay allocated
3041 * until a reboot.
3042 */
3043 if (call->module)
3044 add_str_to_module(call->module, str);
3045
3046 field->type = str;
3047 }
3048 }
3049
3050 void trace_event_eval_update(struct trace_eval_map **map, int len)
3051 {
3052 struct trace_event_call *call, *p;
3053 const char *last_system = NULL;
3054 bool first = false;
3055 int last_i;
3056 int i;
3057
3058 down_write(&trace_event_sem);
3059 list_for_each_entry_safe(call, p, &ftrace_events, list) {
3060 /* events are usually grouped together with systems */
3061 if (!last_system || call->class->system != last_system) {
3062 first = true;
3063 last_i = 0;
3064 last_system = call->class->system;
3065 }
3066
3067 /*
3068 * Since calls are grouped by systems, the likelihood that the
3069 * next call in the iteration belongs to the same system as the
3070 * previous call is high. As an optimization, we skip searching
3071 * for a map[] that matches the call's system if the last call
3072 * was from the same system. That's what last_i is for. If the
3073 * call has the same system as the previous call, then last_i
3074 * will be the index of the first map[] that has a matching
3075 * system.
3076 */
3077 for (i = last_i; i < len; i++) {
3078 if (call->class->system == map[i]->system) {
3079 /* Save the first system if need be */
3080 if (first) {
3081 last_i = i;
3082 first = false;
3083 }
3084 update_event_printk(call, map[i]);
3085 update_event_fields(call, map[i]);
3086 }
3087 }
3088 cond_resched();
3089 }
3090 up_write(&trace_event_sem);
3091 }
3092
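/*
 * Return true if the event's system is listed in @systems, a string of
 * system names separated by spaces or commas. A NULL @systems matches
 * every event.
 */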
3093 static bool event_in_systems(struct trace_event_call *call,
3094 const char *systems)
3095 {
3096 const char *system;
3097 const char *p;
3098
3099 if (!systems)
3100 return true;
3101
3102 system = call->class->system;
3103 p = strstr(systems, system);
3104 if (!p)
3105 return false;
3106
3107 if (p != systems && !isspace(*(p - 1)) && *(p - 1) != ',')
3108 return false;
3109
3110 p += strlen(system);
3111 return !*p || isspace(*p) || *p == ',';
3112 }
3113
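/*
 * Returns the new trace_event_file on success, NULL if the event's system
 * is not in tr->system_names, or ERR_PTR(-ENOMEM) on allocation failure.
 */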
3114 static struct trace_event_file *
3115 trace_create_new_event(struct trace_event_call *call,
3116 struct trace_array *tr)
3117 {
3118 struct trace_pid_list *no_pid_list;
3119 struct trace_pid_list *pid_list;
3120 struct trace_event_file *file;
3121 unsigned int first;
3122
3123 if (!event_in_systems(call, tr->system_names))
3124 return NULL;
3125
3126 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
3127 if (!file)
3128 return ERR_PTR(-ENOMEM);
3129
3130 pid_list = rcu_dereference_protected(tr->filtered_pids,
3131 lockdep_is_held(&event_mutex));
3132 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
3133 lockdep_is_held(&event_mutex));
3134
3135 if (!trace_pid_list_first(pid_list, &first) ||
3136 !trace_pid_list_first(no_pid_list, &first))
3137 file->flags |= EVENT_FILE_FL_PID_FILTER;
3138
3139 file->event_call = call;
3140 file->tr = tr;
3141 atomic_set(&file->sm_ref, 0);
3142 atomic_set(&file->tm_ref, 0);
3143 INIT_LIST_HEAD(&file->triggers);
3144 list_add(&file->list, &tr->events);
3145 refcount_set(&file->ref, 1);
3146
3147 return file;
3148 }
3149
3150 #define MAX_BOOT_TRIGGERS 32
3151
3152 static struct boot_triggers {
3153 const char *event;
3154 char *trigger;
3155 } bootup_triggers[MAX_BOOT_TRIGGERS];
3156
3157 static char bootup_trigger_buf[COMMAND_LINE_SIZE];
3158 static int nr_boot_triggers;
3159
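/*
 * Parse the trace_trigger= boot parameter: a comma separated list of
 * <event>.<trigger> pairs, for example (illustrative):
 *
 *	trace_trigger="sched_switch.stacktrace:5,initcall_finish.traceoff"
 */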
3160 static __init int setup_trace_triggers(char *str)
3161 {
3162 char *trigger;
3163 char *buf;
3164 int i;
3165
3166 strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE);
3167 trace_set_ring_buffer_expanded(NULL);
3168 disable_tracing_selftest("running event triggers");
3169
3170 buf = bootup_trigger_buf;
3171 for (i = 0; i < MAX_BOOT_TRIGGERS; i++) {
3172 trigger = strsep(&buf, ",");
3173 if (!trigger)
3174 break;
3175 bootup_triggers[i].event = strsep(&trigger, ".");
3176 bootup_triggers[i].trigger = trigger;
3177 if (!bootup_triggers[i].trigger)
3178 break;
3179 }
3180
3181 nr_boot_triggers = i;
3182 return 1;
3183 }
3184 __setup("trace_trigger=", setup_trace_triggers);
3185
3186 /* Add an event to a trace directory */
3187 static int
3188 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
3189 {
3190 struct trace_event_file *file;
3191
3192 file = trace_create_new_event(call, tr);
3193 /*
3194 	 * trace_create_new_event() returns ERR_PTR(-ENOMEM) on a failed
3195 * allocation, or NULL if the event is not part of the tr->system_names.
3196 * When the event is not part of the tr->system_names, return zero, not
3197 * an error.
3198 */
3199 if (!file)
3200 return 0;
3201
3202 if (IS_ERR(file))
3203 return PTR_ERR(file);
3204
3205 if (eventdir_initialized)
3206 return event_create_dir(tr->event_dir, file);
3207 else
3208 return event_define_fields(call);
3209 }
3210
3211 static void trace_early_triggers(struct trace_event_file *file, const char *name)
3212 {
3213 int ret;
3214 int i;
3215
3216 for (i = 0; i < nr_boot_triggers; i++) {
3217 if (strcmp(name, bootup_triggers[i].event))
3218 continue;
3219 mutex_lock(&event_mutex);
3220 ret = trigger_process_regex(file, bootup_triggers[i].trigger);
3221 mutex_unlock(&event_mutex);
3222 if (ret)
3223 pr_err("Failed to register trigger '%s' on event %s\n",
3224 bootup_triggers[i].trigger,
3225 bootup_triggers[i].event);
3226 }
3227 }
3228
3229 /*
3230 * Just create a descriptor for early init. A descriptor is required
3231 * for enabling events at boot. We want to enable events before
3232 * the filesystem is initialized.
3233 */
3234 static int
3235 __trace_early_add_new_event(struct trace_event_call *call,
3236 struct trace_array *tr)
3237 {
3238 struct trace_event_file *file;
3239 int ret;
3240
3241 file = trace_create_new_event(call, tr);
3242 /*
3243 	 * trace_create_new_event() returns ERR_PTR(-ENOMEM) on a failed
3244 * allocation, or NULL if the event is not part of the tr->system_names.
3245 * When the event is not part of the tr->system_names, return zero, not
3246 * an error.
3247 */
3248 if (!file)
3249 return 0;
3250
3251 if (IS_ERR(file))
3252 return PTR_ERR(file);
3253
3254 ret = event_define_fields(call);
3255 if (ret)
3256 return ret;
3257
3258 trace_early_triggers(file, trace_event_name(call));
3259
3260 return 0;
3261 }
3262
3263 struct ftrace_module_file_ops;
3264 static void __add_event_to_tracers(struct trace_event_call *call);
3265
3266 /* Add an additional event_call dynamically */
3267 int trace_add_event_call(struct trace_event_call *call)
3268 {
3269 int ret;
3270 lockdep_assert_held(&event_mutex);
3271
3272 mutex_lock(&trace_types_lock);
3273
3274 ret = __register_event(call, NULL);
3275 if (ret >= 0)
3276 __add_event_to_tracers(call);
3277
3278 mutex_unlock(&trace_types_lock);
3279 return ret;
3280 }
3281 EXPORT_SYMBOL_GPL(trace_add_event_call);
3282
3283 /*
3284 * Must be called under locking of trace_types_lock, event_mutex and
3285 * trace_event_sem.
3286 */
3287 static void __trace_remove_event_call(struct trace_event_call *call)
3288 {
3289 event_remove(call);
3290 trace_destroy_fields(call);
3291 }
3292
3293 static int probe_remove_event_call(struct trace_event_call *call)
3294 {
3295 struct trace_array *tr;
3296 struct trace_event_file *file;
3297
3298 #ifdef CONFIG_PERF_EVENTS
3299 if (call->perf_refcount)
3300 return -EBUSY;
3301 #endif
3302 do_for_each_event_file(tr, file) {
3303 if (file->event_call != call)
3304 continue;
3305 /*
3306 		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
3307 		 * that we are about to do; EVENT_FILE_FL_SOFT_MODE can suppress
3308 		 * TRACE_REG_UNREGISTER.
3309 */
3310 if (file->flags & EVENT_FILE_FL_ENABLED)
3311 goto busy;
3312
3313 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
3314 tr->clear_trace = true;
3315 /*
3316 		 * The do_for_each_event_file() is
3317 * a double loop. After finding the call for this
3318 * trace_array, we use break to jump to the next
3319 * trace_array.
3320 */
3321 break;
3322 } while_for_each_event_file();
3323
3324 __trace_remove_event_call(call);
3325
3326 return 0;
3327 busy:
3328 /* No need to clear the trace now */
3329 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
3330 tr->clear_trace = false;
3331 }
3332 return -EBUSY;
3333 }
3334
3335 /* Remove an event_call */
3336 int trace_remove_event_call(struct trace_event_call *call)
3337 {
3338 int ret;
3339
3340 lockdep_assert_held(&event_mutex);
3341
3342 mutex_lock(&trace_types_lock);
3343 down_write(&trace_event_sem);
3344 ret = probe_remove_event_call(call);
3345 up_write(&trace_event_sem);
3346 mutex_unlock(&trace_types_lock);
3347
3348 return ret;
3349 }
3350 EXPORT_SYMBOL_GPL(trace_remove_event_call);
3351
3352 #define for_each_event(event, start, end) \
3353 for (event = start; \
3354 (unsigned long)event < (unsigned long)end; \
3355 event++)
3356
3357 #ifdef CONFIG_MODULES
3358
3359 static void trace_module_add_events(struct module *mod)
3360 {
3361 struct trace_event_call **call, **start, **end;
3362
3363 if (!mod->num_trace_events)
3364 return;
3365
3366 /* Don't add infrastructure for mods without tracepoints */
3367 if (trace_module_has_bad_taint(mod)) {
3368 pr_err("%s: module has bad taint, not creating trace events\n",
3369 mod->name);
3370 return;
3371 }
3372
3373 start = mod->trace_events;
3374 end = mod->trace_events + mod->num_trace_events;
3375
3376 for_each_event(call, start, end) {
3377 __register_event(*call, mod);
3378 __add_event_to_tracers(*call);
3379 }
3380 }
3381
3382 static void trace_module_remove_events(struct module *mod)
3383 {
3384 struct trace_event_call *call, *p;
3385 struct module_string *modstr, *m;
3386
3387 down_write(&trace_event_sem);
3388 list_for_each_entry_safe(call, p, &ftrace_events, list) {
3389 if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
3390 continue;
3391 if (call->module == mod)
3392 __trace_remove_event_call(call);
3393 }
3394 	/* Check for any strings allocated for this module */
3395 list_for_each_entry_safe(modstr, m, &module_strings, next) {
3396 if (modstr->module != mod)
3397 continue;
3398 list_del(&modstr->next);
3399 kfree(modstr->str);
3400 kfree(modstr);
3401 }
3402 up_write(&trace_event_sem);
3403
3404 /*
3405 * It is safest to reset the ring buffer if the module being unloaded
3406 * registered any events that were used. The only worry is if
3407 * a new module gets loaded, and takes on the same id as the events
3408 * of this module. When printing out the buffer, traced events left
3409 * over from this module may be passed to the new module events and
3410 * unexpected results may occur.
3411 */
3412 tracing_reset_all_online_cpus_unlocked();
3413 }
3414
3415 static int trace_module_notify(struct notifier_block *self,
3416 unsigned long val, void *data)
3417 {
3418 struct module *mod = data;
3419
3420 mutex_lock(&event_mutex);
3421 mutex_lock(&trace_types_lock);
3422 switch (val) {
3423 case MODULE_STATE_COMING:
3424 trace_module_add_events(mod);
3425 break;
3426 case MODULE_STATE_GOING:
3427 trace_module_remove_events(mod);
3428 break;
3429 }
3430 mutex_unlock(&trace_types_lock);
3431 mutex_unlock(&event_mutex);
3432
3433 return NOTIFY_OK;
3434 }
3435
3436 static struct notifier_block trace_module_nb = {
3437 .notifier_call = trace_module_notify,
3438 .priority = 1, /* higher than trace.c module notify */
3439 };
3440 #endif /* CONFIG_MODULES */
3441
3442 /* Create a new event directory structure for a trace directory. */
3443 static void
3444 __trace_add_event_dirs(struct trace_array *tr)
3445 {
3446 struct trace_event_call *call;
3447 int ret;
3448
3449 list_for_each_entry(call, &ftrace_events, list) {
3450 ret = __trace_add_new_event(call, tr);
3451 if (ret < 0)
3452 pr_warn("Could not create directory for event %s\n",
3453 trace_event_name(call));
3454 }
3455 }
3456
3457 /* Returns any file that matches the system and event */
3458 struct trace_event_file *
3459 __find_event_file(struct trace_array *tr, const char *system, const char *event)
3460 {
3461 struct trace_event_file *file;
3462 struct trace_event_call *call;
3463 const char *name;
3464
3465 list_for_each_entry(file, &tr->events, list) {
3466
3467 call = file->event_call;
3468 name = trace_event_name(call);
3469
3470 if (!name || !call->class)
3471 continue;
3472
3473 if (strcmp(event, name) == 0 &&
3474 strcmp(system, call->class->system) == 0)
3475 return file;
3476 }
3477 return NULL;
3478 }
3479
3480 /* Returns a valid trace event file that matches the system and event */
3481 struct trace_event_file *
3482 find_event_file(struct trace_array *tr, const char *system, const char *event)
3483 {
3484 struct trace_event_file *file;
3485
3486 file = __find_event_file(tr, system, event);
3487 if (!file || !file->event_call->class->reg ||
3488 file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
3489 return NULL;
3490
3491 return file;
3492 }
3493
3494 /**
3495 * trace_get_event_file - Find and return a trace event file
3496 * @instance: The name of the trace instance containing the event
3497 * @system: The name of the system containing the event
3498 * @event: The name of the event
3499 *
3500 * Return a trace event file given the trace instance name, trace
3501 * system, and trace event name. If the instance name is NULL, it
3502 * refers to the top-level trace array.
3503 *
3504 * This function will look it up and return it if found, after calling
3505 * trace_array_get() to prevent the instance from going away, and
3506 * increment the event's module refcount to prevent it from being
3507 * removed.
3508 *
3509 * To release the file, call trace_put_event_file(), which will call
3510 * trace_array_put() and decrement the event's module refcount.
3511 *
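 * An illustrative sketch of the expected usage (error handling elided):
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	... use the file ...
 *	trace_put_event_file(file);
 *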
3512 * Return: The trace event on success, ERR_PTR otherwise.
3513 */
3514 struct trace_event_file *trace_get_event_file(const char *instance,
3515 const char *system,
3516 const char *event)
3517 {
3518 struct trace_array *tr = top_trace_array();
3519 struct trace_event_file *file = NULL;
3520 int ret = -EINVAL;
3521
3522 if (instance) {
3523 tr = trace_array_find_get(instance);
3524 if (!tr)
3525 return ERR_PTR(-ENOENT);
3526 } else {
3527 ret = trace_array_get(tr);
3528 if (ret)
3529 return ERR_PTR(ret);
3530 }
3531
3532 mutex_lock(&event_mutex);
3533
3534 file = find_event_file(tr, system, event);
3535 if (!file) {
3536 trace_array_put(tr);
3537 ret = -EINVAL;
3538 goto out;
3539 }
3540
3541 /* Don't let event modules unload while in use */
3542 ret = trace_event_try_get_ref(file->event_call);
3543 if (!ret) {
3544 trace_array_put(tr);
3545 ret = -EBUSY;
3546 goto out;
3547 }
3548
3549 ret = 0;
3550 out:
3551 mutex_unlock(&event_mutex);
3552
3553 if (ret)
3554 file = ERR_PTR(ret);
3555
3556 return file;
3557 }
3558 EXPORT_SYMBOL_GPL(trace_get_event_file);
3559
3560 /**
3561 * trace_put_event_file - Release a file from trace_get_event_file()
3562 * @file: The trace event file
3563 *
3564 * If a file was retrieved using trace_get_event_file(), this should
3565 * be called when it's no longer needed. It will cancel the previous
3566 * trace_array_get() called by that function, and decrement the
3567 * event's module refcount.
3568 */
3569 void trace_put_event_file(struct trace_event_file *file)
3570 {
3571 mutex_lock(&event_mutex);
3572 trace_event_put_ref(file->event_call);
3573 mutex_unlock(&event_mutex);
3574
3575 trace_array_put(file->tr);
3576 }
3577 EXPORT_SYMBOL_GPL(trace_put_event_file);
3578
3579 #ifdef CONFIG_DYNAMIC_FTRACE
3580
3581 /* Avoid typos */
3582 #define ENABLE_EVENT_STR "enable_event"
3583 #define DISABLE_EVENT_STR "disable_event"
3584
3585 struct event_probe_data {
3586 struct trace_event_file *file;
3587 unsigned long count;
3588 int ref;
3589 bool enable;
3590 };
3591
3592 static void update_event_probe(struct event_probe_data *data)
3593 {
3594 if (data->enable)
3595 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3596 else
3597 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3598 }
3599
3600 static void
3601 event_enable_probe(unsigned long ip, unsigned long parent_ip,
3602 struct trace_array *tr, struct ftrace_probe_ops *ops,
3603 void *data)
3604 {
3605 struct ftrace_func_mapper *mapper = data;
3606 struct event_probe_data *edata;
3607 void **pdata;
3608
3609 pdata = ftrace_func_mapper_find_ip(mapper, ip);
3610 if (!pdata || !*pdata)
3611 return;
3612
3613 edata = *pdata;
3614 update_event_probe(edata);
3615 }
3616
3617 static void
3618 event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
3619 struct trace_array *tr, struct ftrace_probe_ops *ops,
3620 void *data)
3621 {
3622 struct ftrace_func_mapper *mapper = data;
3623 struct event_probe_data *edata;
3624 void **pdata;
3625
3626 pdata = ftrace_func_mapper_find_ip(mapper, ip);
3627 if (!pdata || !*pdata)
3628 return;
3629
3630 edata = *pdata;
3631
3632 if (!edata->count)
3633 return;
3634
3635 /* Skip if the event is in a state we want to switch to */
3636 if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
3637 return;
3638
3639 if (edata->count != -1)
3640 (edata->count)--;
3641
3642 update_event_probe(edata);
3643 }
3644
3645 static int
3646 event_enable_print(struct seq_file *m, unsigned long ip,
3647 struct ftrace_probe_ops *ops, void *data)
3648 {
3649 struct ftrace_func_mapper *mapper = data;
3650 struct event_probe_data *edata;
3651 void **pdata;
3652
3653 pdata = ftrace_func_mapper_find_ip(mapper, ip);
3654
3655 if (WARN_ON_ONCE(!pdata || !*pdata))
3656 return 0;
3657
3658 edata = *pdata;
3659
3660 seq_printf(m, "%ps:", (void *)ip);
3661
3662 seq_printf(m, "%s:%s:%s",
3663 edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
3664 edata->file->event_call->class->system,
3665 trace_event_name(edata->file->event_call));
3666
3667 if (edata->count == -1)
3668 seq_puts(m, ":unlimited\n");
3669 else
3670 seq_printf(m, ":count=%ld\n", edata->count);
3671
3672 return 0;
3673 }
3674
3675 static int
3676 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
3677 unsigned long ip, void *init_data, void **data)
3678 {
3679 struct ftrace_func_mapper *mapper = *data;
3680 struct event_probe_data *edata = init_data;
3681 int ret;
3682
3683 if (!mapper) {
3684 mapper = allocate_ftrace_func_mapper();
3685 if (!mapper)
3686 return -ENODEV;
3687 *data = mapper;
3688 }
3689
3690 ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
3691 if (ret < 0)
3692 return ret;
3693
3694 edata->ref++;
3695
3696 return 0;
3697 }
3698
3699 static int free_probe_data(void *data)
3700 {
3701 struct event_probe_data *edata = data;
3702
3703 edata->ref--;
3704 if (!edata->ref) {
3705 /* Remove the SOFT_MODE flag */
3706 __ftrace_event_enable_disable(edata->file, 0, 1);
3707 trace_event_put_ref(edata->file->event_call);
3708 kfree(edata);
3709 }
3710 return 0;
3711 }
3712
3713 static void
3714 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
3715 unsigned long ip, void *data)
3716 {
3717 struct ftrace_func_mapper *mapper = data;
3718 struct event_probe_data *edata;
3719
3720 if (!ip) {
3721 if (!mapper)
3722 return;
3723 free_ftrace_func_mapper(mapper, free_probe_data);
3724 return;
3725 }
3726
3727 edata = ftrace_func_mapper_remove_ip(mapper, ip);
3728
3729 if (WARN_ON_ONCE(!edata))
3730 return;
3731
3732 if (WARN_ON_ONCE(edata->ref <= 0))
3733 return;
3734
3735 free_probe_data(edata);
3736 }
3737
3738 static struct ftrace_probe_ops event_enable_probe_ops = {
3739 .func = event_enable_probe,
3740 .print = event_enable_print,
3741 .init = event_enable_init,
3742 .free = event_enable_free,
3743 };
3744
3745 static struct ftrace_probe_ops event_enable_count_probe_ops = {
3746 .func = event_enable_count_probe,
3747 .print = event_enable_print,
3748 .init = event_enable_init,
3749 .free = event_enable_free,
3750 };
3751
3752 static struct ftrace_probe_ops event_disable_probe_ops = {
3753 .func = event_enable_probe,
3754 .print = event_enable_print,
3755 .init = event_enable_init,
3756 .free = event_enable_free,
3757 };
3758
3759 static struct ftrace_probe_ops event_disable_count_probe_ops = {
3760 .func = event_enable_count_probe,
3761 .print = event_enable_print,
3762 .init = event_enable_init,
3763 .free = event_enable_free,
3764 };
3765
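/*
 * Handler for the enable_event/disable_event commands of set_ftrace_filter.
 * Format: <function>:enable_event:<system>:<event>[:count], for example
 * (illustrative):
 *
 *	echo 'schedule:enable_event:sched:sched_switch:5' > set_ftrace_filter
 */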
3766 static int
3767 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
3768 char *glob, char *cmd, char *param, int enabled)
3769 {
3770 struct trace_event_file *file;
3771 struct ftrace_probe_ops *ops;
3772 struct event_probe_data *data;
3773 const char *system;
3774 const char *event;
3775 char *number;
3776 bool enable;
3777 int ret;
3778
3779 if (!tr)
3780 return -ENODEV;
3781
3782 /* hash funcs only work with set_ftrace_filter */
3783 if (!enabled || !param)
3784 return -EINVAL;
3785
3786 	system = strsep(&param, ":");
3787 if (!param)
3788 return -EINVAL;
3789
3790 	event = strsep(&param, ":");
3791
3792 mutex_lock(&event_mutex);
3793
3794 ret = -EINVAL;
3795 file = find_event_file(tr, system, event);
3796 if (!file)
3797 goto out;
3798
3799 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
3800
3801 if (enable)
3802 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
3803 else
3804 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
3805
3806 if (glob[0] == '!') {
3807 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
3808 goto out;
3809 }
3810
3811 ret = -ENOMEM;
3812
3813 data = kzalloc(sizeof(*data), GFP_KERNEL);
3814 if (!data)
3815 goto out;
3816
3817 data->enable = enable;
3818 data->count = -1;
3819 data->file = file;
3820
3821 if (!param)
3822 goto out_reg;
3823
3824 	number = strsep(&param, ":");
3825
3826 ret = -EINVAL;
3827 if (!strlen(number))
3828 goto out_free;
3829
3830 /*
3831 * We use the callback data field (which is a pointer)
3832 * as our counter.
3833 */
3834 ret = kstrtoul(number, 0, &data->count);
3835 if (ret)
3836 goto out_free;
3837
3838 out_reg:
3839 /* Don't let event modules unload while probe registered */
3840 ret = trace_event_try_get_ref(file->event_call);
3841 if (!ret) {
3842 ret = -EBUSY;
3843 goto out_free;
3844 }
3845
3846 ret = __ftrace_event_enable_disable(file, 1, 1);
3847 if (ret < 0)
3848 goto out_put;
3849
3850 ret = register_ftrace_function_probe(glob, tr, ops, data);
3851 /*
3852 * The above returns on success the # of functions enabled,
3853 * but if it didn't find any functions it returns zero.
3854 * Consider no functions a failure too.
3855 */
3856 if (!ret) {
3857 ret = -ENOENT;
3858 goto out_disable;
3859 } else if (ret < 0)
3860 goto out_disable;
3861 /* Just return zero, not the number of enabled functions */
3862 ret = 0;
3863 out:
3864 mutex_unlock(&event_mutex);
3865 return ret;
3866
3867 out_disable:
3868 __ftrace_event_enable_disable(file, 0, 1);
3869 out_put:
3870 trace_event_put_ref(file->event_call);
3871 out_free:
3872 kfree(data);
3873 goto out;
3874 }
3875
3876 static struct ftrace_func_command event_enable_cmd = {
3877 .name = ENABLE_EVENT_STR,
3878 .func = event_enable_func,
3879 };
3880
3881 static struct ftrace_func_command event_disable_cmd = {
3882 .name = DISABLE_EVENT_STR,
3883 .func = event_enable_func,
3884 };
3885
3886 static __init int register_event_cmds(void)
3887 {
3888 int ret;
3889
3890 ret = register_ftrace_command(&event_enable_cmd);
3891 if (WARN_ON(ret < 0))
3892 return ret;
3893 ret = register_ftrace_command(&event_disable_cmd);
3894 if (WARN_ON(ret < 0))
3895 unregister_ftrace_command(&event_enable_cmd);
3896 return ret;
3897 }
3898 #else
3899 static inline int register_event_cmds(void) { return 0; }
3900 #endif /* CONFIG_DYNAMIC_FTRACE */
3901
3902 /*
3903 * The top level array and trace arrays created by boot-time tracing
3904 * have already had their trace_event_file descriptors created in
3905 * order to allow early events to be recorded.
3906 * This function is called after tracefs has been initialized, and
3907 * we now have to create the files associated with those events.
3908 */
3909 static void __trace_early_add_event_dirs(struct trace_array *tr)
3910 {
3911 struct trace_event_file *file;
3912 int ret;
3913
3915 list_for_each_entry(file, &tr->events, list) {
3916 ret = event_create_dir(tr->event_dir, file);
3917 if (ret < 0)
3918 pr_warn("Could not create directory for event %s\n",
3919 trace_event_name(file->event_call));
3920 }
3921 }
3922
3923 /*
3924 * For early boot up, the top trace array and the trace arrays created
3925 * by boot-time tracing need to have a list of events that can be
3926 * enabled. This must be done before the filesystem is set up in order
3927 * to allow events to be traced early.
3928 */
3929 void __trace_early_add_events(struct trace_array *tr)
3930 {
3931 struct trace_event_call *call;
3932 int ret;
3933
3934 list_for_each_entry(call, &ftrace_events, list) {
3935 /* Early boot up should not have any modules loaded */
3936 if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
3937 WARN_ON_ONCE(call->module))
3938 continue;
3939
3940 ret = __trace_early_add_new_event(call, tr);
3941 if (ret < 0)
3942 pr_warn("Could not create early event %s\n",
3943 trace_event_name(call));
3944 }
3945 }
3946
3947 /* Remove the event directory structure for a trace directory. */
3948 static void
3949 __trace_remove_event_dirs(struct trace_array *tr)
3950 {
3951 struct trace_event_file *file, *next;
3952
3953 list_for_each_entry_safe(file, next, &tr->events, list)
3954 remove_event_file_dir(file);
3955 }
3956
3957 static void __add_event_to_tracers(struct trace_event_call *call)
3958 {
3959 struct trace_array *tr;
3960
3961 list_for_each_entry(tr, &ftrace_trace_arrays, list)
3962 __trace_add_new_event(call, tr);
3963 }
3964
3965 extern struct trace_event_call *__start_ftrace_events[];
3966 extern struct trace_event_call *__stop_ftrace_events[];
3967
3968 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
3969
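/*
 * Save the "trace_event=" boot parameter for later. A sketch of the
 * expected format (the events named here are examples only):
 *
 *   trace_event=sched:sched_switch,irq:*
 *
 * The comma-separated list is kept in bootup_event_buf and replayed
 * by event_trace_enable() and event_trace_enable_again().
 */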
3970 static __init int setup_trace_event(char *str)
3971 {
3972 strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
3973 trace_set_ring_buffer_expanded(NULL);
3974 disable_tracing_selftest("running event tracing");
3975
3976 return 1;
3977 }
3978 __setup("trace_event=", setup_trace_event);
3979
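/*
 * eventfs callback for entries of the top level "events" directory.
 * Fill in @mode and @fops and return 1 when @name is an entry this
 * callback knows about; return 0 to let eventfs ignore the entry.
 */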
3980 static int events_callback(const char *name, umode_t *mode, void **data,
3981 const struct file_operations **fops)
3982 {
3983 if (strcmp(name, "enable") == 0) {
3984 *mode = TRACE_MODE_WRITE;
3985 *fops = &ftrace_tr_enable_fops;
3986 return 1;
3987 }
3988
3989 if (strcmp(name, "header_page") == 0) {
3990 *mode = TRACE_MODE_READ;
3991 *fops = &ftrace_show_header_page_fops;
3992
3993 } else if (strcmp(name, "header_event") == 0) {
3994 *mode = TRACE_MODE_READ;
3995 *fops = &ftrace_show_header_event_fops;
3996 } else
3997 return 0;
3998
3999 return 1;
4000 }
4001
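/*
 * A sketch of what the function below creates, relative to an
 * instance's tracefs directory:
 *
 *   set_event              - enable/disable individual events
 *   set_event_pid          - limit events to the listed PIDs
 *   set_event_notrace_pid  - exclude events from the listed PIDs
 *   events/enable          - enable/disable all events at once
 *   events/header_page     - ring buffer page header format
 *   events/header_event    - ring buffer event header format
 */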
4002 /* Expects to have event_mutex held when called */
4003 static int
4004 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
4005 {
4006 struct eventfs_inode *e_events;
4007 struct dentry *entry;
4008 int nr_entries;
4009 static struct eventfs_entry events_entries[] = {
4010 {
4011 .name = "enable",
4012 .callback = events_callback,
4013 },
4014 {
4015 .name = "header_page",
4016 .callback = events_callback,
4017 },
4018 {
4019 .name = "header_event",
4020 .callback = events_callback,
4021 },
4022 };
4023
4024 entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
4025 tr, &ftrace_set_event_fops);
4026 if (!entry)
4027 return -ENOMEM;
4028
4029 nr_entries = ARRAY_SIZE(events_entries);
4030
4031 e_events = eventfs_create_events_dir("events", parent, events_entries,
4032 nr_entries, tr);
4033 if (IS_ERR(e_events)) {
4034 pr_warn("Could not create tracefs 'events' directory\n");
4035 return -ENOMEM;
4036 }
4037
4038 /* These are not as crucial, just warn if they are not created */
4039
4040 trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
4041 tr, &ftrace_set_event_pid_fops);
4042
4043 trace_create_file("set_event_notrace_pid",
4044 TRACE_MODE_WRITE, parent, tr,
4045 &ftrace_set_event_notrace_pid_fops);
4046
4047 tr->event_dir = e_events;
4048
4049 return 0;
4050 }
4051
4052 /**
4053 * event_trace_add_tracer - add an instance of a trace_array to events
4054 * @parent: The parent dentry to place the files/directories for events in
4055 * @tr: The trace array associated with these events
4056 *
4057 * When a new instance is created, it needs to set up its events
4058 * directory, as well as other files associated with events. It also
4059 * creates the event hierarchy in the @parent/events directory.
4060 *
4061 * Returns 0 on success.
4062 *
4063 * Must be called with event_mutex held.
4064 */
4065 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
4066 {
4067 int ret;
4068
4069 lockdep_assert_held(&event_mutex);
4070
4071 ret = create_event_toplevel_files(parent, tr);
4072 if (ret)
4073 goto out;
4074
4075 down_write(&trace_event_sem);
4076 /* If tr already has an event list, it was initialized in early boot. */
4077 if (unlikely(!list_empty(&tr->events)))
4078 __trace_early_add_event_dirs(tr);
4079 else
4080 __trace_add_event_dirs(tr);
4081 up_write(&trace_event_sem);
4082
4083 out:
4084 return ret;
4085 }
4086
4087 /*
4088 * The top trace array already had its trace_event_file descriptors
4089 * created. Now the files themselves need to be created.
4090 */
4091 static __init int
4092 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
4093 {
4094 int ret;
4095
4096 mutex_lock(&event_mutex);
4097
4098 ret = create_event_toplevel_files(parent, tr);
4099 if (ret)
4100 goto out_unlock;
4101
4102 down_write(&trace_event_sem);
4103 __trace_early_add_event_dirs(tr);
4104 up_write(&trace_event_sem);
4105
4106 out_unlock:
4107 mutex_unlock(&event_mutex);
4108
4109 return ret;
4110 }
4111
4112 /* Must be called with event_mutex held */
4113 int event_trace_del_tracer(struct trace_array *tr)
4114 {
4115 lockdep_assert_held(&event_mutex);
4116
4117 /* Disable any event triggers and associated soft-disabled events */
4118 clear_event_triggers(tr);
4119
4120 /* Clear the pid list */
4121 __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
4122
4123 /* Disable any running events */
4124 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
4125
4126 /* Make sure no more events are being executed */
4127 tracepoint_synchronize_unregister();
4128
4129 down_write(&trace_event_sem);
4130 __trace_remove_event_dirs(tr);
4131 eventfs_remove_events_dir(tr->event_dir);
4132 up_write(&trace_event_sem);
4133
4134 tr->event_dir = NULL;
4135
4136 return 0;
4137 }
4138
4139 static __init int event_trace_memsetup(void)
4140 {
4141 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
4142 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
4143 return 0;
4144 }
4145
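/*
 * Enable a comma-separated list of events, e.g. a @buf of
 * "sched:sched_switch,irq:*". strsep() consumes the commas, so each
 * one is put back after its token is processed; this keeps @buf
 * intact for the second pass done by event_trace_enable_again().
 */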
4146 __init void
4147 early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
4148 {
4149 char *token;
4150 int ret;
4151
4152 while (true) {
4153 token = strsep(&buf, ",");
4154
4155 if (!token)
4156 break;
4157
4158 if (*token) {
4159 /* Restarting syscalls requires that we stop them first */
4160 if (disable_first)
4161 ftrace_set_clr_event(tr, token, 0);
4162
4163 ret = ftrace_set_clr_event(tr, token, 1);
4164 if (ret)
4165 pr_warn("Failed to enable trace event: %s\n", token);
4166 }
4167
4168 /* Put back the comma to allow this to be called again */
4169 if (buf)
4170 *(buf - 1) = ',';
4171 }
4172 }
4173
4174 static __init int event_trace_enable(void)
4175 {
4176 struct trace_array *tr = top_trace_array();
4177 struct trace_event_call **iter, *call;
4178 int ret;
4179
4180 if (!tr)
4181 return -ENODEV;
4182
4183 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
4184
4185 call = *iter;
4186 ret = event_init(call);
4187 if (!ret)
4188 list_add(&call->list, &ftrace_events);
4189 }
4190
4191 register_trigger_cmds();
4192
4193 /*
4194 * We need the top trace array to have a working set of trace
4195 * points at early init, before the debug files and directories
4196 * are created. Create the file entries now, and attach them
4197 * to the actual file dentries later.
4198 */
4199 __trace_early_add_events(tr);
4200
4201 early_enable_events(tr, bootup_event_buf, false);
4202
4203 trace_printk_start_comm();
4204
4205 register_event_cmds();
4206
4208 return 0;
4209 }
4210
4211 /*
4212 * event_trace_enable() is called from trace_event_init() first to
4213 * initialize events and perhaps start any events that are on the
4214 * command line. Unfortunately, there are some events that will not
4215 * start this early, like the system call tracepoints that need
4216 * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But
4217 * event_trace_enable() is called before pid 1 starts, so the flag
4218 * is never set and the syscall tracepoints are never reached. The
4219 * events are enabled regardless, but do nothing.
4220 */
4221 static __init int event_trace_enable_again(void)
4222 {
4223 struct trace_array *tr;
4224
4225 tr = top_trace_array();
4226 if (!tr)
4227 return -ENODEV;
4228
4229 early_enable_events(tr, bootup_event_buf, true);
4230
4231 return 0;
4232 }
4233
4234 early_initcall(event_trace_enable_again);
4235
4236 /* Initialize fields that are not related to tracefs */
4237 static __init int event_trace_init_fields(void)
4238 {
4239 if (trace_define_generic_fields())
4240 pr_warn("tracing: Failed to allocated generic fields");
4241
4242 if (trace_define_common_fields())
4243 pr_warn("tracing: Failed to allocate common fields");
4244
4245 return 0;
4246 }
4247
4248 __init int event_trace_init(void)
4249 {
4250 struct trace_array *tr;
4251 int ret;
4252
4253 tr = top_trace_array();
4254 if (!tr)
4255 return -ENODEV;
4256
4257 trace_create_file("available_events", TRACE_MODE_READ,
4258 NULL, tr, &ftrace_avail_fops);
4259
4260 ret = early_event_add_tracer(NULL, tr);
4261 if (ret)
4262 return ret;
4263
4264 #ifdef CONFIG_MODULES
4265 ret = register_module_notifier(&trace_module_nb);
4266 if (ret)
4267 pr_warn("Failed to register trace events module notifier\n");
4268 #endif
4269
4270 eventdir_initialized = true;
4271
4272 return 0;
4273 }
4274
4275 void __init trace_event_init(void)
4276 {
4277 event_trace_memsetup();
4278 init_ftrace_syscalls();
4279 event_trace_enable();
4280 event_trace_init_fields();
4281 }
4282
4283 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
4284
4285 static DEFINE_SPINLOCK(test_spinlock);
4286 static DEFINE_SPINLOCK(test_spinlock_irq);
4287 static DEFINE_MUTEX(test_mutex);
4288
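/*
 * Exercise a mix of locking and delay primitives so that lock, irq,
 * sched and timer related tracepoints have something to fire on while
 * an event under test is enabled.
 */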
4289 static __init void test_work(struct work_struct *dummy)
4290 {
4291 spin_lock(&test_spinlock);
4292 spin_lock_irq(&test_spinlock_irq);
4293 udelay(1);
4294 spin_unlock_irq(&test_spinlock_irq);
4295 spin_unlock(&test_spinlock);
4296
4297 mutex_lock(&test_mutex);
4298 msleep(1);
4299 mutex_unlock(&test_mutex);
4300 }
4301
4302 static __init int event_test_thread(void *unused)
4303 {
4304 void *test_malloc;
4305
4306 test_malloc = kmalloc(1234, GFP_KERNEL);
4307 if (!test_malloc)
4308 pr_info("failed to kmalloc\n");
4309
4310 schedule_on_each_cpu(test_work);
4311
4312 kfree(test_malloc);
4313
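	/* Standard kthread idle loop: sleep until kthread_stop() is called */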
4314 set_current_state(TASK_INTERRUPTIBLE);
4315 while (!kthread_should_stop()) {
4316 schedule();
4317 set_current_state(TASK_INTERRUPTIBLE);
4318 }
4319 __set_current_state(TASK_RUNNING);
4320
4321 return 0;
4322 }
4323
4324 /*
4325 * Do various things that may trigger events.
4326 */
4327 static __init void event_test_stuff(void)
4328 {
4329 struct task_struct *test_thread;
4330
4331 test_thread = kthread_run(event_test_thread, NULL, "test-events");
4332 msleep(1);
4333 kthread_stop(test_thread);
4334 }
4335
4336 /*
4337 * For every trace event defined, we will test each trace point separately,
4338 * and then by groups, and finally all trace points.
4339 */
4340 static __init void event_trace_self_tests(void)
4341 {
4342 struct trace_subsystem_dir *dir;
4343 struct trace_event_file *file;
4344 struct trace_event_call *call;
4345 struct event_subsystem *system;
4346 struct trace_array *tr;
4347 int ret;
4348
4349 tr = top_trace_array();
4350 if (!tr)
4351 return;
4352
4353 pr_info("Running tests on trace events:\n");
4354
4355 list_for_each_entry(file, &tr->events, list) {
4356
4357 call = file->event_call;
4358
4359 /* Only test those that have a probe */
4360 if (!call->class || !call->class->probe)
4361 continue;
4362
4363 /*
4364 * Testing syscall events here is pretty useless, but we
4365 * still do it if configured. It is also time consuming.
4366 * What we really need is a user thread to perform the
4367 * syscalls as we test.
4368 */
4369 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
4370 if (call->class->system &&
4371 strcmp(call->class->system, "syscalls") == 0)
4372 continue;
4373 #endif
4374
4375 pr_info("Testing event %s: ", trace_event_name(call));
4376
4377 /*
4378 * If an event is already enabled, someone is using
4379 * it and the self test should not be on.
4380 */
4381 if (file->flags & EVENT_FILE_FL_ENABLED) {
4382 pr_warn("Enabled event during self test!\n");
4383 WARN_ON_ONCE(1);
4384 continue;
4385 }
4386
4387 ftrace_event_enable_disable(file, 1);
4388 event_test_stuff();
4389 ftrace_event_enable_disable(file, 0);
4390
4391 pr_cont("OK\n");
4392 }
4393
4394 /* Now test at the sub system level */
4395
4396 pr_info("Running tests on trace event systems:\n");
4397
4398 list_for_each_entry(dir, &tr->systems, list) {
4399
4400 system = dir->subsystem;
4401
4402 /* the ftrace system is special, skip it */
4403 if (strcmp(system->name, "ftrace") == 0)
4404 continue;
4405
4406 pr_info("Testing event system %s: ", system->name);
4407
4408 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
4409 if (WARN_ON_ONCE(ret)) {
4410 pr_warn("error enabling system %s\n",
4411 system->name);
4412 continue;
4413 }
4414
4415 event_test_stuff();
4416
4417 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
4418 if (WARN_ON_ONCE(ret)) {
4419 pr_warn("error disabling system %s\n",
4420 system->name);
4421 continue;
4422 }
4423
4424 pr_cont("OK\n");
4425 }
4426
4427 /* Test with all events enabled */
4428
4429 pr_info("Running tests on all trace events:\n");
4430 pr_info("Testing all events: ");
4431
4432 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
4433 if (WARN_ON_ONCE(ret)) {
4434 pr_warn("error enabling all events\n");
4435 return;
4436 }
4437
4438 event_test_stuff();
4439
4440 /* Disable all events again */
4441 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
4442 if (WARN_ON_ONCE(ret)) {
4443 pr_warn("error disabling all events\n");
4444 return;
4445 }
4446
4447 pr_cont("OK\n");
4448 }
4449
4450 #ifdef CONFIG_FUNCTION_TRACER
4451
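/*
 * Per-CPU recursion guard for the callback below: the callback itself
 * executes traceable functions, so only the outermost entry on each
 * CPU (counter value of 1) is allowed to record a TRACE_FN entry.
 */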
4452 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
4453
4454 static struct trace_event_file event_trace_file __initdata;
4455
4456 static void __init
4457 function_test_events_call(unsigned long ip, unsigned long parent_ip,
4458 struct ftrace_ops *op, struct ftrace_regs *regs)
4459 {
4460 struct trace_buffer *buffer;
4461 struct ring_buffer_event *event;
4462 struct ftrace_entry *entry;
4463 unsigned int trace_ctx;
4464 long disabled;
4465 int cpu;
4466
4467 trace_ctx = tracing_gen_ctx();
4468 preempt_disable_notrace();
4469 cpu = raw_smp_processor_id();
4470 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
4471
4472 if (disabled != 1)
4473 goto out;
4474
4475 event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
4476 TRACE_FN, sizeof(*entry),
4477 trace_ctx);
4478 if (!event)
4479 goto out;
4480 entry = ring_buffer_event_data(event);
4481 entry->ip = ip;
4482 entry->parent_ip = parent_ip;
4483
4484 event_trigger_unlock_commit(&event_trace_file, buffer, event,
4485 entry, trace_ctx);
4486 out:
4487 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
4488 preempt_enable_notrace();
4489 }
4490
4491 static struct ftrace_ops trace_ops __initdata =
4492 {
4493 .func = function_test_events_call,
4494 };
4495
4496 static __init void event_trace_self_test_with_function(void)
4497 {
4498 int ret;
4499
4500 event_trace_file.tr = top_trace_array();
4501 if (WARN_ON(!event_trace_file.tr))
4502 return;
4503
4504 ret = register_ftrace_function(&trace_ops);
4505 if (WARN_ON(ret < 0)) {
4506 pr_info("Failed to enable function tracer for event tests\n");
4507 return;
4508 }
4509 pr_info("Running tests again, along with the function tracer\n");
4510 event_trace_self_tests();
4511 unregister_ftrace_function(&trace_ops);
4512 }
4513 #else
4514 static __init void event_trace_self_test_with_function(void)
4515 {
4516 }
4517 #endif
4518
4519 static __init int event_trace_self_tests_init(void)
4520 {
4521 if (!tracing_selftest_disabled) {
4522 event_trace_self_tests();
4523 event_trace_self_test_with_function();
4524 }
4525
4526 return 0;
4527 }
4528
4529 late_initcall(event_trace_self_tests_init);
4530
4531 #endif
4532