// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

static LIST_HEAD(module_strings);

struct module_string {
	struct list_head	next;
	struct module		*module;
	char			*str;
};

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
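
/*
 * Illustrative usage sketch (hypothetical; nothing in this file uses it
 * verbatim): walk every event file of every trace instance.  The
 * helpers expand to nested list_for_each_entry() loops, so a bare
 * 'break' only exits the inner loop; use a goto to leave both:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (found(file))	(hypothetical predicate)
 *			goto out;
 *	} while_for_each_event_file();
 * out:
 */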

static struct ftrace_event_field *
__find_event_field(struct list_head *head, const char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type, int len,
				int need_test)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->needs_test = need_test;
	field->len = len;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, 0, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);
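
/*
 * Example (hypothetical event and field): record a 4-byte signed
 * "prio" value 8 bytes into the entry:
 *
 *	trace_define_field(call, "int", "prio",
 *			   offsetof(struct my_entry, prio),
 *			   sizeof(int), is_signed_type(int),
 *			   FILTER_OTHER);
 *
 * Most events never call this directly; the TRACE_EVENT() macros
 * generate the calls from the TP_STRUCT__entry() description.
 */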

static int trace_define_field_ext(struct trace_event_call *call, const char *type,
				  const char *name, int offset, int size, int is_signed,
				  int filter_type, int len, int need_test)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, len, need_test);
}

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type, 0, 0);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER,	\
				   0, 0);				\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(int, common_cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);
	__generic_field(char *, stacktrace, FILTER_STACKTRACE);
	__generic_field(char *, STACKTRACE, FILTER_STACKTRACE);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	/* Holds both preempt_count and migrate_disable */
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}

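/*
 * Given a print_fmt argument such as "REC->foo", look up the field
 * "foo" in the event's fields_array.  Returns NULL if the argument
 * does not start with "REC->" or if no field of that name exists.
 */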
static struct trace_event_fields *find_event_field(const char *fmt,
						   struct trace_event_call *call)
{
	struct trace_event_fields *field = call->class->fields_array;
	const char *p = fmt;
	int len;

	if (!(len = str_has_prefix(fmt, "REC->")))
		return NULL;
	fmt += len;
	for (p = fmt; *p; p++) {
		if (!isalnum(*p) && *p != '_')
			break;
	}
	len = p - fmt;

	for (; field->type; field++) {
		if (strncmp(field->name, fmt, len) || field->name[len])
			continue;

		return field;
	}
	return NULL;
}

/*
 * Check if the referenced field is an array and return true,
 * as arrays are OK to dereference.
 */
static bool test_field(const char *fmt, struct trace_event_call *call)
{
	struct trace_event_fields *field;

	field = find_event_field(fmt, call);
	if (!field)
		return false;

	/* This is an array and is OK to dereference. */
	return strchr(field->type, '[') != NULL;
}

/* Look for a string within an argument */
static bool find_print_string(const char *arg, const char *str, const char *end)
{
	const char *r;

	r = strstr(arg, str);
	return r && r < end;
}

/* Return true if the argument pointer is safe */
static bool process_pointer(const char *fmt, int len, struct trace_event_call *call)
{
	const char *r, *e, *a;

	e = fmt + len;

	/* Find the REC-> in the argument */
	r = strstr(fmt, "REC->");
	if (r && r < e) {
		/*
		 * Addresses of events on the buffer, or an array on the
		 * buffer, are OK to dereference. There are ways to fool
		 * this, but it is meant to catch common mistakes, not
		 * malicious code.
		 */
		a = strchr(fmt, '&');
		if ((a && (a < r)) || test_field(r, call))
			return true;
	} else if (find_print_string(fmt, "__get_dynamic_array(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_dynamic_array(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_dynamic_array_len(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_dynamic_array_len(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_sockaddr(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_sockaddr(", e)) {
		return true;
	}
	return false;
}

/* Return true if the string is safe */
static bool process_string(const char *fmt, int len, struct trace_event_call *call)
{
	struct trace_event_fields *field;
	const char *r, *e, *s;

	e = fmt + len;

	/*
	 * There are several helper functions that return strings.
	 * If the argument contains a function, then assume its field is valid.
	 * The argument is considered to contain a function if an
	 * alphanumeric character or '_' appears before a parenthesis.
	 */
	s = fmt;
	do {
		r = strstr(s, "(");
		if (!r || r >= e)
			break;
		for (int i = 1; r - i >= s; i++) {
			char ch = *(r - i);
			if (isspace(ch))
				continue;
			if (isalnum(ch) || ch == '_')
				return true;
			/* Anything else, this isn't a function */
			break;
		}
		/* A function could be wrapped in parentheses, try the next one */
		s = r + 1;
	} while (s < e);

	/*
	 * Check for arrays. If the argument has: foo[REC->val]
	 * then it is very likely that foo is an array of strings
	 * that are safe to use.
	 */
	r = strstr(s, "[");
	if (r && r < e) {
		r = strstr(r, "REC->");
		if (r && r < e)
			return true;
	}

	/*
	 * If there are any strings in the argument consider this arg OK as it
	 * could be: REC->field ? "foo" : "bar" and we don't want to get into
	 * verifying that logic here.
	 */
	if (find_print_string(fmt, "\"", e))
		return true;

	/* Dereferenced strings are also valid like any other pointer */
	if (process_pointer(fmt, len, call))
		return true;

	/* Make sure the field is found */
	field = find_event_field(fmt, call);
	if (!field)
		return false;

	/* Test this field's string before printing the event */
	call->flags |= TRACE_EVENT_FL_TEST_STR;
	field->needs_test = 1;

	return true;
}

static void handle_dereference_arg(const char *arg_str, u64 string_flags, int len,
				   u64 *dereference_flags, int arg,
				   struct trace_event_call *call)
{
	if (string_flags & (1ULL << arg)) {
		if (process_string(arg_str, len, call))
			*dereference_flags &= ~(1ULL << arg);
	} else if (process_pointer(arg_str, len, call))
		*dereference_flags &= ~(1ULL << arg);
	else
		pr_warn("TRACE EVENT ERROR: Bad dereference argument: '%.*s'\n",
			len, arg_str);
}

/*
 * Examine the print fmt of the event looking for unsafe dereference
 * pointers using %p* that could be recorded in the trace event and
 * much later referenced after the pointer was freed. Dereferencing
 * a pointer is OK if it is dereferenced into the event itself.
 */
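/*
 * Illustrative examples (hypothetical fields):
 *
 *	"ip=%pI4", REC->saddr      - OK when saddr is an array stored
 *				     in the event record itself.
 *	"ip=%pI4", REC->addr_ptr   - flagged: a plain pointer may be
 *				     stale by the time it is printed.
 */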
static void test_event_printk(struct trace_event_call *call)
{
	u64 dereference_flags = 0;
	u64 string_flags = 0;
	bool first = true;
	const char *fmt;
	int parens = 0;
	char in_quote = 0;
	int start_arg = 0;
	int arg = 0;
	int i, e;

	fmt = call->print_fmt;

	if (!fmt)
		return;

	for (i = 0; fmt[i]; i++) {
		switch (fmt[i]) {
		case '\\':
			i++;
			if (!fmt[i])
				return;
			continue;
		case '"':
		case '\'':
			/*
			 * The print fmt starts with a string that
			 * is processed first to find %p* usage,
			 * then after the first string, the print fmt
			 * contains arguments that are used to check
			 * if the dereferenced %p* usage is safe.
			 */
			if (first) {
				if (fmt[i] == '\'')
					continue;
				if (in_quote) {
					arg = 0;
					first = false;
					/*
					 * If there were no %p* uses,
					 * the fmt is OK.
					 */
					if (!dereference_flags)
						return;
				}
			}
			if (in_quote) {
				if (in_quote == fmt[i])
					in_quote = 0;
			} else {
				in_quote = fmt[i];
			}
			continue;
		case '%':
			if (!first || !in_quote)
				continue;
			i++;
			if (!fmt[i])
				return;
			switch (fmt[i]) {
			case '%':
				continue;
			case 'p':
 do_pointer:
				/* Find dereferencing fields */
				switch (fmt[i + 1]) {
				case 'B': case 'R': case 'r':
				case 'b': case 'M': case 'm':
				case 'I': case 'i': case 'E':
				case 'U': case 'V': case 'N':
				case 'a': case 'd': case 'D':
				case 'g': case 't': case 'C':
				case 'O': case 'f':
					if (WARN_ONCE(arg == 63,
						      "Too many args for event: %s",
						      trace_event_name(call)))
						return;
					dereference_flags |= 1ULL << arg;
				}
				break;
			default:
			{
				bool star = false;
				int j;

				/* Increment arg if %*s exists. */
				for (j = 0; fmt[i + j]; j++) {
					if (isdigit(fmt[i + j]) ||
					    fmt[i + j] == '.')
						continue;
					if (fmt[i + j] == '*') {
						star = true;
						/* Handle %*pbl case */
						if (!j && fmt[i + 1] == 'p') {
							arg++;
							i++;
							goto do_pointer;
						}
						continue;
					}
					if (fmt[i + j] == 's') {
						if (star)
							arg++;
						if (WARN_ONCE(arg == 63,
							      "Too many args for event: %s",
							      trace_event_name(call)))
							return;
						dereference_flags |= 1ULL << arg;
						string_flags |= 1ULL << arg;
					}
					break;
				}
				break;
			} /* default */

			} /* switch */
			arg++;
			continue;
		case '(':
			if (in_quote)
				continue;
			parens++;
			continue;
		case ')':
			if (in_quote)
				continue;
			parens--;
			if (WARN_ONCE(parens < 0,
				      "Paren mismatch for event: %s\narg='%s'\n%*s",
				      trace_event_name(call),
				      fmt + start_arg,
				      (i - start_arg) + 5, "^"))
				return;
			continue;
		case ',':
			if (in_quote || parens)
				continue;
			e = i;
			i++;
			while (isspace(fmt[i]))
				i++;

			/*
			 * If start_arg is zero, then this is the start of the
			 * first argument. The processing of the argument happens
			 * when the end of the argument is found, as it needs to
			 * handle parentheses and such.
			 */
			if (!start_arg) {
				start_arg = i;
				/* Balance out the i++ in the for loop */
				i--;
				continue;
			}

			if (dereference_flags & (1ULL << arg)) {
				handle_dereference_arg(fmt + start_arg, string_flags,
						       e - start_arg,
						       &dereference_flags, arg, call);
			}

			start_arg = i;
			arg++;
			/* Balance out the i++ in the for loop */
			i--;
		}
	}

	if (dereference_flags & (1ULL << arg)) {
		handle_dereference_arg(fmt + start_arg, string_flags,
				       i - start_arg,
				       &dereference_flags, arg, call);
	}

	/*
	 * If you triggered the below warning, the trace event reported
	 * uses an unsafe dereference pointer %p*. As the data stored
	 * at the trace event time may no longer exist when the trace
	 * event is printed, dereferencing to the original source is
	 * unsafe. The source of the dereference must be copied into the
	 * event itself, and the dereference must access the copy instead.
	 */
	if (WARN_ON_ONCE(dereference_flags)) {
		arg = 1;
		while (!(dereference_flags & 1)) {
			dereference_flags >>= 1;
			arg++;
		}
		pr_warn("event %s has unsafe dereference of argument %d\n",
			trace_event_name(call), arg);
		pr_warn("print_fmt: %s\n", fmt);
	}
}

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	test_event_printk(call);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	/*
	 * This is recorded at every sched_switch for this task.
	 * Thus, even if the task migrates the ignore value will be the same.
	 */
	return this_cpu_read(tr->array_buffer.data->ignore_pid) != 0;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

/**
 * trace_event_buffer_reserve - reserve space on the ring buffer for an event
 * @fbuffer: information about how to save the event
 * @trace_file: the instance file descriptor for the event
 * @len: The length of the event
 *
 * The @fbuffer has information about the ring buffer and data will
 * be added to it to be used by the call to trace_event_buffer_commit().
 * The @trace_file is the descriptor with information about the status
 * of the given event for a specific trace_array instance.
 * The @len is the length of data to save for the event.
 *
 * Returns a pointer to the data on the ring buffer or NULL if the
 * event was not reserved (event was filtered, too big, or the buffer
 * simply was disabled for write).
 */
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	fbuffer->trace_ctx = tracing_gen_ctx_dec();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->trace_ctx);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
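
/*
 * Sketch of a typical caller (the real probes are generated by the
 * TRACE_EVENT() macros, not written by hand):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct trace_event_raw_my_event *entry;    (hypothetical type)
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->foo = bar;
 *	trace_event_buffer_commit(&fbuffer);
 */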

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		if (!call->class->perf_probe)
			return -ENODEV;
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	bool soft_mode = atomic_read(&file->sm_ref) != 0;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the soft_mode is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			soft_mode = false;
			/* Disable use of trace_buffered_event */
			trace_buffered_event_disable();
		} else
			disable = !soft_mode;

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_UNREGISTER, file);

			WARN_ON_ONCE(ret);
		}
		/* If in soft mode, just set the SOFT_DISABLE_BIT, else clear it */
		if (soft_mode)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing. If the event is disabled, we set SOFT_DISABLED
		 * before enabling the event tracepoint, so it still seems
		 * to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			/* Enable use of trace_buffered_event */
			trace_buffered_event_enable();
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to soft mode. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER(RECORD_CMD)) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER(RECORD_TGID)) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

#ifdef CONFIG_MODULES
struct event_mod_load {
	struct list_head	list;
	char			*module;
	char			*match;
	char			*system;
	char			*event;
};

static void free_event_mod(struct event_mod_load *event_mod)
{
	list_del(&event_mod->list);
	kfree(event_mod->module);
	kfree(event_mod->match);
	kfree(event_mod->system);
	kfree(event_mod->event);
	kfree(event_mod);
}

static void clear_mod_events(struct trace_array *tr)
{
	struct event_mod_load *event_mod, *n;

	list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
		free_event_mod(event_mod);
	}
}

static int remove_cache_mod(struct trace_array *tr, const char *mod,
			    const char *match, const char *system, const char *event)
{
	struct event_mod_load *event_mod, *n;
	int ret = -EINVAL;

	list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
		if (strcmp(event_mod->module, mod) != 0)
			continue;

		if (match && strcmp(event_mod->match, match) != 0)
			continue;

		if (system &&
		    (!event_mod->system || strcmp(event_mod->system, system) != 0))
			continue;

		if (event &&
		    (!event_mod->event || strcmp(event_mod->event, event) != 0))
			continue;

		free_event_mod(event_mod);
		ret = 0;
	}

	return ret;
}

static int cache_mod(struct trace_array *tr, const char *mod, int set,
		     const char *match, const char *system, const char *event)
{
	struct event_mod_load *event_mod;

	/* If the module exists, then this just failed to find an event */
	if (module_exists(mod))
		return -EINVAL;

	/* See if this is to remove a cached filter */
	if (!set)
		return remove_cache_mod(tr, mod, match, system, event);

	event_mod = kzalloc_obj(*event_mod);
	if (!event_mod)
		return -ENOMEM;

	INIT_LIST_HEAD(&event_mod->list);
	event_mod->module = kstrdup(mod, GFP_KERNEL);
	if (!event_mod->module)
		goto out_free;

	if (match) {
		event_mod->match = kstrdup(match, GFP_KERNEL);
		if (!event_mod->match)
			goto out_free;
	}

	if (system) {
		event_mod->system = kstrdup(system, GFP_KERNEL);
		if (!event_mod->system)
			goto out_free;
	}

	if (event) {
		event_mod->event = kstrdup(event, GFP_KERNEL);
		if (!event_mod->event)
			goto out_free;
	}

	list_add(&event_mod->list, &tr->mod_events);

	return 0;

 out_free:
	free_event_mod(event_mod);

	return -ENOMEM;
}
#else /* CONFIG_MODULES */
static inline void clear_mod_events(struct trace_array *tr) { }
static int cache_mod(struct trace_array *tr, const char *mod, int set,
		     const char *match, const char *system, const char *event)
{
	return -EINVAL;
}
#endif

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	clear_mod_events(tr);
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	guard(preempt)();
	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	guard(preempt)();
	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev,
					struct task_struct *next,
					unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
	      trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev,
					 struct task_struct *next,
					 unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}

static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
					     lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_pid_list_free(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_pid_list_free(no_pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		eventfs_remove_dir(dir->ei);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

void event_file_get(struct trace_event_file *file)
{
	refcount_inc(&file->ref);
}

void event_file_put(struct trace_event_file *file)
{
	if (WARN_ON_ONCE(!refcount_read(&file->ref))) {
		if (file->flags & EVENT_FILE_FL_FREED)
			kmem_cache_free(file_cachep, file);
		return;
	}

	if (refcount_dec_and_test(&file->ref)) {
		/* Count should only go to zero when it is freed */
		if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
			return;
		kmem_cache_free(file_cachep, file);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	eventfs_remove_dir(file->ei);
	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	file->flags |= EVENT_FILE_FL_FREED;
	event_file_put(file);

	/* Wake up hist poll waiters to notice the EVENT_FILE_FL_FREED flag. */
	hist_poll_wakeup();
}

/*
 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set, NULL) will set/unset
 * all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set,
			      const char *mod)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	char *module __free(kfree) = NULL;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	if (mod) {
		char *p;

		module = kstrdup(mod, GFP_KERNEL);
		if (!module)
			return -ENOMEM;

		/* Replace all '-' with '_' as that's what modules do */
		for (p = strchr(module, '-'); p; p = strchr(p + 1, '-'))
			*p = '_';
	}

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* If a module is specified, skip events that are not that module */
		if (module && (!call->module || strcmp(module_name(call->module), module)))
			continue;

		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	/*
	 * If this is a module setting and nothing was found,
	 * check if the module was loaded. If it wasn't, cache it.
	 */
	if (module && ret == -EINVAL && !eret)
		ret = cache_mod(tr, module, set, match, sub, event);

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set,
				  const char *mod)
{
	int ret;

	if (trace_array_is_readonly(tr))
		return -EACCES;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set, mod);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match, *mod;
	int ret;

	if (!tr)
		return -ENOENT;

	/* Module events can be appended with :mod:<module> */
	mod = strstr(buf, ":mod:");
	if (mod) {
		*mod = '\0';
		/* move to the module name */
		mod += 5;
	}

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
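	/*
	 * Illustrative inputs (assuming the named events exist):
	 *	"sched:sched_switch"	- one specific event
	 *	"sched:"		- every event in the sched subsystem
	 *	"sched_switch"		- any event or subsystem of that name
	 *	"*:*:mod:snd"		- all events provided by module "snd"
	 */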

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	} else if (mod) {
		/* Allow wildcard for no length or star */
		if (!strlen(match) || strcmp(match, "*") == 0)
			match = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set, mod);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set, NULL);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set, NULL);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers(tr);
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

enum set_event_iter_type {
	SET_EVENT_FILE,
	SET_EVENT_MOD,
};

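/*
 * Iterator state for the set_event seq_file: first walk the enabled
 * trace_event_files, then (under CONFIG_MODULES) the cached ":mod:"
 * entries whose modules have not been loaded yet.
 */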
struct set_event_iter {
	enum set_event_iter_type	type;
	union {
		struct trace_event_file	*file;
		struct event_mod_load	*event_mod;
	};
};

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct set_event_iter *iter = v;
	struct trace_event_file *file;
	struct trace_array *tr = m->private;

	(*pos)++;

	if (iter->type == SET_EVENT_FILE) {
		file = iter->file;
		list_for_each_entry_continue(file, &tr->events, list) {
			if (file->flags & EVENT_FILE_FL_ENABLED) {
				iter->file = file;
				return iter;
			}
		}
#ifdef CONFIG_MODULES
		iter->type = SET_EVENT_MOD;
		iter->event_mod = list_entry(&tr->mod_events, struct event_mod_load, list);
#endif
	}

#ifdef CONFIG_MODULES
	list_for_each_entry_continue(iter->event_mod, &tr->mod_events, list)
		return iter;
#endif

	/*
	 * The iter is allocated in s_start() and passed via the 'v'
	 * parameter. To stop the iterator, NULL must be returned. But
	 * the return value is what the 'v' parameter in s_stop() receives
	 * and frees. Free iter here as it will no longer be used.
	 */
	kfree(iter);
	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct set_event_iter *iter;
	loff_t l;

	iter = kzalloc_obj(*iter);
	mutex_lock(&event_mutex);
	if (!iter)
		return NULL;

	iter->type = SET_EVENT_FILE;
	iter->file = list_entry(&tr->events, struct trace_event_file, list);

	for (l = 0; l <= *pos; ) {
		iter = s_next(m, iter, &l);
		if (!iter)
			break;
	}
	return iter;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int get_call_len(struct trace_event_call *call)
{
	int len;

	/* Get the length of "<system>:<event>" */
	len = strlen(call->class->system) + 1;
	len += strlen(trace_event_name(call));

	/* Pad so that the data starts at column 32 (with at least one space) */
	return len >= 32 ? 1 : 32 - len;
}
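
/*
 * e.g. "sched:sched_switch" is 18 characters, so get_call_len()
 * returns 14 and the filter or trigger text that follows begins at
 * byte offset 32.
 */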

/**
 * t_show_filters - seq_file callback to display active event filters
 * @m: The seq_file interface for formatted output
 * @v: The current trace_event_file being iterated
 *
 * Checks whether a filter is applied to the current event file in the
 * iteration and, if so, prints the system name, event name, and the
 * filter string.
 */
static int t_show_filters(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;
	struct event_filter *filter;
	int len;

	guard(rcu)();
	filter = rcu_dereference(file->filter);
	if (!filter || !filter->filter_string)
		return 0;

	len = get_call_len(call);

	seq_printf(m, "%s:%s%*s%s\n", call->class->system,
		   trace_event_name(call), len, "", filter->filter_string);

	return 0;
}

/**
 * t_show_triggers - seq_file callback to display active event triggers
 * @m: The seq_file interface for formatted output
 * @v: The current trace_event_file being iterated
 *
 * Iterates through the trigger list of the current event file and prints
 * each active trigger's configuration using its associated print
 * operation.
 */
static int t_show_triggers(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;
	struct event_trigger_data *data;
	int len;

	/*
	 * The event_mutex is held by t_start(), protecting the
	 * file->triggers list traversal.
	 */
	if (list_empty(&file->triggers))
		return 0;

	len = get_call_len(call);

	list_for_each_entry_rcu(data, &file->triggers, list) {
		seq_printf(m, "%s:%s%*s", call->class->system,
			   trace_event_name(call), len, "");

		data->cmd_ops->print(m, data);
	}

	return 0;
}

#ifdef CONFIG_MODULES
static int s_show(struct seq_file *m, void *v)
{
	struct set_event_iter *iter = v;
	const char *system;
	const char *event;

	if (iter->type == SET_EVENT_FILE)
		return t_show(m, iter->file);

	/* When match is set, system and event are not */
	if (iter->event_mod->match) {
		seq_printf(m, "%s:mod:%s\n", iter->event_mod->match,
			   iter->event_mod->module);
		return 0;
	}

	system = iter->event_mod->system ? : "*";
	event = iter->event_mod->event ? : "*";

	seq_printf(m, "%s:%s:mod:%s\n", system, event, iter->event_mod->module);

	return 0;
}
#else /* CONFIG_MODULES */
static int s_show(struct seq_file *m, void *v)
{
	struct set_event_iter *iter = v;

	return t_show(m, iter->file);
}
#endif

static void s_stop(struct seq_file *m, void *v)
{
	kfree(v);
	t_stop(m, NULL);
}
1803
1804 static void *
__next(struct seq_file * m,void * v,loff_t * pos,int type)1805 __next(struct seq_file *m, void *v, loff_t *pos, int type)
1806 {
1807 struct trace_array *tr = m->private;
1808 struct trace_pid_list *pid_list;
1809
1810 if (type == TRACE_PIDS)
1811 pid_list = rcu_dereference_sched(tr->filtered_pids);
1812 else
1813 pid_list = rcu_dereference_sched(tr->filtered_no_pids);
1814
1815 return trace_pid_next(pid_list, v, pos);
1816 }
1817
1818 static void *
p_next(struct seq_file * m,void * v,loff_t * pos)1819 p_next(struct seq_file *m, void *v, loff_t *pos)
1820 {
1821 return __next(m, v, pos, TRACE_PIDS);
1822 }
1823
1824 static void *
np_next(struct seq_file * m,void * v,loff_t * pos)1825 np_next(struct seq_file *m, void *v, loff_t *pos)
1826 {
1827 return __next(m, v, pos, TRACE_NO_PIDS);
1828 }
1829
__start(struct seq_file * m,loff_t * pos,int type)1830 static void *__start(struct seq_file *m, loff_t *pos, int type)
1831 __acquires(RCU)
1832 {
1833 struct trace_pid_list *pid_list;
1834 struct trace_array *tr = m->private;
1835
1836 /*
1837 * Grab the mutex, to keep calls to p_next() having the same
1838 * tr->filtered_pids as p_start() has.
1839 * If we just passed the tr->filtered_pids around, then RCU would
1840 * have been enough, but doing that makes things more complex.
1841 */
1842 mutex_lock(&event_mutex);
1843 rcu_read_lock_sched();
1844
1845 if (type == TRACE_PIDS)
1846 pid_list = rcu_dereference_sched(tr->filtered_pids);
1847 else
1848 pid_list = rcu_dereference_sched(tr->filtered_no_pids);
1849
1850 if (!pid_list)
1851 return NULL;
1852
1853 return trace_pid_start(pid_list, pos);
1854 }
1855
p_start(struct seq_file * m,loff_t * pos)1856 static void *p_start(struct seq_file *m, loff_t *pos)
1857 __acquires(RCU)
1858 {
1859 return __start(m, pos, TRACE_PIDS);
1860 }
1861
np_start(struct seq_file * m,loff_t * pos)1862 static void *np_start(struct seq_file *m, loff_t *pos)
1863 __acquires(RCU)
1864 {
1865 return __start(m, pos, TRACE_NO_PIDS);
1866 }
1867
1868 static void p_stop(struct seq_file *m, void *p)
1869 __releases(RCU)
1870 {
1871 rcu_read_unlock_sched();
1872 mutex_unlock(&event_mutex);
1873 }
1874
1875 static ssize_t
1876 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1877 loff_t *ppos)
1878 {
1879 struct trace_event_file *file;
1880 unsigned long flags;
1881 char buf[4] = "0";
1882
1883 mutex_lock(&event_mutex);
1884 file = event_file_file(filp);
1885 if (likely(file))
1886 flags = file->flags;
1887 mutex_unlock(&event_mutex);
1888
1889 if (!file)
1890 return -ENODEV;
1891
1892 if (flags & EVENT_FILE_FL_ENABLED &&
1893 !(flags & EVENT_FILE_FL_SOFT_DISABLED))
1894 strcpy(buf, "1");
1895
1896 if (atomic_read(&file->sm_ref) != 0)
1897 strcat(buf, "*");
1898
1899 strcat(buf, "\n");
1900
1901 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
1902 }
1903
1904 static ssize_t
1905 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
1906 loff_t *ppos)
1907 {
1908 struct trace_event_file *file;
1909 unsigned long val;
1910 int ret;
1911
1912 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1913 if (ret)
1914 return ret;
1915
1916 guard(mutex)(&event_mutex);
1917
1918 switch (val) {
1919 case 0:
1920 case 1:
1921 file = event_file_file(filp);
1922 if (!file)
1923 return -ENODEV;
1924 ret = tracing_update_buffers(file->tr);
1925 if (ret < 0)
1926 return ret;
1927 ret = ftrace_event_enable_disable(file, val);
1928 if (ret < 0)
1929 return ret;
1930 break;
1931
1932 default:
1933 return -EINVAL;
1934 }
1935
1936 *ppos += cnt;
1937
1938 return cnt;
1939 }
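
/*
 * Illustrative userspace sketch (not part of this file) of driving the
 * per-event "enable" file implemented by event_enable_read() and
 * event_enable_write() above.  The tracefs mount point and the event used
 * are assumptions.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/tracing/events/sched/sched_switch/enable";
	char state[8];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	/* "1" enables the event, "0" disables it */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return 1;
	}
	/* Reads back "0\n" or "1\n", with a '*' appended if soft-enabled */
	n = pread(fd, state, sizeof(state) - 1, 0);
	if (n > 0) {
		state[n] = '\0';
		printf("enable = %s", state);
	}
	close(fd);
	return 0;
}
#endif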
1940
1941 /*
1942 * Returns:
1943 * 0 : no matching events exist
1944 * 1 : all events are disabled
1945 * 2 : all events are enabled
1946 * 3 : some events are enabled and some are disabled
1947 */
1948 int trace_events_enabled(struct trace_array *tr, const char *system)
1949 {
1950 struct trace_event_call *call;
1951 struct trace_event_file *file;
1952 int set = 0;
1953
1954 guard(mutex)(&event_mutex);
1955
1956 list_for_each_entry(file, &tr->events, list) {
1957 call = file->event_call;
1958 if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
1959 !trace_event_name(call) || !call->class || !call->class->reg)
1960 continue;
1961
1962 if (system && strcmp(call->class->system, system) != 0)
1963 continue;
1964
1965 /*
1966 * We need to find out if all the events are set
1967 * or if all events are cleared, or if we have
1968 * a mixture.
1969 */
1970 set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));
1971
1972 /*
1973 * If we have a mixture, no need to look further.
1974 */
1975 if (set == 3)
1976 break;
1977 }
1978
1979 return set;
1980 }
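
/*
 * Worked example of the bitmask above: each file contributes
 * (1 << !!enabled), i.e. bit 0 for a disabled event and bit 1 for an
 * enabled one.  Two disabled events give set == 1, two enabled events
 * give set == 2, and one of each gives 1 | 2 == 3 (a mixture), which
 * system_enable_read() below maps to the characters '0', '1' and 'X'.
 */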
1981
1982 static ssize_t
1983 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1984 loff_t *ppos)
1985 {
1986 const char set_to_char[4] = { '?', '0', '1', 'X' };
1987 struct trace_subsystem_dir *dir = filp->private_data;
1988 struct event_subsystem *system = dir->subsystem;
1989 struct trace_array *tr = dir->tr;
1990 char buf[2];
1991 int set;
1992 int ret;
1993
1994 set = trace_events_enabled(tr, system ? system->name : NULL);
1995
1996 buf[0] = set_to_char[set];
1997 buf[1] = '\n';
1998
1999 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
2000
2001 return ret;
2002 }
2003
2004 static ssize_t
2005 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
2006 loff_t *ppos)
2007 {
2008 struct trace_subsystem_dir *dir = filp->private_data;
2009 struct event_subsystem *system = dir->subsystem;
2010 const char *name = NULL;
2011 unsigned long val;
2012 ssize_t ret;
2013
2014 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
2015 if (ret)
2016 return ret;
2017
2018 ret = tracing_update_buffers(dir->tr);
2019 if (ret < 0)
2020 return ret;
2021
2022 if (val != 0 && val != 1)
2023 return -EINVAL;
2024
2025 /*
2026 * Opening of "enable" adds a ref count to system,
2027 * so the name is safe to use.
2028 */
2029 if (system)
2030 name = system->name;
2031
2032 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val, NULL);
2033 if (ret)
2034 goto out;
2035
2036 ret = cnt;
2037
2038 out:
2039 *ppos += cnt;
2040
2041 return ret;
2042 }
2043
2044 enum {
2045 FORMAT_HEADER = 1,
2046 FORMAT_FIELD_SEPERATOR = 2,
2047 FORMAT_PRINTFMT = 3,
2048 };
2049
2050 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
2051 {
2052 struct trace_event_file *file = event_file_data(m->private);
2053 struct trace_event_call *call = file->event_call;
2054 struct list_head *common_head = &ftrace_common_fields;
2055 struct list_head *head = trace_get_fields(call);
2056 struct list_head *node = v;
2057
2058 (*pos)++;
2059
2060 switch ((unsigned long)v) {
2061 case FORMAT_HEADER:
2062 node = common_head;
2063 break;
2064
2065 case FORMAT_FIELD_SEPERATOR:
2066 node = head;
2067 break;
2068
2069 case FORMAT_PRINTFMT:
2070 /* all done */
2071 return NULL;
2072 }
2073
2074 node = node->prev;
2075 if (node == common_head)
2076 return (void *)FORMAT_FIELD_SEPERATOR;
2077 else if (node == head)
2078 return (void *)FORMAT_PRINTFMT;
2079 else
2080 return node;
2081 }
2082
2083 static int f_show(struct seq_file *m, void *v)
2084 {
2085 struct trace_event_file *file = event_file_data(m->private);
2086 struct trace_event_call *call = file->event_call;
2087 struct ftrace_event_field *field;
2088 const char *array_descriptor;
2089
2090 switch ((unsigned long)v) {
2091 case FORMAT_HEADER:
2092 seq_printf(m, "name: %s\n", trace_event_name(call));
2093 seq_printf(m, "ID: %d\n", call->event.type);
2094 seq_puts(m, "format:\n");
2095 return 0;
2096
2097 case FORMAT_FIELD_SEPERATOR:
2098 seq_putc(m, '\n');
2099 return 0;
2100
2101 case FORMAT_PRINTFMT:
2102 seq_printf(m, "\nprint fmt: %s\n",
2103 call->print_fmt);
2104 return 0;
2105 }
2106
2107 field = list_entry(v, struct ftrace_event_field, link);
2108 /*
2109 * Smartly shows the array type (except dynamic arrays).
2110 * Normal:
2111 * field:TYPE VAR
2112 * If TYPE := TYPE[LEN], it is shown:
2113 * field:TYPE VAR[LEN]
2114 */
2115 array_descriptor = strchr(field->type, '[');
2116
2117 if (str_has_prefix(field->type, "__data_loc"))
2118 array_descriptor = NULL;
2119
2120 if (!array_descriptor)
2121 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
2122 field->type, field->name, field->offset,
2123 field->size, !!field->is_signed);
2124 else if (field->len)
2125 seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
2126 (int)(array_descriptor - field->type),
2127 field->type, field->name,
2128 field->len, field->offset,
2129 field->size, !!field->is_signed);
2130 else
2131 seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
2132 (int)(array_descriptor - field->type),
2133 field->type, field->name,
2134 field->offset, field->size, !!field->is_signed);
2135
2136 return 0;
2137 }
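
/*
 * For illustration, the seq operations above render a "format" file of the
 * following shape (event name, ID and field values are hypothetical):
 *
 *   name: sched_switch
 *   ID: 316
 *   format:
 *           field:unsigned short common_type;  offset:0;  size:2;  signed:0;
 *
 *           field:char prev_comm[16];          offset:8;  size:16; signed:0;
 *
 *   print fmt: "prev_comm=%s ...", REC->prev_comm
 */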
2138
2139 static void *f_start(struct seq_file *m, loff_t *pos)
2140 {
2141 struct trace_event_file *file;
2142 void *p = (void *)FORMAT_HEADER;
2143 loff_t l = 0;
2144
2145 /* ->stop() is called even if ->start() fails */
2146 mutex_lock(&event_mutex);
2147 file = event_file_file(m->private);
2148 if (!file)
2149 return ERR_PTR(-ENODEV);
2150
2151 while (l < *pos && p)
2152 p = f_next(m, p, &l);
2153
2154 return p;
2155 }
2156
2157 static void f_stop(struct seq_file *m, void *p)
2158 {
2159 mutex_unlock(&event_mutex);
2160 }
2161
2162 static const struct seq_operations trace_format_seq_ops = {
2163 .start = f_start,
2164 .next = f_next,
2165 .stop = f_stop,
2166 .show = f_show,
2167 };
2168
2169 static int trace_format_open(struct inode *inode, struct file *file)
2170 {
2171 struct seq_file *m;
2172 int ret;
2173
2174 /* Do we want to hide event format files on tracefs lockdown? */
2175
2176 ret = seq_open(file, &trace_format_seq_ops);
2177 if (ret < 0)
2178 return ret;
2179
2180 m = file->private_data;
2181 m->private = file;
2182
2183 return 0;
2184 }
2185
2186 #ifdef CONFIG_PERF_EVENTS
2187 static ssize_t
2188 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2189 {
2190 /* id is directly in i_private and available for inode's lifetime. */
2191 int id = (long)file_inode(filp)->i_private;
2192 char buf[32];
2193 int len;
2194
2195 WARN_ON(!id);
2196
2197 len = sprintf(buf, "%d\n", id);
2198
2199 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
2200 }
2201 #endif
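
/*
 * Illustrative userspace sketch (the tracefs path is an assumption): the
 * number exposed by the "id" file above is what perf_event_open() expects
 * in attr.config for a PERF_TYPE_TRACEPOINT event.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

long read_event_id(const char *event)	/* e.g. "sched/sched_switch" */
{
	char path[256], buf[32];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/tracing/events/%s/id", event);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return strtol(buf, NULL, 10);	/* matches the "%d\n" written above */
}
#endif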
2202
2203 static ssize_t
2204 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
2205 loff_t *ppos)
2206 {
2207 struct trace_event_file *file;
2208 struct trace_seq *s;
2209 int r = -ENODEV;
2210
2211 if (*ppos)
2212 return 0;
2213
2214 s = kmalloc_obj(*s);
2215
2216 if (!s)
2217 return -ENOMEM;
2218
2219 trace_seq_init(s);
2220
2221 mutex_lock(&event_mutex);
2222 file = event_file_file(filp);
2223 if (file)
2224 print_event_filter(file, s);
2225 mutex_unlock(&event_mutex);
2226
2227 if (file)
2228 r = simple_read_from_buffer(ubuf, cnt, ppos,
2229 s->buffer, trace_seq_used(s));
2230
2231 kfree(s);
2232
2233 return r;
2234 }
2235
2236 static ssize_t
2237 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
2238 loff_t *ppos)
2239 {
2240 struct trace_event_file *file;
2241 char *buf;
2242 int err = -ENODEV;
2243
2244 if (cnt >= PAGE_SIZE)
2245 return -EINVAL;
2246
2247 buf = memdup_user_nul(ubuf, cnt);
2248 if (IS_ERR(buf))
2249 return PTR_ERR(buf);
2250
2251 mutex_lock(&event_mutex);
2252 file = event_file_file(filp);
2253 if (file)
2254 err = apply_event_filter(file, buf);
2255 mutex_unlock(&event_mutex);
2256
2257 kfree(buf);
2258 if (err < 0)
2259 return err;
2260
2261 *ppos += cnt;
2262
2263 return cnt;
2264 }
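
/*
 * Illustrative sketch of the write side above (path and filter expression
 * are assumptions): apply_event_filter() parses whatever string arrives,
 * and writing "0" clears the filter.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_sched_switch_filter(const char *expr)
{
	int fd = open("/sys/kernel/tracing/events/sched/sched_switch/filter",
		      O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, expr, strlen(expr)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

/* e.g. set_sched_switch_filter("prev_pid > 1000"); */
#endif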
2265
2266 static LIST_HEAD(event_subsystems);
2267
2268 static int subsystem_open(struct inode *inode, struct file *filp)
2269 {
2270 struct trace_subsystem_dir *dir = NULL, *iter_dir;
2271 struct trace_array *tr = NULL, *iter_tr;
2272 struct event_subsystem *system = NULL;
2273 int ret;
2274
2275 if (unlikely(tracing_disabled))
2276 return -ENODEV;
2277
2278 /* Make sure the system still exists */
2279 mutex_lock(&event_mutex);
2280 mutex_lock(&trace_types_lock);
2281 list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) {
2282 list_for_each_entry(iter_dir, &iter_tr->systems, list) {
2283 if (iter_dir == inode->i_private) {
2284 /* Don't open systems with no events */
2285 tr = iter_tr;
2286 dir = iter_dir;
2287 if (dir->nr_events) {
2288 __get_system_dir(dir);
2289 system = dir->subsystem;
2290 }
2291 goto exit_loop;
2292 }
2293 }
2294 }
2295 exit_loop:
2296 mutex_unlock(&trace_types_lock);
2297 mutex_unlock(&event_mutex);
2298
2299 if (!system)
2300 return -ENODEV;
2301
2302 /* Still need to increment the ref count of the trace_array */
2303 if (trace_array_get(tr) < 0) {
2304 put_system(dir);
2305 return -ENODEV;
2306 }
2307
2308 ret = tracing_open_generic(inode, filp);
2309 if (ret < 0) {
2310 trace_array_put(tr);
2311 put_system(dir);
2312 }
2313
2314 return ret;
2315 }
2316
2317 static int system_tr_open(struct inode *inode, struct file *filp)
2318 {
2319 struct trace_subsystem_dir *dir;
2320 struct trace_array *tr = inode->i_private;
2321 int ret;
2322
2323 /* Make a temporary dir that has no system but points to tr */
2324 dir = kzalloc_obj(*dir);
2325 if (!dir)
2326 return -ENOMEM;
2327
2328 ret = tracing_open_generic_tr(inode, filp);
2329 if (ret < 0) {
2330 kfree(dir);
2331 return ret;
2332 }
2333 dir->tr = tr;
2334 filp->private_data = dir;
2335
2336 return 0;
2337 }
2338
2339 static int subsystem_release(struct inode *inode, struct file *file)
2340 {
2341 struct trace_subsystem_dir *dir = file->private_data;
2342
2343 trace_array_put(dir->tr);
2344
2345 /*
2346 * If dir->subsystem is NULL, then this is a temporary
2347 * descriptor that was made for a trace_array to enable
2348 * all subsystems.
2349 */
2350 if (dir->subsystem)
2351 put_system(dir);
2352 else
2353 kfree(dir);
2354
2355 return 0;
2356 }
2357
2358 static ssize_t
2359 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
2360 loff_t *ppos)
2361 {
2362 struct trace_subsystem_dir *dir = filp->private_data;
2363 struct event_subsystem *system = dir->subsystem;
2364 struct trace_seq *s;
2365 int r;
2366
2367 if (*ppos)
2368 return 0;
2369
2370 s = kmalloc_obj(*s);
2371 if (!s)
2372 return -ENOMEM;
2373
2374 trace_seq_init(s);
2375
2376 print_subsystem_event_filter(system, s);
2377 r = simple_read_from_buffer(ubuf, cnt, ppos,
2378 s->buffer, trace_seq_used(s));
2379
2380 kfree(s);
2381
2382 return r;
2383 }
2384
2385 static ssize_t
2386 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
2387 loff_t *ppos)
2388 {
2389 struct trace_subsystem_dir *dir = filp->private_data;
2390 char *buf;
2391 int err;
2392
2393 if (cnt >= PAGE_SIZE)
2394 return -EINVAL;
2395
2396 buf = memdup_user_nul(ubuf, cnt);
2397 if (IS_ERR(buf))
2398 return PTR_ERR(buf);
2399
2400 err = apply_subsystem_event_filter(dir, buf);
2401 kfree(buf);
2402 if (err < 0)
2403 return err;
2404
2405 *ppos += cnt;
2406
2407 return cnt;
2408 }
2409
2410 static ssize_t
2411 show_header_page_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2412 {
2413 struct trace_array *tr = filp->private_data;
2414 struct trace_seq *s;
2415 int r;
2416
2417 if (*ppos)
2418 return 0;
2419
2420 s = kmalloc_obj(*s);
2421 if (!s)
2422 return -ENOMEM;
2423
2424 trace_seq_init(s);
2425
2426 ring_buffer_print_page_header(tr->array_buffer.buffer, s);
2427 r = simple_read_from_buffer(ubuf, cnt, ppos,
2428 s->buffer, trace_seq_used(s));
2429
2430 kfree(s);
2431
2432 return r;
2433 }
2434
2435 static ssize_t
2436 show_header_event_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2437 {
2438 struct trace_seq *s;
2439 int r;
2440
2441 if (*ppos)
2442 return 0;
2443
2444 s = kmalloc_obj(*s);
2445 if (!s)
2446 return -ENOMEM;
2447
2448 trace_seq_init(s);
2449
2450 ring_buffer_print_entry_header(s);
2451 r = simple_read_from_buffer(ubuf, cnt, ppos,
2452 s->buffer, trace_seq_used(s));
2453
2454 kfree(s);
2455
2456 return r;
2457 }
2458
2459 static void ignore_task_cpu(void *data)
2460 {
2461 struct trace_array *tr = data;
2462 struct trace_pid_list *pid_list;
2463 struct trace_pid_list *no_pid_list;
2464
2465 /*
2466 * This function is called by on_each_cpu() while the
2467 * event_mutex is held.
2468 */
2469 pid_list = rcu_dereference_protected(tr->filtered_pids,
2470 mutex_is_locked(&event_mutex));
2471 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
2472 mutex_is_locked(&event_mutex));
2473
2474 this_cpu_write(tr->array_buffer.data->ignore_pid,
2475 trace_ignore_this_task(pid_list, no_pid_list, current));
2476 }
2477
2478 static void register_pid_events(struct trace_array *tr)
2479 {
2480 /*
2481 * Register a probe that is called before all other probes
2482 * to set ignore_pid if next or prev do not match.
2483 * Register a probe that is called after all other probes
2484 * to only keep ignore_pid set if next pid matches.
2485 */
2486 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
2487 tr, INT_MAX);
2488 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
2489 tr, 0);
2490
2491 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
2492 tr, INT_MAX);
2493 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
2494 tr, 0);
2495
2496 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
2497 tr, INT_MAX);
2498 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
2499 tr, 0);
2500
2501 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
2502 tr, INT_MAX);
2503 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
2504 tr, 0);
2505 }
2506
2507 static ssize_t
2508 event_pid_write(struct file *filp, const char __user *ubuf,
2509 size_t cnt, loff_t *ppos, int type)
2510 {
2511 struct seq_file *m = filp->private_data;
2512 struct trace_array *tr = m->private;
2513 struct trace_pid_list *filtered_pids = NULL;
2514 struct trace_pid_list *other_pids = NULL;
2515 struct trace_pid_list *pid_list;
2516 struct trace_event_file *file;
2517 ssize_t ret;
2518
2519 if (!cnt)
2520 return 0;
2521
2522 ret = tracing_update_buffers(tr);
2523 if (ret < 0)
2524 return ret;
2525
2526 guard(mutex)(&event_mutex);
2527
2528 if (type == TRACE_PIDS) {
2529 filtered_pids = rcu_dereference_protected(tr->filtered_pids,
2530 lockdep_is_held(&event_mutex));
2531 other_pids = rcu_dereference_protected(tr->filtered_no_pids,
2532 lockdep_is_held(&event_mutex));
2533 } else {
2534 filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
2535 lockdep_is_held(&event_mutex));
2536 other_pids = rcu_dereference_protected(tr->filtered_pids,
2537 lockdep_is_held(&event_mutex));
2538 }
2539
2540 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
2541 if (ret < 0)
2542 return ret;
2543
2544 if (type == TRACE_PIDS)
2545 rcu_assign_pointer(tr->filtered_pids, pid_list);
2546 else
2547 rcu_assign_pointer(tr->filtered_no_pids, pid_list);
2548
2549 list_for_each_entry(file, &tr->events, list) {
2550 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
2551 }
2552
2553 if (filtered_pids) {
2554 tracepoint_synchronize_unregister();
2555 trace_pid_list_free(filtered_pids);
2556 } else if (pid_list && !other_pids) {
2557 register_pid_events(tr);
2558 }
2559
2560 /*
2561 * Ignoring of pids is done at task switch. But we have to
2562 * check for those tasks that are currently running.
2563 * Always do this in case a pid was appended or removed.
2564 */
2565 on_each_cpu(ignore_task_cpu, tr, 1);
2566
2567 *ppos += ret;
2568
2569 return ret;
2570 }
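
/*
 * Illustrative sketch (paths are assumptions): set_event_pid and
 * set_event_notrace_pid are both serviced by event_pid_write(), differing
 * only in the TRACE_PIDS / TRACE_NO_PIDS type passed in by the two
 * wrappers below.
 */
#if 0
#include <stdio.h>

static int trace_only_pid(int pid)
{
	FILE *f = fopen("/sys/kernel/tracing/set_event_pid", "w");

	if (!f)
		return -1;
	/* Open with "a" instead to append to the existing pid list */
	fprintf(f, "%d\n", pid);
	fclose(f);
	return 0;
}
#endif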
2571
2572 static ssize_t
2573 ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
2574 size_t cnt, loff_t *ppos)
2575 {
2576 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
2577 }
2578
2579 static ssize_t
2580 ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
2581 size_t cnt, loff_t *ppos)
2582 {
2583 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
2584 }
2585
2586 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
2587 static int ftrace_event_set_open(struct inode *inode, struct file *file);
2588 static int ftrace_event_show_filters_open(struct inode *inode, struct file *file);
2589 static int ftrace_event_show_triggers_open(struct inode *inode, struct file *file);
2590 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
2591 static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
2592 static int ftrace_event_release(struct inode *inode, struct file *file);
2593
2594 static const struct seq_operations show_event_seq_ops = {
2595 .start = t_start,
2596 .next = t_next,
2597 .show = t_show,
2598 .stop = t_stop,
2599 };
2600
2601 static const struct seq_operations show_set_event_seq_ops = {
2602 .start = s_start,
2603 .next = s_next,
2604 .show = s_show,
2605 .stop = s_stop,
2606 };
2607
2608 static const struct seq_operations show_show_event_filters_seq_ops = {
2609 .start = t_start,
2610 .next = t_next,
2611 .show = t_show_filters,
2612 .stop = t_stop,
2613 };
2614
2615 static const struct seq_operations show_show_event_triggers_seq_ops = {
2616 .start = t_start,
2617 .next = t_next,
2618 .show = t_show_triggers,
2619 .stop = t_stop,
2620 };
2621
2622 static const struct seq_operations show_set_pid_seq_ops = {
2623 .start = p_start,
2624 .next = p_next,
2625 .show = trace_pid_show,
2626 .stop = p_stop,
2627 };
2628
2629 static const struct seq_operations show_set_no_pid_seq_ops = {
2630 .start = np_start,
2631 .next = np_next,
2632 .show = trace_pid_show,
2633 .stop = p_stop,
2634 };
2635
2636 static const struct file_operations ftrace_avail_fops = {
2637 .open = ftrace_event_avail_open,
2638 .read = seq_read,
2639 .llseek = seq_lseek,
2640 .release = seq_release,
2641 };
2642
2643 static const struct file_operations ftrace_set_event_fops = {
2644 .open = ftrace_event_set_open,
2645 .read = seq_read,
2646 .write = ftrace_event_write,
2647 .llseek = seq_lseek,
2648 .release = ftrace_event_release,
2649 };
2650
2651 static const struct file_operations ftrace_show_event_filters_fops = {
2652 .open = ftrace_event_show_filters_open,
2653 .read = seq_read,
2654 .llseek = seq_lseek,
2655 .release = seq_release,
2656 };
2657
2658 static const struct file_operations ftrace_show_event_triggers_fops = {
2659 .open = ftrace_event_show_triggers_open,
2660 .read = seq_read,
2661 .llseek = seq_lseek,
2662 .release = seq_release,
2663 };
2664
2665 static const struct file_operations ftrace_set_event_pid_fops = {
2666 .open = ftrace_event_set_pid_open,
2667 .read = seq_read,
2668 .write = ftrace_event_pid_write,
2669 .llseek = seq_lseek,
2670 .release = ftrace_event_release,
2671 };
2672
2673 static const struct file_operations ftrace_set_event_notrace_pid_fops = {
2674 .open = ftrace_event_set_npid_open,
2675 .read = seq_read,
2676 .write = ftrace_event_npid_write,
2677 .llseek = seq_lseek,
2678 .release = ftrace_event_release,
2679 };
2680
2681 static const struct file_operations ftrace_enable_fops = {
2682 .open = tracing_open_file_tr,
2683 .read = event_enable_read,
2684 .write = event_enable_write,
2685 .release = tracing_release_file_tr,
2686 .llseek = default_llseek,
2687 };
2688
2689 static const struct file_operations ftrace_event_format_fops = {
2690 .open = trace_format_open,
2691 .read = seq_read,
2692 .llseek = seq_lseek,
2693 .release = seq_release,
2694 };
2695
2696 #ifdef CONFIG_PERF_EVENTS
2697 static const struct file_operations ftrace_event_id_fops = {
2698 .read = event_id_read,
2699 .llseek = default_llseek,
2700 };
2701 #endif
2702
2703 static const struct file_operations ftrace_event_filter_fops = {
2704 .open = tracing_open_file_tr,
2705 .read = event_filter_read,
2706 .write = event_filter_write,
2707 .release = tracing_release_file_tr,
2708 .llseek = default_llseek,
2709 };
2710
2711 static const struct file_operations ftrace_subsystem_filter_fops = {
2712 .open = subsystem_open,
2713 .read = subsystem_filter_read,
2714 .write = subsystem_filter_write,
2715 .llseek = default_llseek,
2716 .release = subsystem_release,
2717 };
2718
2719 static const struct file_operations ftrace_system_enable_fops = {
2720 .open = subsystem_open,
2721 .read = system_enable_read,
2722 .write = system_enable_write,
2723 .llseek = default_llseek,
2724 .release = subsystem_release,
2725 };
2726
2727 static const struct file_operations ftrace_tr_enable_fops = {
2728 .open = system_tr_open,
2729 .read = system_enable_read,
2730 .write = system_enable_write,
2731 .llseek = default_llseek,
2732 .release = subsystem_release,
2733 };
2734
2735 static const struct file_operations ftrace_show_header_page_fops = {
2736 .open = tracing_open_generic_tr,
2737 .read = show_header_page_file,
2738 .llseek = default_llseek,
2739 .release = tracing_release_generic_tr,
2740 };
2741
2742 static const struct file_operations ftrace_show_header_event_fops = {
2743 .open = tracing_open_generic_tr,
2744 .read = show_header_event_file,
2745 .llseek = default_llseek,
2746 .release = tracing_release_generic_tr,
2747 };
2748
2749 static int
2750 ftrace_event_open(struct inode *inode, struct file *file,
2751 const struct seq_operations *seq_ops)
2752 {
2753 struct seq_file *m;
2754 int ret;
2755
2756 ret = security_locked_down(LOCKDOWN_TRACEFS);
2757 if (ret)
2758 return ret;
2759
2760 ret = seq_open(file, seq_ops);
2761 if (ret < 0)
2762 return ret;
2763 m = file->private_data;
2764 /* copy tr over to seq ops */
2765 m->private = inode->i_private;
2766
2767 return ret;
2768 }
2769
2770 static int ftrace_event_release(struct inode *inode, struct file *file)
2771 {
2772 struct trace_array *tr = inode->i_private;
2773
2774 trace_array_put(tr);
2775
2776 return seq_release(inode, file);
2777 }
2778
2779 static int
2780 ftrace_event_avail_open(struct inode *inode, struct file *file)
2781 {
2782 const struct seq_operations *seq_ops = &show_event_seq_ops;
2783
2784 /* Checks for tracefs lockdown */
2785 return ftrace_event_open(inode, file, seq_ops);
2786 }
2787
2788 static int
2789 ftrace_event_set_open(struct inode *inode, struct file *file)
2790 {
2791 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
2792 struct trace_array *tr = inode->i_private;
2793 int ret;
2794
2795 ret = tracing_check_open_get_tr(tr);
2796 if (ret)
2797 return ret;
2798
2799 if ((file->f_mode & FMODE_WRITE) &&
2800 (file->f_flags & O_TRUNC))
2801 ftrace_clear_events(tr);
2802
2803 ret = ftrace_event_open(inode, file, seq_ops);
2804 if (ret < 0)
2805 trace_array_put(tr);
2806 return ret;
2807 }
2808
2809 /**
2810 * ftrace_event_show_filters_open - open interface for set_event_filters
2811 * @inode: The inode of the file
2812 * @file: The file being opened
2813 *
2814 * Connects the set_event_filters file to the sequence operations
2815 * required to iterate over and display active event filters.
2816 */
2817 static int
2818 ftrace_event_show_filters_open(struct inode *inode, struct file *file)
2819 {
2820 return ftrace_event_open(inode, file, &show_show_event_filters_seq_ops);
2821 }
2822
2823 /**
2824 * ftrace_event_show_triggers_open - open interface for show_event_triggers
2825 * @inode: The inode of the file
2826 * @file: The file being opened
2827 *
2828 * Connects the show_event_triggers file to the sequence operations
2829 * required to iterate over and display active event triggers.
2830 */
2831 static int
2832 ftrace_event_show_triggers_open(struct inode *inode, struct file *file)
2833 {
2834 return ftrace_event_open(inode, file, &show_show_event_triggers_seq_ops);
2835 }
2836
2837 static int
2838 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
2839 {
2840 const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
2841 struct trace_array *tr = inode->i_private;
2842 int ret;
2843
2844 ret = tracing_check_open_get_tr(tr);
2845 if (ret)
2846 return ret;
2847
2848 if ((file->f_mode & FMODE_WRITE) &&
2849 (file->f_flags & O_TRUNC))
2850 ftrace_clear_event_pids(tr, TRACE_PIDS);
2851
2852 ret = ftrace_event_open(inode, file, seq_ops);
2853 if (ret < 0)
2854 trace_array_put(tr);
2855 return ret;
2856 }
2857
2858 static int
2859 ftrace_event_set_npid_open(struct inode *inode, struct file *file)
2860 {
2861 const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
2862 struct trace_array *tr = inode->i_private;
2863 int ret;
2864
2865 ret = tracing_check_open_get_tr(tr);
2866 if (ret)
2867 return ret;
2868
2869 if ((file->f_mode & FMODE_WRITE) &&
2870 (file->f_flags & O_TRUNC))
2871 ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
2872
2873 ret = ftrace_event_open(inode, file, seq_ops);
2874 if (ret < 0)
2875 trace_array_put(tr);
2876 return ret;
2877 }
2878
2879 static struct event_subsystem *
2880 create_new_subsystem(const char *name)
2881 {
2882 struct event_subsystem *system;
2883
2884 /* need to create new entry */
2885 system = kmalloc_obj(*system);
2886 if (!system)
2887 return NULL;
2888
2889 system->ref_count = 1;
2890
2891 /* Only allocate if dynamic (kprobes and modules) */
2892 system->name = kstrdup_const(name, GFP_KERNEL);
2893 if (!system->name)
2894 goto out_free;
2895
2896 system->filter = kzalloc_obj(struct event_filter);
2897 if (!system->filter)
2898 goto out_free;
2899
2900 list_add(&system->list, &event_subsystems);
2901
2902 return system;
2903
2904 out_free:
2905 kfree_const(system->name);
2906 kfree(system);
2907 return NULL;
2908 }
2909
2910 static int system_callback(const char *name, umode_t *mode, void **data,
2911 const struct file_operations **fops)
2912 {
2913 if (strcmp(name, "filter") == 0)
2914 *fops = &ftrace_subsystem_filter_fops;
2915
2916 else if (strcmp(name, "enable") == 0)
2917 *fops = &ftrace_system_enable_fops;
2918
2919 else
2920 return 0;
2921
2922 *mode = TRACE_MODE_WRITE;
2923 return 1;
2924 }
2925
2926 static struct eventfs_inode *
2927 event_subsystem_dir(struct trace_array *tr, const char *name,
2928 struct trace_event_file *file, struct eventfs_inode *parent)
2929 {
2930 struct event_subsystem *system, *iter;
2931 struct trace_subsystem_dir *dir;
2932 struct eventfs_inode *ei;
2933 int nr_entries;
2934 static struct eventfs_entry system_entries[] = {
2935 {
2936 .name = "filter",
2937 .callback = system_callback,
2938 },
2939 {
2940 .name = "enable",
2941 .callback = system_callback,
2942 }
2943 };
2944
2945 /* First check whether we already created this directory */
2946 list_for_each_entry(dir, &tr->systems, list) {
2947 system = dir->subsystem;
2948 if (strcmp(system->name, name) == 0) {
2949 dir->nr_events++;
2950 file->system = dir;
2951 return dir->ei;
2952 }
2953 }
2954
2955 /* Now see if the system itself exists. */
2956 system = NULL;
2957 list_for_each_entry(iter, &event_subsystems, list) {
2958 if (strcmp(iter->name, name) == 0) {
2959 system = iter;
2960 break;
2961 }
2962 }
2963
2964 dir = kmalloc_obj(*dir);
2965 if (!dir)
2966 goto out_fail;
2967
2968 if (!system) {
2969 system = create_new_subsystem(name);
2970 if (!system)
2971 goto out_free;
2972 } else
2973 __get_system(system);
2974
2975 /* The "ftrace" system only has directories, no files; read-only instances get none either. */
2976 if (strcmp(name, "ftrace") == 0 || trace_array_is_readonly(tr))
2977 nr_entries = 0;
2978 else
2979 nr_entries = ARRAY_SIZE(system_entries);
2980
2981 ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir);
2982 if (IS_ERR(ei)) {
2983 pr_warn("Failed to create system directory %s\n", name);
2984 __put_system(system);
2985 goto out_free;
2986 }
2987
2988 dir->ei = ei;
2989 dir->tr = tr;
2990 dir->ref_count = 1;
2991 dir->nr_events = 1;
2992 dir->subsystem = system;
2993 file->system = dir;
2994
2995 list_add(&dir->list, &tr->systems);
2996
2997 return dir->ei;
2998
2999 out_free:
3000 kfree(dir);
3001 out_fail:
3002 /* Only print this message if we failed on a memory allocation */
3003 if (!dir || !system)
3004 pr_warn("No memory to create event subsystem %s\n", name);
3005 return NULL;
3006 }
3007
3008 static int
3009 event_define_fields(struct trace_event_call *call)
3010 {
3011 struct list_head *head;
3012 int ret = 0;
3013
3014 /*
3015 * Other events may have the same class. Only update
3016 * the fields if they are not already defined.
3017 */
3018 head = trace_get_fields(call);
3019 if (list_empty(head)) {
3020 struct trace_event_fields *field = call->class->fields_array;
3021 unsigned int offset = sizeof(struct trace_entry);
3022
3023 for (; field->type; field++) {
3024 if (field->type == TRACE_FUNCTION_TYPE) {
3025 field->define_fields(call);
3026 break;
3027 }
3028
3029 offset = ALIGN(offset, field->align);
3030 ret = trace_define_field_ext(call, field->type, field->name,
3031 offset, field->size,
3032 field->is_signed, field->filter_type,
3033 field->len, field->needs_test);
3034 if (WARN_ON_ONCE(ret)) {
3035 pr_err("error code is %d\n", ret);
3036 break;
3037 }
3038
3039 offset += field->size;
3040 }
3041 }
3042
3043 return ret;
3044 }
3045
3046 static int event_callback(const char *name, umode_t *mode, void **data,
3047 const struct file_operations **fops)
3048 {
3049 struct trace_event_file *file = *data;
3050 struct trace_event_call *call = file->event_call;
3051
3052 if (strcmp(name, "format") == 0) {
3053 *mode = TRACE_MODE_READ;
3054 *fops = &ftrace_event_format_fops;
3055 return 1;
3056 }
3057
3058 /*
3059 * Only event directories that can be enabled should have
3060 * triggers or filters, with the exception of the "print"
3061 * event that can have a "trigger" file.
3062 */
3063 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
3064 if (call->class->reg && strcmp(name, "enable") == 0) {
3065 *mode = TRACE_MODE_WRITE;
3066 *fops = &ftrace_enable_fops;
3067 return 1;
3068 }
3069
3070 if (strcmp(name, "filter") == 0) {
3071 *mode = TRACE_MODE_WRITE;
3072 *fops = &ftrace_event_filter_fops;
3073 return 1;
3074 }
3075 }
3076
3077 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
3078 strcmp(trace_event_name(call), "print") == 0) {
3079 if (strcmp(name, "trigger") == 0) {
3080 *mode = TRACE_MODE_WRITE;
3081 *fops = &event_trigger_fops;
3082 return 1;
3083 }
3084 }
3085
3086 #ifdef CONFIG_PERF_EVENTS
3087 if (call->event.type && call->class->reg &&
3088 strcmp(name, "id") == 0) {
3089 *mode = TRACE_MODE_READ;
3090 *data = (void *)(long)call->event.type;
3091 *fops = &ftrace_event_id_fops;
3092 return 1;
3093 }
3094 #endif
3095
3096 #ifdef CONFIG_HIST_TRIGGERS
3097 if (strcmp(name, "hist") == 0) {
3098 *mode = TRACE_MODE_READ;
3099 *fops = &event_hist_fops;
3100 return 1;
3101 }
3102 #endif
3103 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
3104 if (strcmp(name, "hist_debug") == 0) {
3105 *mode = TRACE_MODE_READ;
3106 *fops = &event_hist_debug_fops;
3107 return 1;
3108 }
3109 #endif
3110 #ifdef CONFIG_TRACE_EVENT_INJECT
3111 if (call->event.type && call->class->reg &&
3112 strcmp(name, "inject") == 0) {
3113 *mode = 0200;
3114 *fops = &event_inject_fops;
3115 return 1;
3116 }
3117 #endif
3118 return 0;
3119 }
3120
3121 /* The file's ref count is incremented on creation and freeing the "enable" file decrements it */
3122 static void event_release(const char *name, void *data)
3123 {
3124 struct trace_event_file *file = data;
3125
3126 event_file_put(file);
3127 }
3128
3129 static int
3130 event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file)
3131 {
3132 struct trace_event_call *call = file->event_call;
3133 struct trace_array *tr = file->tr;
3134 struct eventfs_inode *e_events;
3135 struct eventfs_inode *ei;
3136 const char *name;
3137 int nr_entries;
3138 int ret;
3139 static struct eventfs_entry event_entries[] = {
3140 {
3141 .name = "format",
3142 .callback = event_callback,
3143 },
3144 #ifdef CONFIG_PERF_EVENTS
3145 {
3146 .name = "id",
3147 .callback = event_callback,
3148 },
3149 #endif
3150 #define NR_RO_EVENT_ENTRIES (1 + IS_ENABLED(CONFIG_PERF_EVENTS))
3151 /* Readonly files must be above this line and counted by NR_RO_EVENT_ENTRIES. */
3152 {
3153 .name = "enable",
3154 .callback = event_callback,
3155 .release = event_release,
3156 },
3157 {
3158 .name = "filter",
3159 .callback = event_callback,
3160 },
3161 {
3162 .name = "trigger",
3163 .callback = event_callback,
3164 },
3165 #ifdef CONFIG_HIST_TRIGGERS
3166 {
3167 .name = "hist",
3168 .callback = event_callback,
3169 },
3170 #endif
3171 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
3172 {
3173 .name = "hist_debug",
3174 .callback = event_callback,
3175 },
3176 #endif
3177 #ifdef CONFIG_TRACE_EVENT_INJECT
3178 {
3179 .name = "inject",
3180 .callback = event_callback,
3181 },
3182 #endif
3183 };
3184
3185 /*
3186 * If the trace point header did not define TRACE_SYSTEM
3187 * then the system would be called "TRACE_SYSTEM". This should
3188 * never happen.
3189 */
3190 if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0))
3191 return -ENODEV;
3192
3193 e_events = event_subsystem_dir(tr, call->class->system, file, parent);
3194 if (!e_events)
3195 return -ENOMEM;
3196
3197 if (trace_array_is_readonly(tr))
3198 nr_entries = NR_RO_EVENT_ENTRIES;
3199 else
3200 nr_entries = ARRAY_SIZE(event_entries);
3201
3202 name = trace_event_name(call);
3203 ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file);
3204 if (IS_ERR(ei)) {
3205 pr_warn("Could not create tracefs '%s' directory\n", name);
3206 return -1;
3207 }
3208
3209 file->ei = ei;
3210
3211 ret = event_define_fields(call);
3212 if (ret < 0) {
3213 pr_warn("Could not initialize trace point events/%s\n", name);
3214 return ret;
3215 }
3216
3217 /* Gets decremented on freeing of the "enable" file */
3218 event_file_get(file);
3219
3220 return 0;
3221 }
3222
3223 static void remove_event_from_tracers(struct trace_event_call *call)
3224 {
3225 struct trace_event_file *file;
3226 struct trace_array *tr;
3227
3228 do_for_each_event_file_safe(tr, file) {
3229 if (file->event_call != call)
3230 continue;
3231
3232 remove_event_file_dir(file);
3233 /*
3234 * The do_for_each_event_file_safe() is
3235 * a double loop. After finding the call for this
3236 * trace_array, we use break to jump to the next
3237 * trace_array.
3238 */
3239 break;
3240 } while_for_each_event_file();
3241 }
3242
3243 static void event_remove(struct trace_event_call *call)
3244 {
3245 struct trace_array *tr;
3246 struct trace_event_file *file;
3247
3248 do_for_each_event_file(tr, file) {
3249 if (file->event_call != call)
3250 continue;
3251
3252 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
3253 tr->clear_trace = true;
3254
3255 ftrace_event_enable_disable(file, 0);
3256 /*
3257 * The do_for_each_event_file() is
3258 * a double loop. After finding the call for this
3259 * trace_array, we use break to jump to the next
3260 * trace_array.
3261 */
3262 break;
3263 } while_for_each_event_file();
3264
3265 if (call->event.funcs)
3266 __unregister_trace_event(&call->event);
3267 remove_event_from_tracers(call);
3268 list_del(&call->list);
3269 }
3270
3271 static int event_init(struct trace_event_call *call)
3272 {
3273 int ret = 0;
3274 const char *name;
3275
3276 name = trace_event_name(call);
3277 if (WARN_ON(!name))
3278 return -EINVAL;
3279
3280 if (call->class->raw_init) {
3281 ret = call->class->raw_init(call);
3282 if (ret < 0 && ret != -ENOSYS)
3283 pr_warn("Could not initialize trace events/%s\n", name);
3284 }
3285
3286 return ret;
3287 }
3288
3289 static int
3290 __register_event(struct trace_event_call *call, struct module *mod)
3291 {
3292 int ret;
3293
3294 ret = event_init(call);
3295 if (ret < 0)
3296 return ret;
3297
3298 down_write(&trace_event_sem);
3299 list_add(&call->list, &ftrace_events);
3300 up_write(&trace_event_sem);
3301
3302 if (call->flags & TRACE_EVENT_FL_DYNAMIC)
3303 atomic_set(&call->refcnt, 0);
3304 else
3305 call->module = mod;
3306
3307 return 0;
3308 }
3309
3310 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
3311 {
3312 int rlen;
3313 int elen;
3314
3315 /* Find the length of the eval value as a string */
3316 elen = snprintf(ptr, 0, "%ld", map->eval_value);
3317 /* Make sure there's enough room to replace the string with the value */
3318 if (len < elen)
3319 return NULL;
3320
3321 snprintf(ptr, elen + 1, "%ld", map->eval_value);
3322
3323 /* Get the length of the remainder of the string after the eval string */
3324 rlen = strlen(ptr + len);
3325 memmove(ptr + elen, ptr + len, rlen);
3326 /* Make sure we end the new string */
3327 ptr[elen + rlen] = 0;
3328
3329 return ptr + elen;
3330 }
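
/*
 * Worked example (hypothetical map): with eval_string "ZONE_NORMAL" and
 * eval_value 2, a print_fmt fragment such as
 *
 *   "zone=ZONE_NORMAL flags=%lu"
 *
 * is rewritten in place to
 *
 *   "zone=2 flags=%lu"
 *
 * The tail of the string is memmove()d left because the decimal value
 * ("2", elen == 1) is shorter than the eval name (len == 11).
 */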
3331
3332 static void update_event_printk(struct trace_event_call *call,
3333 struct trace_eval_map *map)
3334 {
3335 char *ptr;
3336 int quote = 0;
3337 int len = strlen(map->eval_string);
3338
3339 for (ptr = call->print_fmt; *ptr; ptr++) {
3340 if (*ptr == '\\') {
3341 ptr++;
3342 /* paranoid */
3343 if (!*ptr)
3344 break;
3345 continue;
3346 }
3347 if (*ptr == '"') {
3348 quote ^= 1;
3349 continue;
3350 }
3351 if (quote)
3352 continue;
3353 if (isdigit(*ptr)) {
3354 /* skip numbers */
3355 do {
3356 ptr++;
3357 /* Check for alpha chars like ULL */
3358 } while (isalnum(*ptr));
3359 if (!*ptr)
3360 break;
3361 /*
3362 * A number must have some kind of delimiter after
3363 * it, and we can ignore that too.
3364 */
3365 continue;
3366 }
3367 if (isalpha(*ptr) || *ptr == '_') {
3368 if (strncmp(map->eval_string, ptr, len) == 0 &&
3369 !isalnum(ptr[len]) && ptr[len] != '_') {
3370 ptr = eval_replace(ptr, map, len);
3371 /* enum/sizeof string smaller than value */
3372 if (WARN_ON_ONCE(!ptr))
3373 return;
3374 /*
3375 * No need to decrement here, as eval_replace()
3376 * returns the pointer to the character past
3377 * the eval, and two evals cannot be placed
3378 * back to back without something in between.
3379 * We can skip that something in between.
3380 */
3381 continue;
3382 }
3383 skip_more:
3384 do {
3385 ptr++;
3386 } while (isalnum(*ptr) || *ptr == '_');
3387 if (!*ptr)
3388 break;
3389 /*
3390 * If what comes after this variable is a '.' or
3391 * '->' then we can continue to ignore that string.
3392 */
3393 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
3394 ptr += *ptr == '.' ? 1 : 2;
3395 if (!*ptr)
3396 break;
3397 goto skip_more;
3398 }
3399 /*
3400 * Once again, we can skip the delimiter that came
3401 * after the string.
3402 */
3403 continue;
3404 }
3405 }
3406 }
3407
3408 static void add_str_to_module(struct module *module, char *str)
3409 {
3410 struct module_string *modstr;
3411
3412 modstr = kmalloc_obj(*modstr);
3413
3414 /*
3415 * If we failed to allocate memory here, then we'll just
3416 * let the str memory leak when the module is removed.
3417 * If this fails to allocate, there are worse problems than
3418 * a leaked string on module removal.
3419 */
3420 if (WARN_ON_ONCE(!modstr))
3421 return;
3422
3423 modstr->module = module;
3424 modstr->str = str;
3425
3426 list_add(&modstr->next, &module_strings);
3427 }
3428
3429 #define ATTRIBUTE_STR "__attribute__("
3430 #define ATTRIBUTE_STR_LEN (sizeof(ATTRIBUTE_STR) - 1)
3431
3432 /* Remove all __attribute__() from @type. Return allocated string or @type. */
3433 static char *sanitize_field_type(const char *type)
3434 {
3435 char *attr, *tmp, *next, *ret = (char *)type;
3436 int depth;
3437
3438 next = (char *)type;
3439 while ((attr = strstr(next, ATTRIBUTE_STR))) {
3440 /* Retry if "__attribute__(" is a part of another word. */
3441 if (attr != next && !isspace(attr[-1])) {
3442 next = attr + ATTRIBUTE_STR_LEN;
3443 continue;
3444 }
3445
3446 if (ret == type) {
3447 ret = kstrdup(type, GFP_KERNEL);
3448 if (WARN_ON_ONCE(!ret))
3449 return NULL;
3450 attr = ret + (attr - type);
3451 }
3452
3453 /* the ATTRIBUTE_STR already has the first '(' */
3454 depth = 1;
3455 next = attr + ATTRIBUTE_STR_LEN;
3456 do {
3457 tmp = strpbrk(next, "()");
3458 /* There are unbalanced parentheses */
3459 if (WARN_ON_ONCE(!tmp)) {
3460 kfree(ret);
3461 return (char *)type;
3462 }
3463
3464 if (*tmp == '(')
3465 depth++;
3466 else
3467 depth--;
3468 next = tmp + 1;
3469 } while (depth > 0);
3470 next = skip_spaces(next);
3471 strcpy(attr, next);
3472 next = attr;
3473 }
3474 return ret;
3475 }
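
/*
 * Worked example (hypothetical field type): sanitize_field_type() turns
 *
 *   "char __attribute__((__aligned__(4))) name[16]"
 *
 * into a freshly allocated "char name[16]".  The depth counter is what
 * lets it skip over the nested parentheses of the attribute arguments.
 */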
3476
3477 static char *find_replacable_eval(const char *type, const char *eval_string,
3478 int len)
3479 {
3480 char *ptr;
3481
3482 if (!eval_string)
3483 return NULL;
3484
3485 ptr = strchr(type, '[');
3486 if (!ptr)
3487 return NULL;
3488 ptr++;
3489
3490 if (!isalpha(*ptr) && *ptr != '_')
3491 return NULL;
3492
3493 if (strncmp(eval_string, ptr, len) != 0)
3494 return NULL;
3495
3496 return ptr;
3497 }
3498
3499 static void update_event_fields(struct trace_event_call *call,
3500 struct trace_eval_map *map)
3501 {
3502 struct ftrace_event_field *field;
3503 const char *eval_string = NULL;
3504 struct list_head *head;
3505 int len = 0;
3506 char *ptr;
3507 char *str;
3508
3509 /* Dynamic events should never have field maps */
3510 if (call->flags & TRACE_EVENT_FL_DYNAMIC)
3511 return;
3512
3513 if (map) {
3514 eval_string = map->eval_string;
3515 len = strlen(map->eval_string);
3516 }
3517
3518 head = trace_get_fields(call);
3519 list_for_each_entry(field, head, link) {
3520 str = sanitize_field_type(field->type);
3521 if (!str)
3522 return;
3523
3524 ptr = find_replacable_eval(str, eval_string, len);
3525 if (ptr) {
3526 if (str == field->type) {
3527 str = kstrdup(field->type, GFP_KERNEL);
3528 if (WARN_ON_ONCE(!str))
3529 return;
3530 ptr = str + (ptr - field->type);
3531 }
3532
3533 ptr = eval_replace(ptr, map, len);
3534 /* enum/sizeof string smaller than value */
3535 if (WARN_ON_ONCE(!ptr)) {
3536 kfree(str);
3537 continue;
3538 }
3539 }
3540
3541 if (str == field->type)
3542 continue;
3543 /*
3544 * If the event is part of a module, then we need to free the string
3545 * when the module is removed. Otherwise, it will stay allocated
3546 * until a reboot.
3547 */
3548 if (call->module)
3549 add_str_to_module(call->module, str);
3550
3551 field->type = str;
3552 if (field->filter_type == FILTER_OTHER)
3553 field->filter_type = filter_assign_type(field->type);
3554 }
3555 }
3556
3557 /* Update all events for replacing eval and sanitizing */
3558 void trace_event_update_all(struct trace_eval_map **map, int len)
3559 {
3560 struct trace_event_call *call, *p;
3561 const char *last_system = NULL;
3562 bool first = false;
3563 bool updated;
3564 int last_i;
3565 int i;
3566
3567 down_write(&trace_event_sem);
3568 list_for_each_entry_safe(call, p, &ftrace_events, list) {
3569 /* events are usually grouped together with systems */
3570 if (!last_system || call->class->system != last_system) {
3571 first = true;
3572 last_i = 0;
3573 last_system = call->class->system;
3574 }
3575
3576 updated = false;
3577 /*
3578 * Since calls are grouped by systems, the likelihood that the
3579 * next call in the iteration belongs to the same system as the
3580 * previous call is high. As an optimization, we skip searching
3581 * for a map[] that matches the call's system if the last call
3582 * was from the same system. That's what last_i is for. If the
3583 * call has the same system as the previous call, then last_i
3584 * will be the index of the first map[] that has a matching
3585 * system.
3586 */
3587 for (i = last_i; i < len; i++) {
3588 if (call->class->system == map[i]->system) {
3589 /* Save the first system if need be */
3590 if (first) {
3591 last_i = i;
3592 first = false;
3593 }
3594 update_event_printk(call, map[i]);
3595 update_event_fields(call, map[i]);
3596 updated = true;
3597 }
3598 }
3599 /* If not updated yet, update field for sanitizing. */
3600 if (!updated)
3601 update_event_fields(call, NULL);
3602 cond_resched();
3603 }
3604 up_write(&trace_event_sem);
3605 }
3606
3607 static bool event_in_systems(struct trace_event_call *call,
3608 const char *systems)
3609 {
3610 const char *system;
3611 const char *p;
3612
3613 if (!systems)
3614 return true;
3615
3616 system = call->class->system;
3617 p = strstr(systems, system);
3618 if (!p)
3619 return false;
3620
3621 if (p != systems && !isspace(*(p - 1)) && *(p - 1) != ',')
3622 return false;
3623
3624 p += strlen(system);
3625 return !*p || isspace(*p) || *p == ',';
3626 }
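
/*
 * Worked example (hypothetical instance configuration): with
 * tr->system_names set to "sched_ext,irq", an event in the "irq" system
 * matches, but one in the "sched" system does not: "sched" is found as a
 * substring, yet the character after it ('_') is neither NUL, whitespace
 * nor a comma, so the configured token is really "sched_ext".
 */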
3627
3628 #ifdef CONFIG_HIST_TRIGGERS
3629 /*
3630 * Wake up waiters on the hist_poll_wq from irq_work because the hist trigger
3631 * may fire in any context.
3632 */
3633 static void hist_poll_event_irq_work(struct irq_work *work)
3634 {
3635 wake_up_all(&hist_poll_wq);
3636 }
3637
3638 DEFINE_IRQ_WORK(hist_poll_work, hist_poll_event_irq_work);
3639 DECLARE_WAIT_QUEUE_HEAD(hist_poll_wq);
3640 #endif
3641
3642 static struct trace_event_file *
3643 trace_create_new_event(struct trace_event_call *call,
3644 struct trace_array *tr)
3645 {
3646 struct trace_pid_list *no_pid_list;
3647 struct trace_pid_list *pid_list;
3648 struct trace_event_file *file;
3649 unsigned int first;
3650
3651 if (!event_in_systems(call, tr->system_names))
3652 return NULL;
3653
3654 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
3655 if (!file)
3656 return ERR_PTR(-ENOMEM);
3657
3658 pid_list = rcu_dereference_protected(tr->filtered_pids,
3659 lockdep_is_held(&event_mutex));
3660 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
3661 lockdep_is_held(&event_mutex));
3662
3663 if (!trace_pid_list_first(pid_list, &first) ||
3664 !trace_pid_list_first(no_pid_list, &first))
3665 file->flags |= EVENT_FILE_FL_PID_FILTER;
3666
3667 file->event_call = call;
3668 file->tr = tr;
3669 atomic_set(&file->sm_ref, 0);
3670 atomic_set(&file->tm_ref, 0);
3671 INIT_LIST_HEAD(&file->triggers);
3672 list_add(&file->list, &tr->events);
3673 refcount_set(&file->ref, 1);
3674
3675 return file;
3676 }
3677
3678 #define MAX_BOOT_TRIGGERS 32
3679
3680 static struct boot_triggers {
3681 const char *event;
3682 char *trigger;
3683 } bootup_triggers[MAX_BOOT_TRIGGERS];
3684
3685 static char bootup_trigger_buf[COMMAND_LINE_SIZE];
3686 static int boot_trigger_buf_len;
3687 static int nr_boot_triggers;
3688
3689 static __init int setup_trace_triggers(char *str)
3690 {
3691 char *trigger;
3692 char *buf;
3693 int len = boot_trigger_buf_len;
3694 int i;
3695
3696 if (len >= COMMAND_LINE_SIZE)
3697 return 1;
3698
3699 strscpy(bootup_trigger_buf + len, str, COMMAND_LINE_SIZE - len);
3700 trace_set_ring_buffer_expanded(NULL);
3701 disable_tracing_selftest("running event triggers");
3702
3703 buf = bootup_trigger_buf + len;
3704 boot_trigger_buf_len += strlen(buf) + 1;
3705
3706 for (i = nr_boot_triggers; i < MAX_BOOT_TRIGGERS; i++) {
3707 trigger = strsep(&buf, ",");
3708 if (!trigger)
3709 break;
3710 bootup_triggers[i].event = strsep(&trigger, ".");
3711 bootup_triggers[i].trigger = trigger;
3712 if (!bootup_triggers[i].trigger)
3713 break;
3714 }
3715
3716 nr_boot_triggers = i;
3717 return 1;
3718 }
3719 __setup("trace_trigger=", setup_trace_triggers);
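
/*
 * For illustration, a boot command line such as (hypothetical triggers)
 *
 *   trace_trigger="sched_switch.traceoff,sched_wakeup.stacktrace"
 *
 * is split on ',' into entries and on the first '.' into an event name and
 * its trigger; trace_early_triggers() below applies each one as soon as the
 * matching event file is created.
 */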
3720
3721 /* Add an event to a trace directory */
3722 static int
3723 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
3724 {
3725 struct trace_event_file *file;
3726
3727 file = trace_create_new_event(call, tr);
3728 /*
3729 * trace_create_new_event() returns ERR_PTR(-ENOMEM) if the allocation
3730 * failed, or NULL if the event is not part of the tr->system_names.
3731 * When the event is not part of the tr->system_names, return zero, not
3732 * an error.
3733 */
3734 if (!file)
3735 return 0;
3736
3737 if (IS_ERR(file))
3738 return PTR_ERR(file);
3739
3740 if (eventdir_initialized)
3741 return event_create_dir(tr->event_dir, file);
3742 else
3743 return event_define_fields(call);
3744 }
3745
3746 static void trace_early_triggers(struct trace_event_file *file, const char *name)
3747 {
3748 int ret;
3749 int i;
3750
3751 for (i = 0; i < nr_boot_triggers; i++) {
3752 if (strcmp(name, bootup_triggers[i].event))
3753 continue;
3754 mutex_lock(&event_mutex);
3755 ret = trigger_process_regex(file, bootup_triggers[i].trigger);
3756 mutex_unlock(&event_mutex);
3757 if (ret)
3758 pr_err("Failed to register trigger '%s' on event %s\n",
3759 bootup_triggers[i].trigger,
3760 bootup_triggers[i].event);
3761 }
3762 }
3763
3764 /*
3765 * Just create a descriptor for early init. A descriptor is required
3766 * for enabling events at boot. We want to enable events before
3767 * the filesystem is initialized.
3768 */
3769 static int
3770 __trace_early_add_new_event(struct trace_event_call *call,
3771 struct trace_array *tr)
3772 {
3773 struct trace_event_file *file;
3774 int ret;
3775
3776 file = trace_create_new_event(call, tr);
3777 /*
3778 * trace_create_new_event() returns ERR_PTR(-ENOMEM) if the allocation
3779 * failed, or NULL if the event is not part of the tr->system_names.
3780 * When the event is not part of the tr->system_names, return zero, not
3781 * an error.
3782 */
3783 if (!file)
3784 return 0;
3785
3786 if (IS_ERR(file))
3787 return PTR_ERR(file);
3788
3789 ret = event_define_fields(call);
3790 if (ret)
3791 return ret;
3792
3793 trace_early_triggers(file, trace_event_name(call));
3794
3795 return 0;
3796 }
3797
3798 struct ftrace_module_file_ops;
3799 static void __add_event_to_tracers(struct trace_event_call *call);
3800
3801 /* Add an additional event_call dynamically */
3802 int trace_add_event_call(struct trace_event_call *call)
3803 {
3804 int ret;
3805 lockdep_assert_held(&event_mutex);
3806
3807 guard(mutex)(&trace_types_lock);
3808
3809 ret = __register_event(call, NULL);
3810 if (ret < 0)
3811 return ret;
3812
3813 __add_event_to_tracers(call);
3814 return ret;
3815 }
3816 EXPORT_SYMBOL_GPL(trace_add_event_call);
3817
3818 /*
3819 * Must be called under locking of trace_types_lock, event_mutex and
3820 * trace_event_sem.
3821 */
3822 static void __trace_remove_event_call(struct trace_event_call *call)
3823 {
3824 event_remove(call);
3825 trace_destroy_fields(call);
3826 }
3827
3828 static int probe_remove_event_call(struct trace_event_call *call)
3829 {
3830 struct trace_array *tr;
3831 struct trace_event_file *file;
3832
3833 #ifdef CONFIG_PERF_EVENTS
3834 if (call->perf_refcount)
3835 return -EBUSY;
3836 #endif
3837 do_for_each_event_file(tr, file) {
3838 if (file->event_call != call)
3839 continue;
3840 /*
3841 * We can't rely on the ftrace_event_enable_disable(enable => 0)
3842 * that we are going to do, as soft mode can suppress
3843 * TRACE_REG_UNREGISTER.
3844 */
3845 if (file->flags & EVENT_FILE_FL_ENABLED)
3846 goto busy;
3847
3848 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
3849 tr->clear_trace = true;
3850 /*
3851 * The do_for_each_event_file() is
3852 * a double loop. After finding the call for this
3853 * trace_array, we use break to jump to the next
3854 * trace_array.
3855 */
3856 break;
3857 } while_for_each_event_file();
3858
3859 __trace_remove_event_call(call);
3860
3861 return 0;
3862 busy:
3863 /* No need to clear the trace now */
3864 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
3865 tr->clear_trace = false;
3866 }
3867 return -EBUSY;
3868 }
3869
3870 /* Remove an event_call */
3871 int trace_remove_event_call(struct trace_event_call *call)
3872 {
3873 int ret;
3874
3875 lockdep_assert_held(&event_mutex);
3876
3877 mutex_lock(&trace_types_lock);
3878 down_write(&trace_event_sem);
3879 ret = probe_remove_event_call(call);
3880 up_write(&trace_event_sem);
3881 mutex_unlock(&trace_types_lock);
3882
3883 return ret;
3884 }
3885 EXPORT_SYMBOL_GPL(trace_remove_event_call);
3886
3887 #define for_each_event(event, start, end) \
3888 for (event = start; \
3889 (unsigned long)event < (unsigned long)end; \
3890 event++)
3891
3892 #ifdef CONFIG_MODULES
3893 static void update_mod_cache(struct trace_array *tr, struct module *mod)
3894 {
3895 struct event_mod_load *event_mod, *n;
3896
3897 list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
3898 if (strcmp(event_mod->module, mod->name) != 0)
3899 continue;
3900
3901 __ftrace_set_clr_event_nolock(tr, event_mod->match,
3902 event_mod->system,
3903 event_mod->event, 1, mod->name);
3904 free_event_mod(event_mod);
3905 }
3906 }
3907
3908 static void update_cache_events(struct module *mod)
3909 {
3910 struct trace_array *tr;
3911
3912 list_for_each_entry(tr, &ftrace_trace_arrays, list)
3913 update_mod_cache(tr, mod);
3914 }
3915
3916 static void trace_module_add_events(struct module *mod)
3917 {
3918 struct trace_event_call **call, **start, **end;
3919
3920 if (!mod->num_trace_events)
3921 return;
3922
3923 /* Don't create trace events for modules with a bad taint */
3924 if (trace_module_has_bad_taint(mod)) {
3925 pr_err("%s: module has bad taint, not creating trace events\n",
3926 mod->name);
3927 return;
3928 }
3929
3930 start = mod->trace_events;
3931 end = mod->trace_events + mod->num_trace_events;
3932
3933 for_each_event(call, start, end) {
3934 __register_event(*call, mod);
3935 __add_event_to_tracers(*call);
3936 }
3937
3938 update_cache_events(mod);
3939 }
3940
3941 static void trace_module_remove_events(struct module *mod)
3942 {
3943 struct trace_event_call *call, *p;
3944 struct module_string *modstr, *m;
3945
3946 down_write(&trace_event_sem);
3947 list_for_each_entry_safe(call, p, &ftrace_events, list) {
3948 if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
3949 continue;
3950 if (call->module == mod)
3951 __trace_remove_event_call(call);
3952 }
3953 /* Check for any strings allocated for this module */
3954 list_for_each_entry_safe(modstr, m, &module_strings, next) {
3955 if (modstr->module != mod)
3956 continue;
3957 list_del(&modstr->next);
3958 kfree(modstr->str);
3959 kfree(modstr);
3960 }
3961 up_write(&trace_event_sem);
3962
3963 /*
3964 * It is safest to reset the ring buffer if the module being unloaded
3965 * registered any events that were used. The only worry is if
3966 * a new module gets loaded, and takes on the same id as the events
3967 * of this module. When printing out the buffer, traced events left
3968 * over from this module may be passed to the new module events and
3969 * unexpected results may occur.
3970 */
3971 tracing_reset_all_online_cpus_unlocked();
3972 }
3973
3974 static int trace_module_notify(struct notifier_block *self,
3975 unsigned long val, void *data)
3976 {
3977 struct module *mod = data;
3978
3979 mutex_lock(&event_mutex);
3980 mutex_lock(&trace_types_lock);
3981 switch (val) {
3982 case MODULE_STATE_COMING:
3983 trace_module_add_events(mod);
3984 break;
3985 case MODULE_STATE_GOING:
3986 trace_module_remove_events(mod);
3987 break;
3988 }
3989 mutex_unlock(&trace_types_lock);
3990 mutex_unlock(&event_mutex);
3991
3992 return NOTIFY_OK;
3993 }
3994
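/*
 * Example: loading a module transitions it through MODULE_STATE_COMING,
 * so trace_module_notify() registers each entry of the module's
 * trace_events section with every trace array; unloading transitions
 * through MODULE_STATE_GOING, which removes those events and resets
 * the online ring buffers so stale event IDs cannot be misread later.
 */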
3995 static struct notifier_block trace_module_nb = {
3996 .notifier_call = trace_module_notify,
3997 .priority = 1, /* higher than trace.c module notify */
3998 };
3999 #endif /* CONFIG_MODULES */
4000
4001 /* Create a new event directory structure for a trace directory. */
4002 static void
4003 __trace_add_event_dirs(struct trace_array *tr)
4004 {
4005 struct trace_event_call *call;
4006 int ret;
4007
4008 lockdep_assert_held(&trace_event_sem);
4009
4010 list_for_each_entry(call, &ftrace_events, list) {
4011 ret = __trace_add_new_event(call, tr);
4012 if (ret < 0)
4013 pr_warn("Could not create directory for event %s\n",
4014 trace_event_name(call));
4015 }
4016 }
4017
4018 /* Returns any file that matches the system and event */
4019 struct trace_event_file *
4020 __find_event_file(struct trace_array *tr, const char *system, const char *event)
4021 {
4022 struct trace_event_file *file;
4023 struct trace_event_call *call;
4024 const char *name;
4025
4026 list_for_each_entry(file, &tr->events, list) {
4027
4028 call = file->event_call;
4029 name = trace_event_name(call);
4030
4031 if (!name || !call->class)
4032 continue;
4033
4034 if (strcmp(event, name) == 0 &&
4035 strcmp(system, call->class->system) == 0)
4036 return file;
4037 }
4038 return NULL;
4039 }
4040
4041 /* Returns valid trace event files that match system and event */
4042 struct trace_event_file *
4043 find_event_file(struct trace_array *tr, const char *system, const char *event)
4044 {
4045 struct trace_event_file *file;
4046
4047 file = __find_event_file(tr, system, event);
4048 if (!file || !file->event_call->class->reg ||
4049 file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
4050 return NULL;
4051
4052 return file;
4053 }
4054
4055 /**
4056 * trace_get_event_file - Find and return a trace event file
4057 * @instance: The name of the trace instance containing the event
4058 * @system: The name of the system containing the event
4059 * @event: The name of the event
4060 *
4061 * Return a trace event file given the trace instance name, trace
4062 * system, and trace event name. If the instance name is NULL, it
4063 * refers to the top-level trace array.
4064 *
4065 * This function will look it up and return it if found, after calling
4066 * trace_array_get() to prevent the instance from going away, and
4067 * increment the event's module refcount to prevent it from being
4068 * removed.
4069 *
4070 * To release the file, call trace_put_event_file(), which will call
4071 * trace_array_put() and decrement the event's module refcount.
4072 *
4073 * Return: The trace event on success, ERR_PTR otherwise.
4074 */
4075 struct trace_event_file *trace_get_event_file(const char *instance,
4076 const char *system,
4077 const char *event)
4078 {
4079 struct trace_array *tr = top_trace_array();
4080 struct trace_event_file *file = NULL;
4081 int ret = -EINVAL;
4082
4083 if (instance) {
4084 tr = trace_array_find_get(instance);
4085 if (!tr)
4086 return ERR_PTR(-ENOENT);
4087 } else {
4088 ret = trace_array_get(tr);
4089 if (ret)
4090 return ERR_PTR(ret);
4091 }
4092
4093 guard(mutex)(&event_mutex);
4094
4095 file = find_event_file(tr, system, event);
4096 if (!file) {
4097 trace_array_put(tr);
4098 return ERR_PTR(-EINVAL);
4099 }
4100
4101 /* Don't let event modules unload while in use */
4102 ret = trace_event_try_get_ref(file->event_call);
4103 if (!ret) {
4104 trace_array_put(tr);
4105 return ERR_PTR(-EBUSY);
4106 }
4107
4108 return file;
4109 }
4110 EXPORT_SYMBOL_GPL(trace_get_event_file);
4111
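/*
 * Example (illustrative): a module pairing the get/put calls; the
 * system and event names here are placeholders.
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	... use the file, e.g. to attach a trigger ...
 *	trace_put_event_file(file);
 */
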
4112 /**
4113 * trace_put_event_file - Release a file from trace_get_event_file()
4114 * @file: The trace event file
4115 *
4116 * If a file was retrieved using trace_get_event_file(), this should
4117 * be called when it's no longer needed. It will cancel the previous
4118 * trace_array_get() called by that function, and decrement the
4119 * event's module refcount.
4120 */
4121 void trace_put_event_file(struct trace_event_file *file)
4122 {
4123 mutex_lock(&event_mutex);
4124 trace_event_put_ref(file->event_call);
4125 mutex_unlock(&event_mutex);
4126
4127 trace_array_put(file->tr);
4128 }
4129 EXPORT_SYMBOL_GPL(trace_put_event_file);
4130
4131 #ifdef CONFIG_DYNAMIC_FTRACE
4132 struct event_probe_data {
4133 struct trace_event_file *file;
4134 unsigned long count;
4135 int ref;
4136 bool enable;
4137 };
4138
4139 static void update_event_probe(struct event_probe_data *data)
4140 {
4141 if (data->enable)
4142 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
4143 else
4144 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
4145 }
4146
4147 static void
4148 event_enable_probe(unsigned long ip, unsigned long parent_ip,
4149 struct trace_array *tr, struct ftrace_probe_ops *ops,
4150 void *data)
4151 {
4152 struct ftrace_func_mapper *mapper = data;
4153 struct event_probe_data *edata;
4154 void **pdata;
4155
4156 pdata = ftrace_func_mapper_find_ip(mapper, ip);
4157 if (!pdata || !*pdata)
4158 return;
4159
4160 edata = *pdata;
4161 update_event_probe(edata);
4162 }
4163
4164 static void
4165 event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
4166 struct trace_array *tr, struct ftrace_probe_ops *ops,
4167 void *data)
4168 {
4169 struct ftrace_func_mapper *mapper = data;
4170 struct event_probe_data *edata;
4171 void **pdata;
4172
4173 pdata = ftrace_func_mapper_find_ip(mapper, ip);
4174 if (!pdata || !*pdata)
4175 return;
4176
4177 edata = *pdata;
4178
4179 if (!edata->count)
4180 return;
4181
4182 /* Skip if the event is in a state we want to switch to */
4183 if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
4184 return;
4185
4186 if (edata->count != -1)
4187 (edata->count)--;
4188
4189 update_event_probe(edata);
4190 }
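
/*
 * Example: for a probe created with a ":2" count, only hits that
 * actually flip the soft-disable state reach the decrement above;
 * once the count drops to zero the early return makes further hits
 * no-ops.
 */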
4191
4192 static int
4193 event_enable_print(struct seq_file *m, unsigned long ip,
4194 struct ftrace_probe_ops *ops, void *data)
4195 {
4196 struct ftrace_func_mapper *mapper = data;
4197 struct event_probe_data *edata;
4198 void **pdata;
4199
4200 pdata = ftrace_func_mapper_find_ip(mapper, ip);
4201
4202 if (WARN_ON_ONCE(!pdata || !*pdata))
4203 return 0;
4204
4205 edata = *pdata;
4206
4207 seq_printf(m, "%ps:", (void *)ip);
4208
4209 seq_printf(m, "%s:%s:%s",
4210 edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
4211 edata->file->event_call->class->system,
4212 trace_event_name(edata->file->event_call));
4213
4214 if (edata->count == -1)
4215 seq_puts(m, ":unlimited\n");
4216 else
4217 seq_printf(m, ":count=%ld\n", edata->count);
4218
4219 return 0;
4220 }
4221
4222 static int
4223 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
4224 unsigned long ip, void *init_data, void **data)
4225 {
4226 struct ftrace_func_mapper *mapper = *data;
4227 struct event_probe_data *edata = init_data;
4228 int ret;
4229
4230 if (!mapper) {
4231 mapper = allocate_ftrace_func_mapper();
4232 if (!mapper)
4233 return -ENODEV;
4234 *data = mapper;
4235 }
4236
4237 ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
4238 if (ret < 0)
4239 return ret;
4240
4241 edata->ref++;
4242
4243 return 0;
4244 }
4245
4246 static int free_probe_data(void *data)
4247 {
4248 struct event_probe_data *edata = data;
4249
4250 edata->ref--;
4251 if (!edata->ref) {
4252 /* Remove soft mode */
4253 __ftrace_event_enable_disable(edata->file, 0, 1);
4254 trace_event_put_ref(edata->file->event_call);
4255 kfree(edata);
4256 }
4257 return 0;
4258 }
4259
4260 static void
4261 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
4262 unsigned long ip, void *data)
4263 {
4264 struct ftrace_func_mapper *mapper = data;
4265 struct event_probe_data *edata;
4266
4267 if (!ip) {
4268 if (!mapper)
4269 return;
4270 free_ftrace_func_mapper(mapper, free_probe_data);
4271 return;
4272 }
4273
4274 edata = ftrace_func_mapper_remove_ip(mapper, ip);
4275
4276 if (WARN_ON_ONCE(!edata))
4277 return;
4278
4279 if (WARN_ON_ONCE(edata->ref <= 0))
4280 return;
4281
4282 free_probe_data(edata);
4283 }
4284
4285 static struct ftrace_probe_ops event_enable_probe_ops = {
4286 .func = event_enable_probe,
4287 .print = event_enable_print,
4288 .init = event_enable_init,
4289 .free = event_enable_free,
4290 };
4291
4292 static struct ftrace_probe_ops event_enable_count_probe_ops = {
4293 .func = event_enable_count_probe,
4294 .print = event_enable_print,
4295 .init = event_enable_init,
4296 .free = event_enable_free,
4297 };
4298
4299 static struct ftrace_probe_ops event_disable_probe_ops = {
4300 .func = event_enable_probe,
4301 .print = event_enable_print,
4302 .init = event_enable_init,
4303 .free = event_enable_free,
4304 };
4305
4306 static struct ftrace_probe_ops event_disable_count_probe_ops = {
4307 .func = event_enable_count_probe,
4308 .print = event_enable_print,
4309 .init = event_enable_init,
4310 .free = event_enable_free,
4311 };
4312
4313 static int
4314 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
4315 char *glob, char *cmd, char *param, int enabled)
4316 {
4317 struct trace_event_file *file;
4318 struct ftrace_probe_ops *ops;
4319 struct event_probe_data *data;
4320 unsigned long count = -1;
4321 const char *system;
4322 const char *event;
4323 char *number;
4324 bool enable;
4325 int ret;
4326
4327 if (!tr)
4328 return -ENODEV;
4329
4330 /* hash funcs only work with set_ftrace_filter */
4331 if (!enabled || !param)
4332 return -EINVAL;
4333
4334 system = strsep(&param, ":");
4335 if (!param)
4336 return -EINVAL;
4337
4338 event = strsep(&param, ":");
4339
4340 guard(mutex)(&event_mutex);
4341
4342 file = find_event_file(tr, system, event);
4343 if (!file)
4344 return -EINVAL;
4345
4346 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
4347
4348 if (enable)
4349 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
4350 else
4351 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
4352
4353 if (glob[0] == '!')
4354 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
4355
4356 if (param) {
4357 number = strsep(&param, ":");
4358
4359 if (!strlen(number))
4360 return -EINVAL;
4361
4362 /*
4363 * We use the callback data field (which is a pointer)
4364 * as our counter.
4365 */
4366 ret = kstrtoul(number, 0, &count);
4367 if (ret)
4368 return ret;
4369 }
4370
4371 /* Don't let event modules unload while probe registered */
4372 ret = trace_event_try_get_ref(file->event_call);
4373 if (!ret)
4374 return -EBUSY;
4375
4376 ret = __ftrace_event_enable_disable(file, 1, 1);
4377 if (ret < 0)
4378 goto out_put;
4379
4380 ret = -ENOMEM;
4381 data = kzalloc(sizeof(*data), GFP_KERNEL);
4382 if (!data)
4383 goto out_put;
4384
4385 data->enable = enable;
4386 data->count = count;
4387 data->file = file;
4388
4389 ret = register_ftrace_function_probe(glob, tr, ops, data);
4390 /*
4391 * On success, the above returns the number of functions enabled,
4392 * but if it didn't find any functions it returns zero.
4393 * Consider no functions a failure too.
4394 */
4395
4396 /* Just return zero, not the number of enabled functions */
4397 if (ret > 0)
4398 return 0;
4399
4400 kfree(data);
4401
4402 if (!ret)
4403 ret = -ENOENT;
4404
4405 __ftrace_event_enable_disable(file, 0, 1);
4406 out_put:
4407 trace_event_put_ref(file->event_call);
4408 return ret;
4409 }
4410
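/*
 * Example: with the commands registered below, writing
 *
 *	echo 'schedule:enable_event:sched:sched_switch:2' > set_ftrace_filter
 *
 * parses "sched" and "sched_switch" out of param above and
 * soft-enables the event the first two times schedule() is hit
 * (the trailing ":2" becomes data->count). Prefixing the glob with
 * '!' unregisters the probe again.
 */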
4411 static struct ftrace_func_command event_enable_cmd = {
4412 .name = ENABLE_EVENT_STR,
4413 .func = event_enable_func,
4414 };
4415
4416 static struct ftrace_func_command event_disable_cmd = {
4417 .name = DISABLE_EVENT_STR,
4418 .func = event_enable_func,
4419 };
4420
4421 static __init int register_event_cmds(void)
4422 {
4423 int ret;
4424
4425 ret = register_ftrace_command(&event_enable_cmd);
4426 if (WARN_ON(ret < 0))
4427 return ret;
4428 ret = register_ftrace_command(&event_disable_cmd);
4429 if (WARN_ON(ret < 0))
4430 unregister_ftrace_command(&event_enable_cmd);
4431 return ret;
4432 }
4433 #else
4434 static inline int register_event_cmds(void) { return 0; }
4435 #endif /* CONFIG_DYNAMIC_FTRACE */
4436
4437 /*
4438 * The top level array and trace arrays created by boot-time tracing
4439 * have already had their trace_event_file descriptors created in order
4440 * to allow for early events to be recorded.
4441 * This function is called after tracefs has been initialized,
4442 * and we now have to create the files associated with the events.
4443 */
4444 static void __trace_early_add_event_dirs(struct trace_array *tr)
4445 {
4446 struct trace_event_file *file;
4447 int ret;
4448
4449
4450 list_for_each_entry(file, &tr->events, list) {
4451 ret = event_create_dir(tr->event_dir, file);
4452 if (ret < 0)
4453 pr_warn("Could not create directory for event %s\n",
4454 trace_event_name(file->event_call));
4455 }
4456 }
4457
4458 /*
4459 * For early boot up, the top trace array and the trace arrays created
4460 * by boot-time tracing need to have a list of events that can be
4461 * enabled. This must be done before the filesystem is set up in order
4462 * to allow events to be traced early.
4463 */
4464 void __trace_early_add_events(struct trace_array *tr)
4465 {
4466 struct trace_event_call *call;
4467 int ret;
4468
4469 list_for_each_entry(call, &ftrace_events, list) {
4470 /* Early boot up should not have any modules loaded */
4471 if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
4472 WARN_ON_ONCE(call->module))
4473 continue;
4474
4475 ret = __trace_early_add_new_event(call, tr);
4476 if (ret < 0)
4477 pr_warn("Could not create early event %s\n",
4478 trace_event_name(call));
4479 }
4480 }
4481
4482 /* Remove the event directory structure for a trace directory. */
4483 static void
4484 __trace_remove_event_dirs(struct trace_array *tr)
4485 {
4486 struct trace_event_file *file, *next;
4487
4488 list_for_each_entry_safe(file, next, &tr->events, list)
4489 remove_event_file_dir(file);
4490 }
4491
4492 static void __add_event_to_tracers(struct trace_event_call *call)
4493 {
4494 struct trace_array *tr;
4495
4496 list_for_each_entry(tr, &ftrace_trace_arrays, list)
4497 __trace_add_new_event(call, tr);
4498 }
4499
4500 extern struct trace_event_call *__start_ftrace_events[];
4501 extern struct trace_event_call *__stop_ftrace_events[];
4502
4503 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
4504
4505 static __init int setup_trace_event(char *str)
4506 {
4507 if (bootup_event_buf[0] != '\0')
4508 strlcat(bootup_event_buf, ",", COMMAND_LINE_SIZE);
4509
4510 strlcat(bootup_event_buf, str, COMMAND_LINE_SIZE);
4511
4512 trace_set_ring_buffer_expanded(NULL);
4513 disable_tracing_selftest("running event tracing");
4514
4515 return 1;
4516 }
4517 __setup("trace_event=", setup_trace_event);
4518
4519 static int events_callback(const char *name, umode_t *mode, void **data,
4520 const struct file_operations **fops)
4521 {
4522 if (strcmp(name, "enable") == 0) {
4523 *mode = TRACE_MODE_WRITE;
4524 *fops = &ftrace_tr_enable_fops;
4525 return 1;
4526 }
4527
4528 if (strcmp(name, "header_page") == 0) {
4529 *mode = TRACE_MODE_READ;
4530 *fops = &ftrace_show_header_page_fops;
4531
4532 } else if (strcmp(name, "header_event") == 0) {
4533 *mode = TRACE_MODE_READ;
4534 *fops = &ftrace_show_header_event_fops;
4535 } else
4536 return 0;
4537
4538 return 1;
4539 }
4540
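/*
 * Example: eventfs instantiates the files of an "events" directory
 * lazily; when the "enable" file is first looked up, the callback
 * above is invoked as events_callback("enable", ...) and hands back
 * TRACE_MODE_WRITE and &ftrace_tr_enable_fops for it.
 */
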
4541 /* Expects to have event_mutex held when called */
4542 static int
4543 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
4544 {
4545 struct eventfs_inode *e_events;
4546 struct dentry *entry;
4547 int nr_entries;
4548 static struct eventfs_entry events_entries[] = {
4549 {
4550 .name = "header_page",
4551 .callback = events_callback,
4552 },
4553 {
4554 .name = "header_event",
4555 .callback = events_callback,
4556 },
4557 #define NR_RO_TOP_ENTRIES 2
4558 /* Readonly files must be above this line and counted by NR_RO_TOP_ENTRIES. */
4559 {
4560 .name = "enable",
4561 .callback = events_callback,
4562 },
4563 };
4564
4565 if (!trace_array_is_readonly(tr)) {
4566 entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
4567 tr, &ftrace_set_event_fops);
4568 if (!entry)
4569 return -ENOMEM;
4570
4571 /* These are not as crucial, so just warn if they are not created */
4572 trace_create_file("show_event_filters", TRACE_MODE_READ, parent, tr,
4573 &ftrace_show_event_filters_fops);
4574
4575 trace_create_file("show_event_triggers", TRACE_MODE_READ, parent, tr,
4576 &ftrace_show_event_triggers_fops);
4577
4578 trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
4579 tr, &ftrace_set_event_pid_fops);
4580
4581 trace_create_file("set_event_notrace_pid",
4582 TRACE_MODE_WRITE, parent, tr,
4583 &ftrace_set_event_notrace_pid_fops);
4584 nr_entries = ARRAY_SIZE(events_entries);
4585 } else {
4586 nr_entries = NR_RO_TOP_ENTRIES;
4587 }
4588
4589 e_events = eventfs_create_events_dir("events", parent, events_entries,
4590 nr_entries, tr);
4591 if (IS_ERR(e_events)) {
4592 pr_warn("Could not create tracefs 'events' directory\n");
4593 return -ENOMEM;
4594 }
4595
4596 tr->event_dir = e_events;
4597
4598 return 0;
4599 }
4600
4601 /**
4602 * event_trace_add_tracer - add an instance of a trace_array to events
4603 * @parent: The parent dentry to place the files/directories for events in
4604 * @tr: The trace array associated with these events
4605 *
4606 * When a new instance is created, it needs to set up its events
4607 * directory, as well as other files associated with events. It also
4608 * creates the event hierarchy in the @parent/events directory.
4609 *
4610 * Returns 0 on success.
4611 *
4612 * Must be called with event_mutex held.
4613 */
4614 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
4615 {
4616 int ret;
4617
4618 lockdep_assert_held(&event_mutex);
4619
4620 ret = create_event_toplevel_files(parent, tr);
4621 if (ret)
4622 goto out;
4623
4624 down_write(&trace_event_sem);
4625 /* If tr already has the event list, it is initialized in early boot. */
4626 if (unlikely(!list_empty(&tr->events)))
4627 __trace_early_add_event_dirs(tr);
4628 else
4629 __trace_add_event_dirs(tr);
4630 up_write(&trace_event_sem);
4631
4632 out:
4633 return ret;
4634 }
4635
4636 /*
4637 * The top trace array already had its file descriptors created.
4638 * Now the files themselves need to be created.
4639 */
4640 static __init int
4641 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
4642 {
4643 int ret;
4644
4645 guard(mutex)(&event_mutex);
4646
4647 ret = create_event_toplevel_files(parent, tr);
4648 if (ret)
4649 return ret;
4650
4651 down_write(&trace_event_sem);
4652 __trace_early_add_event_dirs(tr);
4653 up_write(&trace_event_sem);
4654
4655 return 0;
4656 }
4657
4658 /* Must be called with event_mutex held */
4659 int event_trace_del_tracer(struct trace_array *tr)
4660 {
4661 lockdep_assert_held(&event_mutex);
4662
4663 /* Disable any event triggers and associated soft-disabled events */
4664 clear_event_triggers(tr);
4665
4666 /* Clear the pid list */
4667 __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
4668
4669 /* Disable any running events */
4670 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0, NULL);
4671
4672 /* Make sure no more events are being executed */
4673 tracepoint_synchronize_unregister();
4674
4675 down_write(&trace_event_sem);
4676 __trace_remove_event_dirs(tr);
4677 eventfs_remove_events_dir(tr->event_dir);
4678 up_write(&trace_event_sem);
4679
4680 tr->event_dir = NULL;
4681
4682 return 0;
4683 }
4684
4685 static __init int event_trace_memsetup(void)
4686 {
4687 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
4688 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
4689 return 0;
4690 }
4691
4692 /*
4693 * Helper function to enable or disable a comma-separated list of events
4694 * from the bootup buffer.
4695 */
4696 static __init void __early_set_events(struct trace_array *tr, char *buf, bool enable)
4697 {
4698 char *token;
4699
4700 while ((token = strsep(&buf, ","))) {
4701 if (*token) {
4702 if (enable) {
4703 if (ftrace_set_clr_event(tr, token, 1))
4704 pr_warn("Failed to enable trace event: %s\n", token);
4705 } else {
4706 ftrace_set_clr_event(tr, token, 0);
4707 }
4708 }
4709
4710 /* Put back the comma to allow this to be called again */
4711 if (buf)
4712 *(buf - 1) = ',';
4713 }
4714 }
4715
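/*
 * Example (illustrative): for buf = "sched:sched_switch,irq:*",
 * strsep() NUL-terminates each token in place; writing the ',' back
 * at (buf - 1) undoes that, so early_enable_events() below can run
 * the same buffer through this function twice (disable, then enable).
 */
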
4716 /**
4717 * early_enable_events - enable events from the bootup buffer
4718 * @tr: The trace array to enable the events in
4719 * @buf: The buffer containing the comma separated list of events
4720 * @disable_first: If true, disable all events in @buf before enabling them
4721 *
4722 * This function enables events from the bootup buffer. If @disable_first
4723 * is true, it will first disable all events in the buffer before enabling
4724 * them.
4725 *
4726 * For syscall events, which rely on a global refcount to register the
4727 * SYSCALL_WORK_SYSCALL_TRACEPOINT flag (especially for pid 1), we must
4728 * ensure the refcount hits zero before re-enabling them. A simple
4729 * "disable then enable" per-event is not enough if multiple syscalls are
4730 * used, as the refcount will stay above zero. Thus, we need a two-phase
4731 * approach: disable all, then enable all.
4732 */
4733 __init void
4734 early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
4735 {
4736 if (disable_first)
4737 __early_set_events(tr, buf, false);
4738
4739 __early_set_events(tr, buf, true);
4740 }
4741
4742 static __init int event_trace_enable(void)
4743 {
4744 struct trace_array *tr = top_trace_array();
4745 struct trace_event_call **iter, *call;
4746 int ret;
4747
4748 if (!tr)
4749 return -ENODEV;
4750
4751 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
4752
4753 call = *iter;
4754 ret = event_init(call);
4755 if (!ret)
4756 list_add(&call->list, &ftrace_events);
4757 }
4758
4759 register_trigger_cmds();
4760
4761 /*
4762 * We need the top trace array to have a working set of trace
4763 * points at early init, before the debug files and directories
4764 * are created. Create the file entries now, and attach them
4765 * to the actual file dentries later.
4766 */
4767 __trace_early_add_events(tr);
4768
4769 early_enable_events(tr, bootup_event_buf, false);
4770
4771 trace_printk_start_comm();
4772
4773 register_event_cmds();
4774
4775
4776 return 0;
4777 }
4778
4779 /*
4780 * event_trace_enable() is called from trace_event_init() first to
4781 * initialize events and perhaps start any events that are on the
4782 * command line. Unfortunately, there are some events that will not
4783 * start this early, like the system call tracepoints that need
4784 * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But
4785 * event_trace_enable() is called before pid 1 starts, and this flag
4786 * is never set, so the syscall tracepoints are never reached, even
4787 * though the events are enabled (and record nothing).
4788 */
4789 static __init int event_trace_enable_again(void)
4790 {
4791 struct trace_array *tr;
4792
4793 tr = top_trace_array();
4794 if (!tr)
4795 return -ENODEV;
4796
4797 early_enable_events(tr, bootup_event_buf, true);
4798
4799 return 0;
4800 }
4801
4802 early_initcall(event_trace_enable_again);
4803
4804 /* Initialize fields that are not related to tracefs */
4805 static __init int event_trace_init_fields(void)
4806 {
4807 if (trace_define_generic_fields())
4808 pr_warn("tracing: Failed to allocated generic fields");
4809
4810 if (trace_define_common_fields())
4811 pr_warn("tracing: Failed to allocate common fields");
4812
4813 return 0;
4814 }
4815
4816 __init int event_trace_init(void)
4817 {
4818 struct trace_array *tr;
4819 int ret;
4820
4821 tr = top_trace_array();
4822 if (!tr)
4823 return -ENODEV;
4824
4825 trace_create_file("available_events", TRACE_MODE_READ,
4826 NULL, tr, &ftrace_avail_fops);
4827
4828 ret = early_event_add_tracer(NULL, tr);
4829 if (ret)
4830 return ret;
4831
4832 #ifdef CONFIG_MODULES
4833 ret = register_module_notifier(&trace_module_nb);
4834 if (ret)
4835 pr_warn("Failed to register trace events module notifier\n");
4836 #endif
4837
4838 eventdir_initialized = true;
4839
4840 return 0;
4841 }
4842
4843 void __init trace_event_init(void)
4844 {
4845 event_trace_memsetup();
4846 init_ftrace_syscalls();
4847 event_trace_enable();
4848 event_trace_init_fields();
4849 }
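
/*
 * Note the ordering above: event_trace_memsetup() must run first so
 * that field_cachep and file_cachep exist before event_trace_enable()
 * allocates event fields and trace_event_file descriptors from them.
 */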
4850
4851 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
4852
4853 static DEFINE_SPINLOCK(test_spinlock);
4854 static DEFINE_SPINLOCK(test_spinlock_irq);
4855 static DEFINE_MUTEX(test_mutex);
4856
4857 static __init void test_work(struct work_struct *dummy)
4858 {
4859 spin_lock(&test_spinlock);
4860 spin_lock_irq(&test_spinlock_irq);
4861 udelay(1);
4862 spin_unlock_irq(&test_spinlock_irq);
4863 spin_unlock(&test_spinlock);
4864
4865 mutex_lock(&test_mutex);
4866 msleep(1);
4867 mutex_unlock(&test_mutex);
4868 }
4869
4870 static __init int event_test_thread(void *unused)
4871 {
4872 void *test_malloc;
4873
4874 test_malloc = kmalloc(1234, GFP_KERNEL);
4875 if (!test_malloc)
4876 pr_info("failed to kmalloc\n");
4877
4878 schedule_on_each_cpu(test_work);
4879
4880 kfree(test_malloc);
4881
4882 set_current_state(TASK_INTERRUPTIBLE);
4883 while (!kthread_should_stop()) {
4884 schedule();
4885 set_current_state(TASK_INTERRUPTIBLE);
4886 }
4887 __set_current_state(TASK_RUNNING);
4888
4889 return 0;
4890 }
4891
4892 /*
4893 * Do various things that may trigger events.
4894 */
4895 static __init void event_test_stuff(void)
4896 {
4897 struct task_struct *test_thread;
4898
4899 test_thread = kthread_run(event_test_thread, NULL, "test-events");
4900 msleep(1);
4901 kthread_stop(test_thread);
4902 }
4903
4904 /*
4905 * For every trace event defined, we will test each trace point separately,
4906 * and then by groups, and finally all trace points.
4907 */
4908 static __init void event_trace_self_tests(void)
4909 {
4910 struct trace_subsystem_dir *dir;
4911 struct trace_event_file *file;
4912 struct trace_event_call *call;
4913 struct event_subsystem *system;
4914 struct trace_array *tr;
4915 int ret;
4916
4917 tr = top_trace_array();
4918 if (!tr)
4919 return;
4920
4921 pr_info("Running tests on trace events:\n");
4922
4923 list_for_each_entry(file, &tr->events, list) {
4924
4925 call = file->event_call;
4926
4927 /* Only test those that have a probe */
4928 if (!call->class || !call->class->probe)
4929 continue;
4930
4931 /*
4932 * Testing syscall events here is pretty useless, but
4933 * we still do it if configured, even though it is time consuming.
4934 * What we really need is a user thread to perform the
4935 * syscalls as we test.
4936 */
4937 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
4938 if (call->class->system &&
4939 strcmp(call->class->system, "syscalls") == 0)
4940 continue;
4941 #endif
4942
4943 pr_info("Testing event %s: ", trace_event_name(call));
4944
4945 /*
4946 * If an event is already enabled, someone is using
4947 * it and the self test should not be on.
4948 */
4949 if (file->flags & EVENT_FILE_FL_ENABLED) {
4950 pr_warn("Enabled event during self test!\n");
4951 WARN_ON_ONCE(1);
4952 continue;
4953 }
4954
4955 ftrace_event_enable_disable(file, 1);
4956 event_test_stuff();
4957 ftrace_event_enable_disable(file, 0);
4958
4959 pr_cont("OK\n");
4960 }
4961
4962 /* Now test at the sub system level */
4963
4964 pr_info("Running tests on trace event systems:\n");
4965
4966 list_for_each_entry(dir, &tr->systems, list) {
4967
4968 system = dir->subsystem;
4969
4970 /* the ftrace system is special, skip it */
4971 if (strcmp(system->name, "ftrace") == 0)
4972 continue;
4973
4974 pr_info("Testing event system %s: ", system->name);
4975
4976 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1, NULL);
4977 if (WARN_ON_ONCE(ret)) {
4978 pr_warn("error enabling system %s\n",
4979 system->name);
4980 continue;
4981 }
4982
4983 event_test_stuff();
4984
4985 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0, NULL);
4986 if (WARN_ON_ONCE(ret)) {
4987 pr_warn("error disabling system %s\n",
4988 system->name);
4989 continue;
4990 }
4991
4992 pr_cont("OK\n");
4993 }
4994
4995 /* Test with all events enabled */
4996
4997 pr_info("Running tests on all trace events:\n");
4998 pr_info("Testing all events: ");
4999
5000 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1, NULL);
5001 if (WARN_ON_ONCE(ret)) {
5002 pr_warn("error enabling all events\n");
5003 return;
5004 }
5005
5006 event_test_stuff();
5007
5008 /* Reset the state by disabling all events again */
5009 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0, NULL);
5010 if (WARN_ON_ONCE(ret)) {
5011 pr_warn("error disabling all events\n");
5012 return;
5013 }
5014
5015 pr_cont("OK\n");
5016 }
5017
5018 #ifdef CONFIG_FUNCTION_TRACER
5019
5020 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
5021
5022 static struct trace_event_file event_trace_file __initdata;
5023
5024 static void __init
5025 function_test_events_call(unsigned long ip, unsigned long parent_ip,
5026 struct ftrace_ops *op, struct ftrace_regs *regs)
5027 {
5028 struct trace_buffer *buffer;
5029 struct ring_buffer_event *event;
5030 struct ftrace_entry *entry;
5031 unsigned int trace_ctx;
5032 long disabled;
5033 int cpu;
5034
5035 trace_ctx = tracing_gen_ctx();
5036 preempt_disable_notrace();
5037 cpu = raw_smp_processor_id();
5038 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
5039
5040 if (disabled != 1)
5041 goto out;
5042
5043 event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
5044 TRACE_FN, sizeof(*entry),
5045 trace_ctx);
5046 if (!event)
5047 goto out;
5048 entry = ring_buffer_event_data(event);
5049 entry->ip = ip;
5050 entry->parent_ip = parent_ip;
5051
5052 event_trigger_unlock_commit(&event_trace_file, buffer, event,
5053 entry, trace_ctx);
5054 out:
5055 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
5056 preempt_enable_notrace();
5057 }
5058
5059 static struct ftrace_ops trace_ops __initdata =
5060 {
5061 .func = function_test_events_call,
5062 };
5063
5064 static __init void event_trace_self_test_with_function(void)
5065 {
5066 int ret;
5067
5068 event_trace_file.tr = top_trace_array();
5069 if (WARN_ON(!event_trace_file.tr))
5070 return;
5071
5072 ret = register_ftrace_function(&trace_ops);
5073 if (WARN_ON(ret < 0)) {
5074 pr_info("Failed to enable function tracer for event tests\n");
5075 return;
5076 }
5077 pr_info("Running tests again, along with the function tracer\n");
5078 event_trace_self_tests();
5079 unregister_ftrace_function(&trace_ops);
5080 }
5081 #else
5082 static __init void event_trace_self_test_with_function(void)
5083 {
5084 }
5085 #endif
5086
5087 static __init int event_trace_self_tests_init(void)
5088 {
5089 if (!tracing_selftest_disabled) {
5090 event_trace_self_tests();
5091 event_trace_self_test_with_function();
5092 }
5093
5094 return 0;
5095 }
5096
5097 late_initcall(event_trace_self_tests_init);
5098
5099 #endif
5100