/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
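
/*
 * Illustrative sketch (not part of the original file): an event class's
 * define_fields callback typically registers each member of its entry
 * structure through trace_define_field().  "struct my_entry" and its
 * "ip" member are hypothetical names used only for this example:
 *
 *	trace_define_field(call, "unsigned long", "ip",
 *			   offsetof(struct my_entry, ip),
 *			   sizeof(unsigned long),
 *			   is_signed_type(unsigned long), FILTER_OTHER);
 */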

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}
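
/*
 * Not in the original file: these common fields are what every event's
 * "format" file reports first.  On a typical build of this era the
 * section reads roughly as follows (offsets mirror struct trace_entry
 * and may differ by configuration):
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int common_pid;	offset:4;	size:4;	signed:1;
 *	field:int common_lock_depth;	offset:8;	size:4;	signed:1;
 */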

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}
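
/*
 * Not in the original file: this is driven by the "record-cmd" trace
 * option, so from user space (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# echo 0 > /sys/kernel/debug/tracing/options/record-cmd
 *
 * stops enabled events from recording the pid-to-comm mapping.
 */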

static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>.
	 *
	 * (See the usage sketch after this function.)
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
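
/*
 * Usage sketch (not part of the original file; paths assume debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	# echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *	# echo 'irq:*' >> /sys/kernel/debug/tracing/set_event
 *	# echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * The leading '!' (handled in ftrace_event_write() below) clears an
 * event instead of setting it; opening set_event with O_TRUNC (plain
 * '>') first clears all events via ftrace_event_seq_open().
 */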

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
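
/*
 * In-kernel usage sketch (not part of the original file): a subsystem
 * that wants the sched_switch event recorded could do
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *
 * and disable it again later by passing 0 as the last argument.
 */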

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only;
		 * its events cannot be enabled or disabled via the
		 * event files.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
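
/*
 * Usage sketch (not part of the original file): each event gets its own
 * enable file, e.g. for sched_switch:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	# cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	1
 */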

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
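
/*
 * Usage sketch (not part of the original file): the per-system enable
 * file toggles every event in the system at once, and reading it back
 * reports '1' (all on), '0' (all off) or 'X' (a mixture):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/sched/enable
 *	# echo 0 > /sys/kernel/debug/tracing/events/sched/sched_wakeup/enable
 *	# cat /sys/kernel/debug/tracing/events/sched/enable
 *	X
 */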

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPARATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPARATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPARATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPARATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except for dynamic arrays).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown as:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
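
/*
 * Not in the original file: for an array member such as
 * "char prev_comm[16]" in sched_switch, the branch above splits the
 * type string at '[' so the format file shows something like (offset,
 * size and char signedness are configuration dependent):
 *
 *	field:char prev_comm[16];	offset:12;	size:16;	signed:1;
 */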

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
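
/*
 * Not in the original file: the "id" file exposes the numeric event
 * type, which perf uses when attaching to a tracepoint; roughly:
 *
 *	# cat /sys/kernel/debug/tracing/events/sched/sched_switch/id
 *	51
 *
 * where the number (the value shown is only an example) becomes
 * perf_event_attr.config with perf_event_attr.type set to
 * PERF_TYPE_TRACEPOINT.
 */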

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
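
/*
 * Usage sketch (not part of the original file): filters compare event
 * fields (including the common_* fields) against constants, e.g.
 *
 *	# echo 'common_pid != 0' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/filter
 *	# echo 0 > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/filter
 *
 * where writing "0" removes the filter again.
 */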

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we already created this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}
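
/*
 * Not in the original file: the resulting debugfs layout for one event
 * looks like
 *
 *	tracing/events/<system>/enable
 *	tracing/events/<system>/filter
 *	tracing/events/<system>/<event>/enable
 *	tracing/events/<system>/<event>/id	(with CONFIG_PERF_EVENTS)
 *	tracing/events/<system>/<event>/filter
 *	tracing/events/<system>/<event>/format
 */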

static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace events/%s\n",
					   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);
	return ret;
}
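
/*
 * Not in the original file: dynamically created events, such as the
 * kprobe-based events defined through
 * /sys/kernel/debug/tracing/kprobe_events, register themselves with
 * trace_add_event_call() and are torn down again with
 * trace_remove_event_call() below.
 */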

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
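
/*
 * Not in the original file: the boot parameter takes a comma-separated
 * list in the same <subsystem>:<event> syntax as set_event, e.g.
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * which enables those events as soon as event_trace_init() runs.
 */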

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields\n");

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		__trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but we still do it
 * if configured.  It is time consuming, though.  What we really need
 * is a user thread to perform the syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* now disable everything again */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif