xref: /linux/kernel/trace/trace_events.c (revision 606b2f490fb80e55d05cf0e6cec0b6c0ff0fc18f)
1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of fields of the trace point.
7  *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10 
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 
21 #include <asm/setup.h>
22 
23 #include "trace_output.h"
24 
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27 
28 DEFINE_MUTEX(event_mutex);
29 
30 LIST_HEAD(ftrace_events);
31 LIST_HEAD(ftrace_common_fields);
32 
33 struct list_head *
34 trace_get_fields(struct ftrace_event_call *event_call)
35 {
36 	if (!event_call->class->get_fields)
37 		return &event_call->class->fields;
38 	return event_call->class->get_fields(event_call);
39 }
40 
41 static int __trace_define_field(struct list_head *head, const char *type,
42 				const char *name, int offset, int size,
43 				int is_signed, int filter_type)
44 {
45 	struct ftrace_event_field *field;
46 
47 	field = kzalloc(sizeof(*field), GFP_KERNEL);
48 	if (!field)
49 		goto err;
50 
51 	field->name = kstrdup(name, GFP_KERNEL);
52 	if (!field->name)
53 		goto err;
54 
55 	field->type = kstrdup(type, GFP_KERNEL);
56 	if (!field->type)
57 		goto err;
58 
59 	if (filter_type == FILTER_OTHER)
60 		field->filter_type = filter_assign_type(type);
61 	else
62 		field->filter_type = filter_type;
63 
64 	field->offset = offset;
65 	field->size = size;
66 	field->is_signed = is_signed;
67 
68 	list_add(&field->link, head);
69 
70 	return 0;
71 
72 err:
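	/* field->name is NULL if its kstrdup() failed; kfree(NULL) is a no-op */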
73 	if (field)
74 		kfree(field->name);
75 	kfree(field);
76 
77 	return -ENOMEM;
78 }
79 
80 int trace_define_field(struct ftrace_event_call *call, const char *type,
81 		       const char *name, int offset, int size, int is_signed,
82 		       int filter_type)
83 {
84 	struct list_head *head;
85 
86 	if (WARN_ON(!call->class))
87 		return 0;
88 
89 	head = trace_get_fields(call);
90 	return __trace_define_field(head, type, name, offset, size,
91 				    is_signed, filter_type);
92 }
93 EXPORT_SYMBOL_GPL(trace_define_field);
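
/*
 * Illustrative sketch (not part of the original file): a caller whose
 * record uses a hypothetical struct my_entry might register one of its
 * fields like this, where "call" is its ftrace_event_call:
 *
 *	struct my_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *	};
 *
 *	ret = trace_define_field(call, "unsigned long", "ip",
 *				 offsetof(struct my_entry, ip),
 *				 sizeof(unsigned long),
 *				 is_signed_type(unsigned long),
 *				 FILTER_OTHER);
 */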
94 
95 #define __common_field(type, item)					\
96 	ret = __trace_define_field(&ftrace_common_fields, #type,	\
97 				   "common_" #item,			\
98 				   offsetof(typeof(ent), item),		\
99 				   sizeof(ent.item),			\
100 				   is_signed_type(type), FILTER_OTHER);	\
101 	if (ret)							\
102 		return ret;
103 
104 static int trace_define_common_fields(void)
105 {
106 	int ret;
107 	struct trace_entry ent;
108 
109 	__common_field(unsigned short, type);
110 	__common_field(unsigned char, flags);
111 	__common_field(unsigned char, preempt_count);
112 	__common_field(int, pid);
113 	__common_field(int, lock_depth);
114 
115 	return ret;
116 }
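
/*
 * For reference, these common fields are what every event's "format"
 * file starts with; with the layout of struct trace_entry at this
 * revision, the output looks roughly like:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int common_pid;	offset:4;	size:4;	signed:1;
 *	field:int common_lock_depth;	offset:8;	size:4;	signed:1;
 */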
117 
118 void trace_destroy_fields(struct ftrace_event_call *call)
119 {
120 	struct ftrace_event_field *field, *next;
121 	struct list_head *head;
122 
123 	head = trace_get_fields(call);
124 	list_for_each_entry_safe(field, next, head, link) {
125 		list_del(&field->link);
126 		kfree(field->type);
127 		kfree(field->name);
128 		kfree(field);
129 	}
130 }
131 
132 int trace_event_raw_init(struct ftrace_event_call *call)
133 {
134 	int id;
135 
136 	id = register_ftrace_event(&call->event);
137 	if (!id)
138 		return -ENODEV;
139 
140 	return 0;
141 }
142 EXPORT_SYMBOL_GPL(trace_event_raw_init);
143 
144 int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
145 {
146 	switch (type) {
147 	case TRACE_REG_REGISTER:
148 		return tracepoint_probe_register(call->name,
149 						 call->class->probe,
150 						 call);
151 	case TRACE_REG_UNREGISTER:
152 		tracepoint_probe_unregister(call->name,
153 					    call->class->probe,
154 					    call);
155 		return 0;
156 
157 #ifdef CONFIG_PERF_EVENTS
158 	case TRACE_REG_PERF_REGISTER:
159 		return tracepoint_probe_register(call->name,
160 						 call->class->perf_probe,
161 						 call);
162 	case TRACE_REG_PERF_UNREGISTER:
163 		tracepoint_probe_unregister(call->name,
164 					    call->class->perf_probe,
165 					    call);
166 		return 0;
167 #endif
168 	}
169 	return 0;
170 }
171 EXPORT_SYMBOL_GPL(ftrace_event_reg);
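
/*
 * A minimal sketch (hypothetical names) of an event class wired to the
 * helper above; with .reg set to ftrace_event_reg, enabling the event
 * attaches my_probe to the tracepoint named by call->name:
 *
 *	static struct ftrace_event_class my_class = {
 *		.system		= "my_system",
 *		.reg		= ftrace_event_reg,
 *		.probe		= my_probe,
 *	};
 */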
172 
173 void trace_event_enable_cmd_record(bool enable)
174 {
175 	struct ftrace_event_call *call;
176 
177 	mutex_lock(&event_mutex);
178 	list_for_each_entry(call, &ftrace_events, list) {
179 		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
180 			continue;
181 
182 		if (enable) {
183 			tracing_start_cmdline_record();
184 			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
185 		} else {
186 			tracing_stop_cmdline_record();
187 			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
188 		}
189 	}
190 	mutex_unlock(&event_mutex);
191 }
192 
193 static int ftrace_event_enable_disable(struct ftrace_event_call *call,
194 					int enable)
195 {
196 	int ret = 0;
197 
198 	switch (enable) {
199 	case 0:
200 		if (call->flags & TRACE_EVENT_FL_ENABLED) {
201 			call->flags &= ~TRACE_EVENT_FL_ENABLED;
202 			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
203 				tracing_stop_cmdline_record();
204 				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
205 			}
206 			call->class->reg(call, TRACE_REG_UNREGISTER);
207 		}
208 		break;
209 	case 1:
210 		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
211 			if (trace_flags & TRACE_ITER_RECORD_CMD) {
212 				tracing_start_cmdline_record();
213 				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
214 			}
215 			ret = call->class->reg(call, TRACE_REG_REGISTER);
216 			if (ret) {
217 				tracing_stop_cmdline_record();
218 				pr_info("event trace: Could not enable event "
219 					"%s\n", call->name);
220 				break;
221 			}
222 			call->flags |= TRACE_EVENT_FL_ENABLED;
223 		}
224 		break;
225 	}
226 
227 	return ret;
228 }
229 
230 static void ftrace_clear_events(void)
231 {
232 	struct ftrace_event_call *call;
233 
234 	mutex_lock(&event_mutex);
235 	list_for_each_entry(call, &ftrace_events, list) {
236 		ftrace_event_enable_disable(call, 0);
237 	}
238 	mutex_unlock(&event_mutex);
239 }
240 
241 /*
242  * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
243  */
244 static int __ftrace_set_clr_event(const char *match, const char *sub,
245 				  const char *event, int set)
246 {
247 	struct ftrace_event_call *call;
248 	int ret = -EINVAL;
249 
250 	mutex_lock(&event_mutex);
251 	list_for_each_entry(call, &ftrace_events, list) {
252 
253 		if (!call->name || !call->class || !call->class->reg)
254 			continue;
255 
256 		if (match &&
257 		    strcmp(match, call->name) != 0 &&
258 		    strcmp(match, call->class->system) != 0)
259 			continue;
260 
261 		if (sub && strcmp(sub, call->class->system) != 0)
262 			continue;
263 
264 		if (event && strcmp(event, call->name) != 0)
265 			continue;
266 
267 		ftrace_event_enable_disable(call, set);
268 
269 		ret = 0;
270 	}
271 	mutex_unlock(&event_mutex);
272 
273 	return ret;
274 }
275 
276 static int ftrace_set_clr_event(char *buf, int set)
277 {
278 	char *event = NULL, *sub = NULL, *match;
279 
280 	/*
281 	 * The buf format can be <subsystem>:<event-name>
282 	 *  *:<event-name> means any event by that name.
283 	 *  :<event-name> is the same.
284 	 *
285 	 *  <subsystem>:* means all events in that subsystem
286 	 *  <subsystem>: means the same.
287 	 *
288  *  <name> (no ':') means all events in a subsystem named
289  *  <name>, or any event named <name>
290 	 */
291 
292 	match = strsep(&buf, ":");
293 	if (buf) {
294 		sub = match;
295 		event = buf;
296 		match = NULL;
297 
298 		if (!strlen(sub) || strcmp(sub, "*") == 0)
299 			sub = NULL;
300 		if (!strlen(event) || strcmp(event, "*") == 0)
301 			event = NULL;
302 	}
303 
304 	return __ftrace_set_clr_event(match, sub, event, set);
305 }
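
/*
 * For example (illustrative strings), all of the following are valid
 * inputs to the parser above:
 *
 *	"sched:sched_switch"	one event in the sched subsystem
 *	"sched:"		every event in the sched subsystem
 *	"*:sched_switch"	any event named sched_switch
 *	"sched"			any subsystem or event matching "sched"
 */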
306 
307 /**
308  * trace_set_clr_event - enable or disable an event
309  * @system: system name to match (NULL for any system)
310  * @event: event name to match (NULL for all events, within system)
311  * @set: 1 to enable, 0 to disable
312  *
313  * This is a way for other parts of the kernel to enable or disable
314  * event recording.
315  *
316  * Returns 0 on success, -EINVAL if the parameters do not match any
317  * registered events.
318  */
319 int trace_set_clr_event(const char *system, const char *event, int set)
320 {
321 	return __ftrace_set_clr_event(NULL, system, event, set);
322 }
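
/*
 * Usage sketch, assuming the sched events are built in: other kernel
 * code could do
 *
 *	trace_set_clr_event("sched", NULL, 1);
 *
 * to enable every sched event, or
 *
 *	trace_set_clr_event("sched", "sched_switch", 0);
 *
 * to disable just sched_switch.
 */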
323 
324 /* 127 chars plus the terminating NUL should be much more than enough */
325 #define EVENT_BUF_SIZE		127
326 
327 static ssize_t
328 ftrace_event_write(struct file *file, const char __user *ubuf,
329 		   size_t cnt, loff_t *ppos)
330 {
331 	struct trace_parser parser;
332 	ssize_t read, ret;
333 
334 	if (!cnt)
335 		return 0;
336 
337 	ret = tracing_update_buffers();
338 	if (ret < 0)
339 		return ret;
340 
341 	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
342 		return -ENOMEM;
343 
344 	read = trace_get_user(&parser, ubuf, cnt, ppos);
345 
346 	if (read >= 0 && trace_parser_loaded(&parser)) {
347 		int set = 1;
348 
349 		if (*parser.buffer == '!')
350 			set = 0;
351 
352 		parser.buffer[parser.idx] = 0;
353 
354 		ret = ftrace_set_clr_event(parser.buffer + !set, set);
355 		if (ret)
356 			goto out_put;
357 	}
358 
359 	ret = read;
360 
361  out_put:
362 	trace_parser_put(&parser);
363 
364 	return ret;
365 }
366 
367 static void *
368 t_next(struct seq_file *m, void *v, loff_t *pos)
369 {
370 	struct ftrace_event_call *call = v;
371 
372 	(*pos)++;
373 
374 	list_for_each_entry_continue(call, &ftrace_events, list) {
375 		/*
376 		 * The ftrace subsystem is for showing formats only.
377 		 * They can not be enabled or disabled via the event files.
378 		 */
379 		if (call->class && call->class->reg)
380 			return call;
381 	}
382 
383 	return NULL;
384 }
385 
386 static void *t_start(struct seq_file *m, loff_t *pos)
387 {
388 	struct ftrace_event_call *call;
389 	loff_t l;
390 
391 	mutex_lock(&event_mutex);
392 
393 	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
394 	for (l = 0; l <= *pos; ) {
395 		call = t_next(m, call, &l);
396 		if (!call)
397 			break;
398 	}
399 	return call;
400 }
401 
402 static void *
403 s_next(struct seq_file *m, void *v, loff_t *pos)
404 {
405 	struct ftrace_event_call *call = v;
406 
407 	(*pos)++;
408 
409 	list_for_each_entry_continue(call, &ftrace_events, list) {
410 		if (call->flags & TRACE_EVENT_FL_ENABLED)
411 			return call;
412 	}
413 
414 	return NULL;
415 }
416 
417 static void *s_start(struct seq_file *m, loff_t *pos)
418 {
419 	struct ftrace_event_call *call;
420 	loff_t l;
421 
422 	mutex_lock(&event_mutex);
423 
424 	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
425 	for (l = 0; l <= *pos; ) {
426 		call = s_next(m, call, &l);
427 		if (!call)
428 			break;
429 	}
430 	return call;
431 }
432 
433 static int t_show(struct seq_file *m, void *v)
434 {
435 	struct ftrace_event_call *call = v;
436 
437 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
438 		seq_printf(m, "%s:", call->class->system);
439 	seq_printf(m, "%s\n", call->name);
440 
441 	return 0;
442 }
443 
444 static void t_stop(struct seq_file *m, void *p)
445 {
446 	mutex_unlock(&event_mutex);
447 }
448 
449 static int
450 ftrace_event_seq_open(struct inode *inode, struct file *file)
451 {
452 	const struct seq_operations *seq_ops;
453 
454 	if ((file->f_mode & FMODE_WRITE) &&
455 	    (file->f_flags & O_TRUNC))
456 		ftrace_clear_events();
457 
458 	seq_ops = inode->i_private;
459 	return seq_open(file, seq_ops);
460 }
461 
462 static ssize_t
463 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
464 		  loff_t *ppos)
465 {
466 	struct ftrace_event_call *call = filp->private_data;
467 	char *buf;
468 
469 	if (call->flags & TRACE_EVENT_FL_ENABLED)
470 		buf = "1\n";
471 	else
472 		buf = "0\n";
473 
474 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
475 }
476 
477 static ssize_t
478 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
479 		   loff_t *ppos)
480 {
481 	struct ftrace_event_call *call = filp->private_data;
482 	char buf[64];
483 	unsigned long val;
484 	int ret;
485 
486 	if (cnt >= sizeof(buf))
487 		return -EINVAL;
488 
489 	if (copy_from_user(&buf, ubuf, cnt))
490 		return -EFAULT;
491 
492 	buf[cnt] = 0;
493 
494 	ret = strict_strtoul(buf, 10, &val);
495 	if (ret < 0)
496 		return ret;
497 
498 	ret = tracing_update_buffers();
499 	if (ret < 0)
500 		return ret;
501 
502 	switch (val) {
503 	case 0:
504 	case 1:
505 		mutex_lock(&event_mutex);
506 		ret = ftrace_event_enable_disable(call, val);
507 		mutex_unlock(&event_mutex);
508 		break;
509 
510 	default:
511 		return -EINVAL;
512 	}
513 
514 	*ppos += cnt;
515 
516 	return ret ? ret : cnt;
517 }
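
/*
 * The read/write handlers above back the per-event "enable" file; an
 * illustrative shell session (paths assume debugfs on /sys/kernel/debug):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	# cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	1
 */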
518 
519 static ssize_t
520 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
521 		   loff_t *ppos)
522 {
523 	const char set_to_char[4] = { '?', '0', '1', 'X' };
524 	const char *system = filp->private_data;
525 	struct ftrace_event_call *call;
526 	char buf[2];
527 	int set = 0;
528 	int ret;
529 
530 	mutex_lock(&event_mutex);
531 	list_for_each_entry(call, &ftrace_events, list) {
532 		if (!call->name || !call->class || !call->class->reg)
533 			continue;
534 
535 		if (system && strcmp(call->class->system, system) != 0)
536 			continue;
537 
538 		/*
539 		 * We need to find out if all the events are set
540 		 * or if all events are cleared, or if we have
541 		 * a mixture.
542 		 */
543 		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
544 
545 		/*
546 		 * If we have a mixture, no need to look further.
547 		 */
548 		if (set == 3)
549 			break;
550 	}
551 	mutex_unlock(&event_mutex);
552 
553 	buf[0] = set_to_char[set];
554 	buf[1] = '\n';
555 
556 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
557 
558 	return ret;
559 }
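
/*
 * The "set" bits accumulated above decode through set_to_char[] as:
 *
 *	0 (no matching events)	-> '?'
 *	1 (all disabled)	-> '0'
 *	2 (all enabled)		-> '1'
 *	3 (a mixture)		-> 'X'
 */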
560 
561 static ssize_t
562 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
563 		    loff_t *ppos)
564 {
565 	const char *system = filp->private_data;
566 	unsigned long val;
567 	char buf[64];
568 	ssize_t ret;
569 
570 	if (cnt >= sizeof(buf))
571 		return -EINVAL;
572 
573 	if (copy_from_user(&buf, ubuf, cnt))
574 		return -EFAULT;
575 
576 	buf[cnt] = 0;
577 
578 	ret = strict_strtoul(buf, 10, &val);
579 	if (ret < 0)
580 		return ret;
581 
582 	ret = tracing_update_buffers();
583 	if (ret < 0)
584 		return ret;
585 
586 	if (val != 0 && val != 1)
587 		return -EINVAL;
588 
589 	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
590 	if (ret)
591 		goto out;
592 
593 	ret = cnt;
594 
595 out:
596 	*ppos += cnt;
597 
598 	return ret;
599 }
600 
601 enum {
602 	FORMAT_HEADER		= 1,
603 	FORMAT_PRINTFMT		= 2,
604 };
605 
606 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
607 {
608 	struct ftrace_event_call *call = m->private;
609 	struct ftrace_event_field *field;
610 	struct list_head *head;
611 
612 	(*pos)++;
613 
614 	switch ((unsigned long)v) {
615 	case FORMAT_HEADER:
616 		head = &ftrace_common_fields;
617 
618 		if (unlikely(list_empty(head)))
619 			return NULL;
620 
621 		field = list_entry(head->prev, struct ftrace_event_field, link);
622 		return field;
623 
624 	case FORMAT_PRINTFMT:
625 		/* all done */
626 		return NULL;
627 	}
628 
629 	head = trace_get_fields(call);
630 
631 	/*
632 	 * To separate common fields from event fields, the
633 	 * LSB is set on the first event field. Clear it in case it is set.
634 	 */
635 	v = (void *)((unsigned long)v & ~1L);
636 
637 	field = v;
638 	/*
639 	 * If this is a common field and we are at the end of that list,
640 	 * continue with the main list.
641 	 */
642 	if (field->link.prev == &ftrace_common_fields) {
643 		if (unlikely(list_empty(head)))
644 			return NULL;
645 		field = list_entry(head->prev, struct ftrace_event_field, link);
646 		/* Set the LSB to notify f_show to print an extra newline */
647 		field = (struct ftrace_event_field *)
648 			((unsigned long)field | 1);
649 		return field;
650 	}
651 
652 	/* If we are done, tell f_show to print the format */
653 	if (field->link.prev == head)
654 		return (void *)FORMAT_PRINTFMT;
655 
656 	field = list_entry(field->link.prev, struct ftrace_event_field, link);
657 
658 	return field;
659 }
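
/*
 * Note on the trick above: struct ftrace_event_field is at least
 * pointer-aligned, so bit 0 of a field pointer is always clear and can
 * safely carry the "first event field" tag, set and cleared with:
 *
 *	field = (struct ftrace_event_field *)((unsigned long)field | 1);
 *	field = (struct ftrace_event_field *)((unsigned long)field & ~1L);
 */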
660 
661 static void *f_start(struct seq_file *m, loff_t *pos)
662 {
663 	loff_t l = 0;
664 	void *p;
665 
666 	/* Start by showing the header */
667 	if (!*pos)
668 		return (void *)FORMAT_HEADER;
669 
670 	p = (void *)FORMAT_HEADER;
671 	do {
672 		p = f_next(m, p, &l);
673 	} while (p && l < *pos);
674 
675 	return p;
676 }
677 
678 static int f_show(struct seq_file *m, void *v)
679 {
680 	struct ftrace_event_call *call = m->private;
681 	struct ftrace_event_field *field;
682 	const char *array_descriptor;
683 
684 	switch ((unsigned long)v) {
685 	case FORMAT_HEADER:
686 		seq_printf(m, "name: %s\n", call->name);
687 		seq_printf(m, "ID: %d\n", call->event.type);
688 		seq_printf(m, "format:\n");
689 		return 0;
690 
691 	case FORMAT_PRINTFMT:
692 		seq_printf(m, "\nprint fmt: %s\n",
693 			   call->print_fmt);
694 		return 0;
695 	}
696 
697 	/*
698 	 * To separate common fields from event fields, the
699 	 * LSB is set on the first event field. Clear it and
700 	 * print a newline if it is set.
701 	 */
702 	if ((unsigned long)v & 1) {
703 		seq_putc(m, '\n');
704 		v = (void *)((unsigned long)v & ~1L);
705 	}
706 
707 	field = v;
708 
709 	/*
710 	 * Smartly show the array type (except for dynamic arrays).
711 	 * Normal:
712 	 *	field:TYPE VAR
713 	 * If TYPE := TYPE[LEN], it is shown as:
714 	 *	field:TYPE VAR[LEN]
715 	 */
716 	array_descriptor = strchr(field->type, '[');
717 
718 	if (!strncmp(field->type, "__data_loc", 10))
719 		array_descriptor = NULL;
720 
721 	if (!array_descriptor)
722 		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
723 			   field->type, field->name, field->offset,
724 			   field->size, !!field->is_signed);
725 	else
726 		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
727 			   (int)(array_descriptor - field->type),
728 			   field->type, field->name,
729 			   array_descriptor, field->offset,
730 			   field->size, !!field->is_signed);
731 
732 	return 0;
733 }
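
/*
 * Put together, a "format" file rendered by these handlers looks
 * roughly like this (the ID and field layout vary per event):
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *	print fmt: "prev_comm=%s prev_pid=%d ...", ...
 */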
734 
735 static void f_stop(struct seq_file *m, void *p)
736 {
737 }
738 
739 static const struct seq_operations trace_format_seq_ops = {
740 	.start		= f_start,
741 	.next		= f_next,
742 	.stop		= f_stop,
743 	.show		= f_show,
744 };
745 
746 static int trace_format_open(struct inode *inode, struct file *file)
747 {
748 	struct ftrace_event_call *call = inode->i_private;
749 	struct seq_file *m;
750 	int ret;
751 
752 	ret = seq_open(file, &trace_format_seq_ops);
753 	if (ret < 0)
754 		return ret;
755 
756 	m = file->private_data;
757 	m->private = call;
758 
759 	return 0;
760 }
761 
762 static ssize_t
763 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
764 {
765 	struct ftrace_event_call *call = filp->private_data;
766 	struct trace_seq *s;
767 	int r;
768 
769 	if (*ppos)
770 		return 0;
771 
772 	s = kmalloc(sizeof(*s), GFP_KERNEL);
773 	if (!s)
774 		return -ENOMEM;
775 
776 	trace_seq_init(s);
777 	trace_seq_printf(s, "%d\n", call->event.type);
778 
779 	r = simple_read_from_buffer(ubuf, cnt, ppos,
780 				    s->buffer, s->len);
781 	kfree(s);
782 	return r;
783 }
784 
785 static ssize_t
786 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
787 		  loff_t *ppos)
788 {
789 	struct ftrace_event_call *call = filp->private_data;
790 	struct trace_seq *s;
791 	int r;
792 
793 	if (*ppos)
794 		return 0;
795 
796 	s = kmalloc(sizeof(*s), GFP_KERNEL);
797 	if (!s)
798 		return -ENOMEM;
799 
800 	trace_seq_init(s);
801 
802 	print_event_filter(call, s);
803 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
804 
805 	kfree(s);
806 
807 	return r;
808 }
809 
810 static ssize_t
811 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
812 		   loff_t *ppos)
813 {
814 	struct ftrace_event_call *call = filp->private_data;
815 	char *buf;
816 	int err;
817 
818 	if (cnt >= PAGE_SIZE)
819 		return -EINVAL;
820 
821 	buf = (char *)__get_free_page(GFP_TEMPORARY);
822 	if (!buf)
823 		return -ENOMEM;
824 
825 	if (copy_from_user(buf, ubuf, cnt)) {
826 		free_page((unsigned long) buf);
827 		return -EFAULT;
828 	}
829 	buf[cnt] = '\0';
830 
831 	err = apply_event_filter(call, buf);
832 	free_page((unsigned long) buf);
833 	if (err < 0)
834 		return err;
835 
836 	*ppos += cnt;
837 
838 	return cnt;
839 }
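
/*
 * Filter strings handed to apply_event_filter() are boolean
 * expressions over the event's fields; an illustrative session:
 *
 *	# cd /sys/kernel/debug/tracing/events/sched/sched_switch
 *	# echo 'prev_prio < 100 && next_pid != 0' > filter
 *	# echo 0 > filter		(clears the filter)
 */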
840 
841 static ssize_t
842 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
843 		      loff_t *ppos)
844 {
845 	struct event_subsystem *system = filp->private_data;
846 	struct trace_seq *s;
847 	int r;
848 
849 	if (*ppos)
850 		return 0;
851 
852 	s = kmalloc(sizeof(*s), GFP_KERNEL);
853 	if (!s)
854 		return -ENOMEM;
855 
856 	trace_seq_init(s);
857 
858 	print_subsystem_event_filter(system, s);
859 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
860 
861 	kfree(s);
862 
863 	return r;
864 }
865 
866 static ssize_t
867 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
868 		       loff_t *ppos)
869 {
870 	struct event_subsystem *system = filp->private_data;
871 	char *buf;
872 	int err;
873 
874 	if (cnt >= PAGE_SIZE)
875 		return -EINVAL;
876 
877 	buf = (char *)__get_free_page(GFP_TEMPORARY);
878 	if (!buf)
879 		return -ENOMEM;
880 
881 	if (copy_from_user(buf, ubuf, cnt)) {
882 		free_page((unsigned long) buf);
883 		return -EFAULT;
884 	}
885 	buf[cnt] = '\0';
886 
887 	err = apply_subsystem_event_filter(system, buf);
888 	free_page((unsigned long) buf);
889 	if (err < 0)
890 		return err;
891 
892 	*ppos += cnt;
893 
894 	return cnt;
895 }
896 
897 static ssize_t
898 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
899 {
900 	int (*func)(struct trace_seq *s) = filp->private_data;
901 	struct trace_seq *s;
902 	int r;
903 
904 	if (*ppos)
905 		return 0;
906 
907 	s = kmalloc(sizeof(*s), GFP_KERNEL);
908 	if (!s)
909 		return -ENOMEM;
910 
911 	trace_seq_init(s);
912 
913 	func(s);
914 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
915 
916 	kfree(s);
917 
918 	return r;
919 }
920 
921 static const struct seq_operations show_event_seq_ops = {
922 	.start = t_start,
923 	.next = t_next,
924 	.show = t_show,
925 	.stop = t_stop,
926 };
927 
928 static const struct seq_operations show_set_event_seq_ops = {
929 	.start = s_start,
930 	.next = s_next,
931 	.show = t_show,
932 	.stop = t_stop,
933 };
934 
935 static const struct file_operations ftrace_avail_fops = {
936 	.open = ftrace_event_seq_open,
937 	.read = seq_read,
938 	.llseek = seq_lseek,
939 	.release = seq_release,
940 };
941 
942 static const struct file_operations ftrace_set_event_fops = {
943 	.open = ftrace_event_seq_open,
944 	.read = seq_read,
945 	.write = ftrace_event_write,
946 	.llseek = seq_lseek,
947 	.release = seq_release,
948 };
949 
950 static const struct file_operations ftrace_enable_fops = {
951 	.open = tracing_open_generic,
952 	.read = event_enable_read,
953 	.write = event_enable_write,
954 };
955 
956 static const struct file_operations ftrace_event_format_fops = {
957 	.open = trace_format_open,
958 	.read = seq_read,
959 	.llseek = seq_lseek,
960 	.release = seq_release,
961 };
962 
963 static const struct file_operations ftrace_event_id_fops = {
964 	.open = tracing_open_generic,
965 	.read = event_id_read,
966 };
967 
968 static const struct file_operations ftrace_event_filter_fops = {
969 	.open = tracing_open_generic,
970 	.read = event_filter_read,
971 	.write = event_filter_write,
972 };
973 
974 static const struct file_operations ftrace_subsystem_filter_fops = {
975 	.open = tracing_open_generic,
976 	.read = subsystem_filter_read,
977 	.write = subsystem_filter_write,
978 };
979 
980 static const struct file_operations ftrace_system_enable_fops = {
981 	.open = tracing_open_generic,
982 	.read = system_enable_read,
983 	.write = system_enable_write,
984 };
985 
986 static const struct file_operations ftrace_show_header_fops = {
987 	.open = tracing_open_generic,
988 	.read = show_header,
989 };
990 
991 static struct dentry *event_trace_events_dir(void)
992 {
993 	static struct dentry *d_tracer;
994 	static struct dentry *d_events;
995 
996 	if (d_events)
997 		return d_events;
998 
999 	d_tracer = tracing_init_dentry();
1000 	if (!d_tracer)
1001 		return NULL;
1002 
1003 	d_events = debugfs_create_dir("events", d_tracer);
1004 	if (!d_events)
1005 		pr_warning("Could not create debugfs "
1006 			   "'events' directory\n");
1007 
1008 	return d_events;
1009 }
1010 
1011 static LIST_HEAD(event_subsystems);
1012 
1013 static struct dentry *
1014 event_subsystem_dir(const char *name, struct dentry *d_events)
1015 {
1016 	struct event_subsystem *system;
1017 	struct dentry *entry;
1018 
1019 	/* First see if we have already created this dir */
1020 	list_for_each_entry(system, &event_subsystems, list) {
1021 		if (strcmp(system->name, name) == 0) {
1022 			system->nr_events++;
1023 			return system->entry;
1024 		}
1025 	}
1026 
1027 	/* need to create new entry */
1028 	system = kmalloc(sizeof(*system), GFP_KERNEL);
1029 	if (!system) {
1030 		pr_warning("No memory to create event subsystem %s\n",
1031 			   name);
1032 		return d_events;
1033 	}
1034 
1035 	system->entry = debugfs_create_dir(name, d_events);
1036 	if (!system->entry) {
1037 		pr_warning("Could not create event subsystem %s\n",
1038 			   name);
1039 		kfree(system);
1040 		return d_events;
1041 	}
1042 
1043 	system->nr_events = 1;
1044 	system->name = kstrdup(name, GFP_KERNEL);
1045 	if (!system->name) {
1046 		debugfs_remove(system->entry);
1047 		kfree(system);
1048 		return d_events;
1049 	}
1050 
1051 	list_add(&system->list, &event_subsystems);
1052 
1053 	system->filter = NULL;
1054 
1055 	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1056 	if (!system->filter) {
1057 		pr_warning("Could not allocate filter for subsystem "
1058 			   "'%s'\n", name);
1059 		return system->entry;
1060 	}
1061 
1062 	entry = debugfs_create_file("filter", 0644, system->entry, system,
1063 				    &ftrace_subsystem_filter_fops);
1064 	if (!entry) {
1065 		kfree(system->filter);
1066 		system->filter = NULL;
1067 		pr_warning("Could not create debugfs "
1068 			   "'%s/filter' entry\n", name);
1069 	}
1070 
1071 	trace_create_file("enable", 0644, system->entry,
1072 			  (void *)system->name,
1073 			  &ftrace_system_enable_fops);
1074 
1075 	return system->entry;
1076 }
1077 
1078 static int
1079 event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
1080 		 const struct file_operations *id,
1081 		 const struct file_operations *enable,
1082 		 const struct file_operations *filter,
1083 		 const struct file_operations *format)
1084 {
1085 	struct list_head *head;
1086 	int ret;
1087 
1088 	/*
1089 	 * If the trace point header did not define TRACE_SYSTEM
1090 	 * then the system defaults to "TRACE_SYSTEM".
1091 	 */
1092 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
1093 		d_events = event_subsystem_dir(call->class->system, d_events);
1094 
1095 	call->dir = debugfs_create_dir(call->name, d_events);
1096 	if (!call->dir) {
1097 		pr_warning("Could not create debugfs "
1098 			   "'%s' directory\n", call->name);
1099 		return -1;
1100 	}
1101 
1102 	if (call->class->reg)
1103 		trace_create_file("enable", 0644, call->dir, call,
1104 				  enable);
1105 
1106 #ifdef CONFIG_PERF_EVENTS
1107 	if (call->event.type && call->class->reg)
1108 		trace_create_file("id", 0444, call->dir, call,
1109 				  id);
1110 #endif
1111 
1112 	/*
1113 	 * Other events may have the same class. Only update
1114 	 * the fields if they are not already defined.
1115 	 */
1116 	head = trace_get_fields(call);
1117 	if (list_empty(head)) {
1118 		ret = call->class->define_fields(call);
1119 		if (ret < 0) {
1120 			pr_warning("Could not initialize trace point"
1121 				   " events/%s\n", call->name);
1122 			return ret;
1123 		}
1124 	}
1125 	trace_create_file("filter", 0644, call->dir, call,
1126 			  filter);
1127 
1128 	trace_create_file("format", 0444, call->dir, call,
1129 			  format);
1130 
1131 	return 0;
1132 }
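
/*
 * The result is the familiar per-event debugfs layout (sketch):
 *
 *	tracing/events/<system>/<event>/
 *		enable		filter		format		id
 *
 * where "id" is only created when CONFIG_PERF_EVENTS is enabled and
 * the event has a registered type.
 */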
1133 
1134 static int
1135 __trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
1136 		       const struct file_operations *id,
1137 		       const struct file_operations *enable,
1138 		       const struct file_operations *filter,
1139 		       const struct file_operations *format)
1140 {
1141 	struct dentry *d_events;
1142 	int ret;
1143 
1144 	/* The linker may leave blanks */
1145 	if (!call->name)
1146 		return -EINVAL;
1147 
1148 	if (call->class->raw_init) {
1149 		ret = call->class->raw_init(call);
1150 		if (ret < 0) {
1151 			if (ret != -ENOSYS)
1152 				pr_warning("Could not initialize trace events/%s\n",
1153 					   call->name);
1154 			return ret;
1155 		}
1156 	}
1157 
1158 	d_events = event_trace_events_dir();
1159 	if (!d_events)
1160 		return -ENOENT;
1161 
1162 	ret = event_create_dir(call, d_events, id, enable, filter, format);
1163 	if (!ret)
1164 		list_add(&call->list, &ftrace_events);
1165 	call->mod = mod;
1166 
1167 	return ret;
1168 }
1169 
1170 /* Add an additional event_call dynamically */
1171 int trace_add_event_call(struct ftrace_event_call *call)
1172 {
1173 	int ret;
1174 	mutex_lock(&event_mutex);
1175 	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
1176 				     &ftrace_enable_fops,
1177 				     &ftrace_event_filter_fops,
1178 				     &ftrace_event_format_fops);
1179 	mutex_unlock(&event_mutex);
1180 	return ret;
1181 }
1182 
1183 static void remove_subsystem_dir(const char *name)
1184 {
1185 	struct event_subsystem *system;
1186 
1187 	if (strcmp(name, TRACE_SYSTEM) == 0)
1188 		return;
1189 
1190 	list_for_each_entry(system, &event_subsystems, list) {
1191 		if (strcmp(system->name, name) == 0) {
1192 			if (!--system->nr_events) {
1193 				struct event_filter *filter = system->filter;
1194 
1195 				debugfs_remove_recursive(system->entry);
1196 				list_del(&system->list);
1197 				if (filter) {
1198 					kfree(filter->filter_string);
1199 					kfree(filter);
1200 				}
1201 				kfree(system->name);
1202 				kfree(system);
1203 			}
1204 			break;
1205 		}
1206 	}
1207 }
1208 
1209 /*
1210  * Must be called with both event_mutex and trace_event_mutex held.
1211  */
1212 static void __trace_remove_event_call(struct ftrace_event_call *call)
1213 {
1214 	ftrace_event_enable_disable(call, 0);
1215 	if (call->event.funcs)
1216 		__unregister_ftrace_event(&call->event);
1217 	debugfs_remove_recursive(call->dir);
1218 	list_del(&call->list);
1219 	trace_destroy_fields(call);
1220 	destroy_preds(call);
1221 	remove_subsystem_dir(call->class->system);
1222 }
1223 
1224 /* Remove an event_call */
1225 void trace_remove_event_call(struct ftrace_event_call *call)
1226 {
1227 	mutex_lock(&event_mutex);
1228 	down_write(&trace_event_mutex);
1229 	__trace_remove_event_call(call);
1230 	up_write(&trace_event_mutex);
1231 	mutex_unlock(&event_mutex);
1232 }
1233 
1234 #define for_each_event(event, start, end)			\
1235 	for (event = start;					\
1236 	     (unsigned long)event < (unsigned long)end;		\
1237 	     event++)
1238 
1239 #ifdef CONFIG_MODULES
1240 
1241 static LIST_HEAD(ftrace_module_file_list);
1242 
1243 /*
1244  * Modules must own their file_operations to keep up with
1245  * reference counting.
1246  */
1247 struct ftrace_module_file_ops {
1248 	struct list_head		list;
1249 	struct module			*mod;
1250 	struct file_operations		id;
1251 	struct file_operations		enable;
1252 	struct file_operations		format;
1253 	struct file_operations		filter;
1254 };
1255 
1256 static struct ftrace_module_file_ops *
1257 trace_create_file_ops(struct module *mod)
1258 {
1259 	struct ftrace_module_file_ops *file_ops;
1260 
1261 	/*
1262 	 * This is a bit of a PITA. To allow for correct reference
1263 	 * counting, modules must "own" their file_operations.
1264 	 * To do this, we allocate the file operations that will be
1265 	 * used in the event directory.
1266 	 */
1267 
1268 	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1269 	if (!file_ops)
1270 		return NULL;
1271 
1272 	file_ops->mod = mod;
1273 
1274 	file_ops->id = ftrace_event_id_fops;
1275 	file_ops->id.owner = mod;
1276 
1277 	file_ops->enable = ftrace_enable_fops;
1278 	file_ops->enable.owner = mod;
1279 
1280 	file_ops->filter = ftrace_event_filter_fops;
1281 	file_ops->filter.owner = mod;
1282 
1283 	file_ops->format = ftrace_event_format_fops;
1284 	file_ops->format.owner = mod;
1285 
1286 	list_add(&file_ops->list, &ftrace_module_file_list);
1287 
1288 	return file_ops;
1289 }
1290 
1291 static void trace_module_add_events(struct module *mod)
1292 {
1293 	struct ftrace_module_file_ops *file_ops = NULL;
1294 	struct ftrace_event_call *call, *start, *end;
1295 
1296 	start = mod->trace_events;
1297 	end = mod->trace_events + mod->num_trace_events;
1298 
1299 	if (start == end)
1300 		return;
1301 
1302 	file_ops = trace_create_file_ops(mod);
1303 	if (!file_ops)
1304 		return;
1305 
1306 	for_each_event(call, start, end) {
1307 		__trace_add_event_call(call, mod,
1308 				       &file_ops->id, &file_ops->enable,
1309 				       &file_ops->filter, &file_ops->format);
1310 	}
1311 }
1312 
1313 static void trace_module_remove_events(struct module *mod)
1314 {
1315 	struct ftrace_module_file_ops *file_ops;
1316 	struct ftrace_event_call *call, *p;
1317 	bool found = false;
1318 
1319 	down_write(&trace_event_mutex);
1320 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
1321 		if (call->mod == mod) {
1322 			found = true;
1323 			__trace_remove_event_call(call);
1324 		}
1325 	}
1326 
1327 	/* Now free the file_operations */
1328 	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1329 		if (file_ops->mod == mod)
1330 			break;
1331 	}
1332 	if (&file_ops->list != &ftrace_module_file_list) {
1333 		list_del(&file_ops->list);
1334 		kfree(file_ops);
1335 	}
1336 
1337 	/*
1338 	 * It is safest to reset the ring buffer if the module being unloaded
1339 	 * registered any events.
1340 	 */
1341 	if (found)
1342 		tracing_reset_current_online_cpus();
1343 	up_write(&trace_event_mutex);
1344 }
1345 
1346 static int trace_module_notify(struct notifier_block *self,
1347 			       unsigned long val, void *data)
1348 {
1349 	struct module *mod = data;
1350 
1351 	mutex_lock(&event_mutex);
1352 	switch (val) {
1353 	case MODULE_STATE_COMING:
1354 		trace_module_add_events(mod);
1355 		break;
1356 	case MODULE_STATE_GOING:
1357 		trace_module_remove_events(mod);
1358 		break;
1359 	}
1360 	mutex_unlock(&event_mutex);
1361 
1362 	return 0;
1363 }
1364 #else
1365 static int trace_module_notify(struct notifier_block *self,
1366 			       unsigned long val, void *data)
1367 {
1368 	return 0;
1369 }
1370 #endif /* CONFIG_MODULES */
1371 
1372 static struct notifier_block trace_module_nb = {
1373 	.notifier_call = trace_module_notify,
1374 	.priority = 0,
1375 };
1376 
1377 extern struct ftrace_event_call __start_ftrace_events[];
1378 extern struct ftrace_event_call __stop_ftrace_events[];
1379 
1380 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
1381 
1382 static __init int setup_trace_event(char *str)
1383 {
1384 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
1385 	ring_buffer_expanded = 1;
1386 	tracing_selftest_disabled = 1;
1387 
1388 	return 1;
1389 }
1390 __setup("trace_event=", setup_trace_event);
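
/*
 * This lets events be enabled from the kernel command line before user
 * space is up, e.g. booting with (illustrative):
 *
 *	trace_event=sched:sched_switch,irq:*
 *
 * enables sched_switch plus every irq event; the buffer is consumed in
 * event_trace_init() below.
 */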
1391 
1392 static __init int event_trace_init(void)
1393 {
1394 	struct ftrace_event_call *call;
1395 	struct dentry *d_tracer;
1396 	struct dentry *entry;
1397 	struct dentry *d_events;
1398 	int ret;
1399 	char *buf = bootup_event_buf;
1400 	char *token;
1401 
1402 	d_tracer = tracing_init_dentry();
1403 	if (!d_tracer)
1404 		return 0;
1405 
1406 	entry = debugfs_create_file("available_events", 0444, d_tracer,
1407 				    (void *)&show_event_seq_ops,
1408 				    &ftrace_avail_fops);
1409 	if (!entry)
1410 		pr_warning("Could not create debugfs "
1411 			   "'available_events' entry\n");
1412 
1413 	entry = debugfs_create_file("set_event", 0644, d_tracer,
1414 				    (void *)&show_set_event_seq_ops,
1415 				    &ftrace_set_event_fops);
1416 	if (!entry)
1417 		pr_warning("Could not create debugfs "
1418 			   "'set_event' entry\n");
1419 
1420 	d_events = event_trace_events_dir();
1421 	if (!d_events)
1422 		return 0;
1423 
1424 	/* ring buffer internal formats */
1425 	trace_create_file("header_page", 0444, d_events,
1426 			  ring_buffer_print_page_header,
1427 			  &ftrace_show_header_fops);
1428 
1429 	trace_create_file("header_event", 0444, d_events,
1430 			  ring_buffer_print_entry_header,
1431 			  &ftrace_show_header_fops);
1432 
1433 	trace_create_file("enable", 0644, d_events,
1434 			  NULL, &ftrace_system_enable_fops);
1435 
1436 	if (trace_define_common_fields())
1437 		pr_warning("tracing: Failed to allocate common fields");
1438 
1439 	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
1440 		__trace_add_event_call(call, NULL, &ftrace_event_id_fops,
1441 				       &ftrace_enable_fops,
1442 				       &ftrace_event_filter_fops,
1443 				       &ftrace_event_format_fops);
1444 	}
1445 
1446 	while (true) {
1447 		token = strsep(&buf, ",");
1448 
1449 		if (!token)
1450 			break;
1451 		if (!*token)
1452 			continue;
1453 
1454 		ret = ftrace_set_clr_event(token, 1);
1455 		if (ret)
1456 			pr_warning("Failed to enable trace event: %s\n", token);
1457 	}
1458 
1459 	ret = register_module_notifier(&trace_module_nb);
1460 	if (ret)
1461 		pr_warning("Failed to register trace events module notifier\n");
1462 
1463 	return 0;
1464 }
1465 fs_initcall(event_trace_init);
1466 
1467 #ifdef CONFIG_FTRACE_STARTUP_TEST
1468 
1469 static DEFINE_SPINLOCK(test_spinlock);
1470 static DEFINE_SPINLOCK(test_spinlock_irq);
1471 static DEFINE_MUTEX(test_mutex);
1472 
1473 static __init void test_work(struct work_struct *dummy)
1474 {
1475 	spin_lock(&test_spinlock);
1476 	spin_lock_irq(&test_spinlock_irq);
1477 	udelay(1);
1478 	spin_unlock_irq(&test_spinlock_irq);
1479 	spin_unlock(&test_spinlock);
1480 
1481 	mutex_lock(&test_mutex);
1482 	msleep(1);
1483 	mutex_unlock(&test_mutex);
1484 }
1485 
1486 static __init int event_test_thread(void *unused)
1487 {
1488 	void *test_malloc;
1489 
1490 	test_malloc = kmalloc(1234, GFP_KERNEL);
1491 	if (!test_malloc)
1492 		pr_info("failed to kmalloc\n");
1493 
1494 	schedule_on_each_cpu(test_work);
1495 
1496 	kfree(test_malloc);
1497 
1498 	set_current_state(TASK_INTERRUPTIBLE);
1499 	while (!kthread_should_stop())
1500 		schedule();
1501 
1502 	return 0;
1503 }
1504 
1505 /*
1506  * Do various things that may trigger events.
1507  */
1508 static __init void event_test_stuff(void)
1509 {
1510 	struct task_struct *test_thread;
1511 
1512 	test_thread = kthread_run(event_test_thread, NULL, "test-events");
1513 	msleep(1);
1514 	kthread_stop(test_thread);
1515 }
1516 
1517 /*
1518  * For every trace event defined, we will test each trace point separately,
1519  * and then by groups, and finally all trace points.
1520  */
1521 static __init void event_trace_self_tests(void)
1522 {
1523 	struct ftrace_event_call *call;
1524 	struct event_subsystem *system;
1525 	int ret;
1526 
1527 	pr_info("Running tests on trace events:\n");
1528 
1529 	list_for_each_entry(call, &ftrace_events, list) {
1530 
1531 		/* Only test those that have a probe */
1532 		if (!call->class || !call->class->probe)
1533 			continue;
1534 
1535 /*
1536  * Testing syscall events here is pretty useless, but
1537  * we still do it if configured, even though it is time consuming.
1538  * What we really need is a user thread to perform the
1539  * syscalls as we test.
1540  */
1541 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
1542 		if (call->class->system &&
1543 		    strcmp(call->class->system, "syscalls") == 0)
1544 			continue;
1545 #endif
1546 
1547 		pr_info("Testing event %s: ", call->name);
1548 
1549 		/*
1550 		 * If an event is already enabled, someone is using
1551 		 * it and the self test should not be on.
1552 		 */
1553 		if (call->flags & TRACE_EVENT_FL_ENABLED) {
1554 			pr_warning("Enabled event during self test!\n");
1555 			WARN_ON_ONCE(1);
1556 			continue;
1557 		}
1558 
1559 		ftrace_event_enable_disable(call, 1);
1560 		event_test_stuff();
1561 		ftrace_event_enable_disable(call, 0);
1562 
1563 		pr_cont("OK\n");
1564 	}
1565 
1566 	/* Now test at the sub system level */
1567 
1568 	pr_info("Running tests on trace event systems:\n");
1569 
1570 	list_for_each_entry(system, &event_subsystems, list) {
1571 
1572 		/* the ftrace system is special, skip it */
1573 		if (strcmp(system->name, "ftrace") == 0)
1574 			continue;
1575 
1576 		pr_info("Testing event system %s: ", system->name);
1577 
1578 		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
1579 		if (WARN_ON_ONCE(ret)) {
1580 			pr_warning("error enabling system %s\n",
1581 				   system->name);
1582 			continue;
1583 		}
1584 
1585 		event_test_stuff();
1586 
1587 		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
1588 		if (WARN_ON_ONCE(ret))
1589 			pr_warning("error disabling system %s\n",
1590 				   system->name);
1591 
1592 		pr_cont("OK\n");
1593 	}
1594 
1595 	/* Test with all events enabled */
1596 
1597 	pr_info("Running tests on all trace events:\n");
1598 	pr_info("Testing all events: ");
1599 
1600 	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
1601 	if (WARN_ON_ONCE(ret)) {
1602 		pr_warning("error enabling all events\n");
1603 		return;
1604 	}
1605 
1606 	event_test_stuff();
1607 
1608 	/* reset all events */
1609 	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
1610 	if (WARN_ON_ONCE(ret)) {
1611 		pr_warning("error disabling all events\n");
1612 		return;
1613 	}
1614 
1615 	pr_cont("OK\n");
1616 }
1617 
1618 #ifdef CONFIG_FUNCTION_TRACER
1619 
1620 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
1621 
1622 static void
1623 function_test_events_call(unsigned long ip, unsigned long parent_ip)
1624 {
1625 	struct ring_buffer_event *event;
1626 	struct ring_buffer *buffer;
1627 	struct ftrace_entry *entry;
1628 	unsigned long flags;
1629 	long disabled;
1630 	int cpu;
1631 	int pc;
1632 
1633 	pc = preempt_count();
1634 	preempt_disable_notrace();
1635 	cpu = raw_smp_processor_id();
1636 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
1637 
1638 	if (disabled != 1)
1639 		goto out;
1640 
1641 	local_save_flags(flags);
1642 
1643 	event = trace_current_buffer_lock_reserve(&buffer,
1644 						  TRACE_FN, sizeof(*entry),
1645 						  flags, pc);
1646 	if (!event)
1647 		goto out;
1648 	entry	= ring_buffer_event_data(event);
1649 	entry->ip			= ip;
1650 	entry->parent_ip		= parent_ip;
1651 
1652 	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
1653 
1654  out:
1655 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
1656 	preempt_enable_notrace();
1657 }
1658 
1659 static struct ftrace_ops trace_ops __initdata  =
1660 {
1661 	.func = function_test_events_call,
1662 };
1663 
1664 static __init void event_trace_self_test_with_function(void)
1665 {
1666 	register_ftrace_function(&trace_ops);
1667 	pr_info("Running tests again, along with the function tracer\n");
1668 	event_trace_self_tests();
1669 	unregister_ftrace_function(&trace_ops);
1670 }
1671 #else
1672 static __init void event_trace_self_test_with_function(void)
1673 {
1674 }
1675 #endif
1676 
1677 static __init int event_trace_self_tests_init(void)
1678 {
1679 	if (!tracing_selftest_disabled) {
1680 		event_trace_self_tests();
1681 		event_trace_self_test_with_function();
1682 	}
1683 
1684 	return 0;
1685 }
1686 
1687 late_initcall(event_trace_self_tests_init);
1688 
1689 #endif
1690