xref: /linux/kernel/trace/trace_events.c (revision db4e83957f961f9053282409c5062c6baef857a4)
1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of fields of the trace point.
7  *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10 
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 
21 #include <asm/setup.h>
22 
23 #include "trace_output.h"
24 
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27 
28 DEFINE_MUTEX(event_mutex);
29 
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32 
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35 
36 LIST_HEAD(ftrace_events);
37 LIST_HEAD(ftrace_common_fields);
38 
39 struct list_head *
40 trace_get_fields(struct ftrace_event_call *event_call)
41 {
42 	if (!event_call->class->get_fields)
43 		return &event_call->class->fields;
44 	return event_call->class->get_fields(event_call);
45 }
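/*
 * Editor's note: most event classes keep their fields directly in
 * class->fields, which is what the default branch above returns.  The
 * get_fields() hook lets a class hand back a shared field list instead,
 * so several events of one class (for example the syscall events in this
 * kernel version) can reuse a single set of field definitions.
 */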
46 
47 static int __trace_define_field(struct list_head *head, const char *type,
48 				const char *name, int offset, int size,
49 				int is_signed, int filter_type)
50 {
51 	struct ftrace_event_field *field;
52 
53 	field = kzalloc(sizeof(*field), GFP_KERNEL);
54 	if (!field)
55 		goto err;
56 
57 	field->name = kstrdup(name, GFP_KERNEL);
58 	if (!field->name)
59 		goto err;
60 
61 	field->type = kstrdup(type, GFP_KERNEL);
62 	if (!field->type)
63 		goto err;
64 
65 	if (filter_type == FILTER_OTHER)
66 		field->filter_type = filter_assign_type(type);
67 	else
68 		field->filter_type = filter_type;
69 
70 	field->offset = offset;
71 	field->size = size;
72 	field->is_signed = is_signed;
73 
74 	list_add(&field->link, head);
75 
76 	return 0;
77 
78 err:
79 	if (field)
80 		kfree(field->name);
81 	kfree(field);
82 
83 	return -ENOMEM;
84 }
85 
86 int trace_define_field(struct ftrace_event_call *call, const char *type,
87 		       const char *name, int offset, int size, int is_signed,
88 		       int filter_type)
89 {
90 	struct list_head *head;
91 
92 	if (WARN_ON(!call->class))
93 		return 0;
94 
95 	head = trace_get_fields(call);
96 	return __trace_define_field(head, type, name, offset, size,
97 				    is_signed, filter_type);
98 }
99 EXPORT_SYMBOL_GPL(trace_define_field);
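/*
 * Editor's sketch of how a class's define_fields() callback typically
 * uses trace_define_field().  The structure and field names below are
 * hypothetical and for illustration only:
 *
 *	struct my_entry {
 *		struct trace_entry	ent;
 *		unsigned long		addr;
 *	};
 *
 *	static int my_define_fields(struct ftrace_event_call *call)
 *	{
 *		return trace_define_field(call, "unsigned long", "addr",
 *					  offsetof(struct my_entry, addr),
 *					  sizeof(unsigned long), 0,
 *					  FILTER_OTHER);
 *	}
 */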
100 
101 #define __common_field(type, item)					\
102 	ret = __trace_define_field(&ftrace_common_fields, #type,	\
103 				   "common_" #item,			\
104 				   offsetof(typeof(ent), item),		\
105 				   sizeof(ent.item),			\
106 				   is_signed_type(type), FILTER_OTHER);	\
107 	if (ret)							\
108 		return ret;
109 
110 static int trace_define_common_fields(void)
111 {
112 	int ret;
113 	struct trace_entry ent;
114 
115 	__common_field(unsigned short, type);
116 	__common_field(unsigned char, flags);
117 	__common_field(unsigned char, preempt_count);
118 	__common_field(int, pid);
119 	__common_field(int, padding);
120 
121 	return ret;
122 }
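/*
 * The fields registered above are the "common_type", "common_flags",
 * "common_preempt_count", "common_pid" and "common_padding" entries that
 * appear at the top of every event's format file in this kernel version.
 */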
123 
124 void trace_destroy_fields(struct ftrace_event_call *call)
125 {
126 	struct ftrace_event_field *field, *next;
127 	struct list_head *head;
128 
129 	head = trace_get_fields(call);
130 	list_for_each_entry_safe(field, next, head, link) {
131 		list_del(&field->link);
132 		kfree(field->type);
133 		kfree(field->name);
134 		kfree(field);
135 	}
136 }
137 
138 int trace_event_raw_init(struct ftrace_event_call *call)
139 {
140 	int id;
141 
142 	id = register_ftrace_event(&call->event);
143 	if (!id)
144 		return -ENODEV;
145 
146 	return 0;
147 }
148 EXPORT_SYMBOL_GPL(trace_event_raw_init);
149 
150 int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
151 {
152 	switch (type) {
153 	case TRACE_REG_REGISTER:
154 		return tracepoint_probe_register(call->name,
155 						 call->class->probe,
156 						 call);
157 	case TRACE_REG_UNREGISTER:
158 		tracepoint_probe_unregister(call->name,
159 					    call->class->probe,
160 					    call);
161 		return 0;
162 
163 #ifdef CONFIG_PERF_EVENTS
164 	case TRACE_REG_PERF_REGISTER:
165 		return tracepoint_probe_register(call->name,
166 						 call->class->perf_probe,
167 						 call);
168 	case TRACE_REG_PERF_UNREGISTER:
169 		tracepoint_probe_unregister(call->name,
170 					    call->class->perf_probe,
171 					    call);
172 		return 0;
173 #endif
174 	}
175 	return 0;
176 }
177 EXPORT_SYMBOL_GPL(ftrace_event_reg);
178 
179 void trace_event_enable_cmd_record(bool enable)
180 {
181 	struct ftrace_event_call *call;
182 
183 	mutex_lock(&event_mutex);
184 	list_for_each_entry(call, &ftrace_events, list) {
185 		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
186 			continue;
187 
188 		if (enable) {
189 			tracing_start_cmdline_record();
190 			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
191 		} else {
192 			tracing_stop_cmdline_record();
193 			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
194 		}
195 	}
196 	mutex_unlock(&event_mutex);
197 }
198 
199 static int ftrace_event_enable_disable(struct ftrace_event_call *call,
200 					int enable)
201 {
202 	int ret = 0;
203 
204 	switch (enable) {
205 	case 0:
206 		if (call->flags & TRACE_EVENT_FL_ENABLED) {
207 			call->flags &= ~TRACE_EVENT_FL_ENABLED;
208 			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
209 				tracing_stop_cmdline_record();
210 				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
211 			}
212 			call->class->reg(call, TRACE_REG_UNREGISTER);
213 		}
214 		break;
215 	case 1:
216 		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
217 			if (trace_flags & TRACE_ITER_RECORD_CMD) {
218 				tracing_start_cmdline_record();
219 				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
220 			}
221 			ret = call->class->reg(call, TRACE_REG_REGISTER);
222 			if (ret) {
223 				tracing_stop_cmdline_record();
224 				pr_info("event trace: Could not enable event "
225 					"%s\n", call->name);
226 				break;
227 			}
228 			call->flags |= TRACE_EVENT_FL_ENABLED;
229 		}
230 		break;
231 	}
232 
233 	return ret;
234 }
235 
236 static void ftrace_clear_events(void)
237 {
238 	struct ftrace_event_call *call;
239 
240 	mutex_lock(&event_mutex);
241 	list_for_each_entry(call, &ftrace_events, list) {
242 		ftrace_event_enable_disable(call, 0);
243 	}
244 	mutex_unlock(&event_mutex);
245 }
246 
247 static void __put_system(struct event_subsystem *system)
248 {
249 	struct event_filter *filter = system->filter;
250 
251 	WARN_ON_ONCE(system->ref_count == 0);
252 	if (--system->ref_count)
253 		return;
254 
255 	if (filter) {
256 		kfree(filter->filter_string);
257 		kfree(filter);
258 	}
259 	kfree(system->name);
260 	kfree(system);
261 }
262 
263 static void __get_system(struct event_subsystem *system)
264 {
265 	WARN_ON_ONCE(system->ref_count == 0);
266 	system->ref_count++;
267 }
268 
269 static void put_system(struct event_subsystem *system)
270 {
271 	mutex_lock(&event_mutex);
272 	__put_system(system);
273 	mutex_unlock(&event_mutex);
274 }
275 
276 /*
277  * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
278  */
279 static int __ftrace_set_clr_event(const char *match, const char *sub,
280 				  const char *event, int set)
281 {
282 	struct ftrace_event_call *call;
283 	int ret = -EINVAL;
284 
285 	mutex_lock(&event_mutex);
286 	list_for_each_entry(call, &ftrace_events, list) {
287 
288 		if (!call->name || !call->class || !call->class->reg)
289 			continue;
290 
291 		if (match &&
292 		    strcmp(match, call->name) != 0 &&
293 		    strcmp(match, call->class->system) != 0)
294 			continue;
295 
296 		if (sub && strcmp(sub, call->class->system) != 0)
297 			continue;
298 
299 		if (event && strcmp(event, call->name) != 0)
300 			continue;
301 
302 		ftrace_event_enable_disable(call, set);
303 
304 		ret = 0;
305 	}
306 	mutex_unlock(&event_mutex);
307 
308 	return ret;
309 }
310 
311 static int ftrace_set_clr_event(char *buf, int set)
312 {
313 	char *event = NULL, *sub = NULL, *match;
314 
315 	/*
316 	 * The buf format can be <subsystem>:<event-name>
317 	 *  *:<event-name> means any event by that name.
318 	 *  :<event-name> is the same.
319 	 *
320 	 *  <subsystem>:* means all events in that subsystem
321 	 *  <subsystem>: means the same.
322 	 *
323 	 *  <name> (no ':') means all events in a subsystem with
324 	 *  the name <name> or any event that matches <name>
325 	 */
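	/*
	 * For example, from user space (event names are illustrative and
	 * must exist in the running kernel):
	 *
	 *	echo sched:sched_switch > set_event	- one event
	 *	echo sched: > set_event		- whole subsystem
	 *	echo '!sched:' >> set_event		- disable it again
	 *
	 * The leading '!' is stripped by ftrace_event_write() before this
	 * function sees the string.
	 */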
326 
327 	match = strsep(&buf, ":");
328 	if (buf) {
329 		sub = match;
330 		event = buf;
331 		match = NULL;
332 
333 		if (!strlen(sub) || strcmp(sub, "*") == 0)
334 			sub = NULL;
335 		if (!strlen(event) || strcmp(event, "*") == 0)
336 			event = NULL;
337 	}
338 
339 	return __ftrace_set_clr_event(match, sub, event, set);
340 }
341 
342 /**
343  * trace_set_clr_event - enable or disable an event
344  * @system: system name to match (NULL for any system)
345  * @event: event name to match (NULL for all events within the system)
346  * @set: 1 to enable, 0 to disable
347  *
348  * This is a way for other parts of the kernel to enable or disable
349  * event recording.
350  *
351  * Returns 0 on success, -EINVAL if the parameters do not match any
352  * registered events.
353  */
354 int trace_set_clr_event(const char *system, const char *event, int set)
355 {
356 	return __ftrace_set_clr_event(NULL, system, event, set);
357 }
358 EXPORT_SYMBOL_GPL(trace_set_clr_event);
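/*
 * Hypothetical usage from elsewhere in the kernel ("sched" is just an
 * illustrative subsystem name):
 *
 *	err = trace_set_clr_event("sched", NULL, 1);	enable all sched events
 *	err = trace_set_clr_event(NULL, NULL, 0);	disable every event
 */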
359 
360 /* 128 should be much more than enough */
361 #define EVENT_BUF_SIZE		127
362 
363 static ssize_t
364 ftrace_event_write(struct file *file, const char __user *ubuf,
365 		   size_t cnt, loff_t *ppos)
366 {
367 	struct trace_parser parser;
368 	ssize_t read, ret;
369 
370 	if (!cnt)
371 		return 0;
372 
373 	ret = tracing_update_buffers();
374 	if (ret < 0)
375 		return ret;
376 
377 	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
378 		return -ENOMEM;
379 
380 	read = trace_get_user(&parser, ubuf, cnt, ppos);
381 
382 	if (read >= 0 && trace_parser_loaded(&parser)) {
383 		int set = 1;
384 
385 		if (*parser.buffer == '!')
386 			set = 0;
387 
388 		parser.buffer[parser.idx] = 0;
389 
390 		ret = ftrace_set_clr_event(parser.buffer + !set, set);
391 		if (ret)
392 			goto out_put;
393 	}
394 
395 	ret = read;
396 
397  out_put:
398 	trace_parser_put(&parser);
399 
400 	return ret;
401 }
402 
403 static void *
404 t_next(struct seq_file *m, void *v, loff_t *pos)
405 {
406 	struct ftrace_event_call *call = v;
407 
408 	(*pos)++;
409 
410 	list_for_each_entry_continue(call, &ftrace_events, list) {
411 		/*
412 		 * The ftrace subsystem is for showing formats only;
413 		 * its events cannot be enabled or disabled via the event files.
414 		 */
415 		if (call->class && call->class->reg)
416 			return call;
417 	}
418 
419 	return NULL;
420 }
421 
422 static void *t_start(struct seq_file *m, loff_t *pos)
423 {
424 	struct ftrace_event_call *call;
425 	loff_t l;
426 
427 	mutex_lock(&event_mutex);
428 
429 	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
430 	for (l = 0; l <= *pos; ) {
431 		call = t_next(m, call, &l);
432 		if (!call)
433 			break;
434 	}
435 	return call;
436 }
437 
438 static void *
439 s_next(struct seq_file *m, void *v, loff_t *pos)
440 {
441 	struct ftrace_event_call *call = v;
442 
443 	(*pos)++;
444 
445 	list_for_each_entry_continue(call, &ftrace_events, list) {
446 		if (call->flags & TRACE_EVENT_FL_ENABLED)
447 			return call;
448 	}
449 
450 	return NULL;
451 }
452 
453 static void *s_start(struct seq_file *m, loff_t *pos)
454 {
455 	struct ftrace_event_call *call;
456 	loff_t l;
457 
458 	mutex_lock(&event_mutex);
459 
460 	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
461 	for (l = 0; l <= *pos; ) {
462 		call = s_next(m, call, &l);
463 		if (!call)
464 			break;
465 	}
466 	return call;
467 }
468 
469 static int t_show(struct seq_file *m, void *v)
470 {
471 	struct ftrace_event_call *call = v;
472 
473 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
474 		seq_printf(m, "%s:", call->class->system);
475 	seq_printf(m, "%s\n", call->name);
476 
477 	return 0;
478 }
479 
480 static void t_stop(struct seq_file *m, void *p)
481 {
482 	mutex_unlock(&event_mutex);
483 }
484 
485 static int
486 ftrace_event_seq_open(struct inode *inode, struct file *file)
487 {
488 	const struct seq_operations *seq_ops;
489 
490 	if ((file->f_mode & FMODE_WRITE) &&
491 	    (file->f_flags & O_TRUNC))
492 		ftrace_clear_events();
493 
494 	seq_ops = inode->i_private;
495 	return seq_open(file, seq_ops);
496 }
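/*
 * Note: this open routine is shared by available_events and set_event.
 * Opening set_event for writing with O_TRUNC (a plain "echo > set_event"
 * from a shell, for instance) therefore clears every enabled event.
 */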
497 
498 static ssize_t
499 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
500 		  loff_t *ppos)
501 {
502 	struct ftrace_event_call *call = filp->private_data;
503 	char *buf;
504 
505 	if (call->flags & TRACE_EVENT_FL_ENABLED)
506 		buf = "1\n";
507 	else
508 		buf = "0\n";
509 
510 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
511 }
512 
513 static ssize_t
514 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
515 		   loff_t *ppos)
516 {
517 	struct ftrace_event_call *call = filp->private_data;
518 	unsigned long val;
519 	int ret;
520 
521 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
522 	if (ret)
523 		return ret;
524 
525 	ret = tracing_update_buffers();
526 	if (ret < 0)
527 		return ret;
528 
529 	switch (val) {
530 	case 0:
531 	case 1:
532 		mutex_lock(&event_mutex);
533 		ret = ftrace_event_enable_disable(call, val);
534 		mutex_unlock(&event_mutex);
535 		break;
536 
537 	default:
538 		return -EINVAL;
539 	}
540 
541 	*ppos += cnt;
542 
543 	return ret ? ret : cnt;
544 }
545 
546 static ssize_t
547 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
548 		   loff_t *ppos)
549 {
550 	const char set_to_char[4] = { '?', '0', '1', 'X' };
551 	struct event_subsystem *system = filp->private_data;
552 	struct ftrace_event_call *call;
553 	char buf[2];
554 	int set = 0;
555 	int ret;
556 
557 	mutex_lock(&event_mutex);
558 	list_for_each_entry(call, &ftrace_events, list) {
559 		if (!call->name || !call->class || !call->class->reg)
560 			continue;
561 
562 		if (system && strcmp(call->class->system, system->name) != 0)
563 			continue;
564 
565 		/*
566 		 * We need to find out if all the events are set
567 		 * or if all events are cleared, or if we have
568 		 * a mixture.
569 		 */
570 		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
571 
572 		/*
573 		 * If we have a mixture, no need to look further.
574 		 */
575 		if (set == 3)
576 			break;
577 	}
578 	mutex_unlock(&event_mutex);
579 
580 	buf[0] = set_to_char[set];
581 	buf[1] = '\n';
582 
583 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
584 
585 	return ret;
586 }
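/*
 * The single character read back above means:
 *	'0' - no event in the subsystem is enabled
 *	'1' - every event in the subsystem is enabled
 *	'X' - some events are enabled, others are not
 * ('?' would only be seen if no events matched at all.)
 */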
587 
588 static ssize_t
589 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
590 		    loff_t *ppos)
591 {
592 	struct event_subsystem *system = filp->private_data;
593 	const char *name = NULL;
594 	unsigned long val;
595 	ssize_t ret;
596 
597 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
598 	if (ret)
599 		return ret;
600 
601 	ret = tracing_update_buffers();
602 	if (ret < 0)
603 		return ret;
604 
605 	if (val != 0 && val != 1)
606 		return -EINVAL;
607 
608 	/*
609 	 * Opening of "enable" adds a ref count to system,
610 	 * so the name is safe to use.
611 	 */
612 	if (system)
613 		name = system->name;
614 
615 	ret = __ftrace_set_clr_event(NULL, name, NULL, val);
616 	if (ret)
617 		goto out;
618 
619 	ret = cnt;
620 
621 out:
622 	*ppos += cnt;
623 
624 	return ret;
625 }
626 
627 enum {
628 	FORMAT_HEADER		= 1,
629 	FORMAT_FIELD_SEPERATOR	= 2,
630 	FORMAT_PRINTFMT		= 3,
631 };
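/*
 * These values are cast to (void *) and used as pseudo "positions" by the
 * seq_file iterator for the format file below: first the header, then the
 * common fields, a separating blank line, the event's own fields, and
 * finally the print fmt line.
 */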
632 
633 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
634 {
635 	struct ftrace_event_call *call = m->private;
636 	struct ftrace_event_field *field;
637 	struct list_head *common_head = &ftrace_common_fields;
638 	struct list_head *head = trace_get_fields(call);
639 
640 	(*pos)++;
641 
642 	switch ((unsigned long)v) {
643 	case FORMAT_HEADER:
644 		if (unlikely(list_empty(common_head)))
645 			return NULL;
646 
647 		field = list_entry(common_head->prev,
648 				   struct ftrace_event_field, link);
649 		return field;
650 
651 	case FORMAT_FIELD_SEPERATOR:
652 		if (unlikely(list_empty(head)))
653 			return NULL;
654 
655 		field = list_entry(head->prev, struct ftrace_event_field, link);
656 		return field;
657 
658 	case FORMAT_PRINTFMT:
659 		/* all done */
660 		return NULL;
661 	}
662 
663 	field = v;
664 	if (field->link.prev == common_head)
665 		return (void *)FORMAT_FIELD_SEPERATOR;
666 	else if (field->link.prev == head)
667 		return (void *)FORMAT_PRINTFMT;
668 
669 	field = list_entry(field->link.prev, struct ftrace_event_field, link);
670 
671 	return field;
672 }
673 
674 static void *f_start(struct seq_file *m, loff_t *pos)
675 {
676 	loff_t l = 0;
677 	void *p;
678 
679 	/* Start by showing the header */
680 	if (!*pos)
681 		return (void *)FORMAT_HEADER;
682 
683 	p = (void *)FORMAT_HEADER;
684 	do {
685 		p = f_next(m, p, &l);
686 	} while (p && l < *pos);
687 
688 	return p;
689 }
690 
691 static int f_show(struct seq_file *m, void *v)
692 {
693 	struct ftrace_event_call *call = m->private;
694 	struct ftrace_event_field *field;
695 	const char *array_descriptor;
696 
697 	switch ((unsigned long)v) {
698 	case FORMAT_HEADER:
699 		seq_printf(m, "name: %s\n", call->name);
700 		seq_printf(m, "ID: %d\n", call->event.type);
701 		seq_printf(m, "format:\n");
702 		return 0;
703 
704 	case FORMAT_FIELD_SEPERATOR:
705 		seq_putc(m, '\n');
706 		return 0;
707 
708 	case FORMAT_PRINTFMT:
709 		seq_printf(m, "\nprint fmt: %s\n",
710 			   call->print_fmt);
711 		return 0;
712 	}
713 
714 	field = v;
715 
716 	/*
717 	 * Smartly shows the array type (except for dynamic arrays).
718 	 * Normal:
719 	 *	field:TYPE VAR
720 	 * If TYPE := TYPE[LEN], it is shown:
721 	 *	field:TYPE VAR[LEN]
722 	 */
723 	array_descriptor = strchr(field->type, '[');
724 
725 	if (!strncmp(field->type, "__data_loc", 10))
726 		array_descriptor = NULL;
727 
728 	if (!array_descriptor)
729 		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
730 			   field->type, field->name, field->offset,
731 			   field->size, !!field->is_signed);
732 	else
733 		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
734 			   (int)(array_descriptor - field->type),
735 			   field->type, field->name,
736 			   array_descriptor, field->offset,
737 			   field->size, !!field->is_signed);
738 
739 	return 0;
740 }
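/*
 * Taken together, f_start/f_next/f_show produce output of roughly this
 * shape (abridged; the ID, field list and print fmt depend on the event):
 *
 *	name: <event name>
 *	ID: <event id>
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		... (the remaining common_* fields)
 *
 *		... (the event's own fields)
 *
 *	print fmt: <the event's print_fmt string>
 */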
741 
742 static void f_stop(struct seq_file *m, void *p)
743 {
744 }
745 
746 static const struct seq_operations trace_format_seq_ops = {
747 	.start		= f_start,
748 	.next		= f_next,
749 	.stop		= f_stop,
750 	.show		= f_show,
751 };
752 
753 static int trace_format_open(struct inode *inode, struct file *file)
754 {
755 	struct ftrace_event_call *call = inode->i_private;
756 	struct seq_file *m;
757 	int ret;
758 
759 	ret = seq_open(file, &trace_format_seq_ops);
760 	if (ret < 0)
761 		return ret;
762 
763 	m = file->private_data;
764 	m->private = call;
765 
766 	return 0;
767 }
768 
769 static ssize_t
770 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
771 {
772 	struct ftrace_event_call *call = filp->private_data;
773 	struct trace_seq *s;
774 	int r;
775 
776 	if (*ppos)
777 		return 0;
778 
779 	s = kmalloc(sizeof(*s), GFP_KERNEL);
780 	if (!s)
781 		return -ENOMEM;
782 
783 	trace_seq_init(s);
784 	trace_seq_printf(s, "%d\n", call->event.type);
785 
786 	r = simple_read_from_buffer(ubuf, cnt, ppos,
787 				    s->buffer, s->len);
788 	kfree(s);
789 	return r;
790 }
791 
792 static ssize_t
793 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
794 		  loff_t *ppos)
795 {
796 	struct ftrace_event_call *call = filp->private_data;
797 	struct trace_seq *s;
798 	int r;
799 
800 	if (*ppos)
801 		return 0;
802 
803 	s = kmalloc(sizeof(*s), GFP_KERNEL);
804 	if (!s)
805 		return -ENOMEM;
806 
807 	trace_seq_init(s);
808 
809 	print_event_filter(call, s);
810 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
811 
812 	kfree(s);
813 
814 	return r;
815 }
816 
817 static ssize_t
818 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
819 		   loff_t *ppos)
820 {
821 	struct ftrace_event_call *call = filp->private_data;
822 	char *buf;
823 	int err;
824 
825 	if (cnt >= PAGE_SIZE)
826 		return -EINVAL;
827 
828 	buf = (char *)__get_free_page(GFP_TEMPORARY);
829 	if (!buf)
830 		return -ENOMEM;
831 
832 	if (copy_from_user(buf, ubuf, cnt)) {
833 		free_page((unsigned long) buf);
834 		return -EFAULT;
835 	}
836 	buf[cnt] = '\0';
837 
838 	err = apply_event_filter(call, buf);
839 	free_page((unsigned long) buf);
840 	if (err < 0)
841 		return err;
842 
843 	*ppos += cnt;
844 
845 	return cnt;
846 }
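/*
 * Filters are boolean expressions over the event's fields, written to the
 * per-event "filter" file, e.g. (field name illustrative):
 *
 *	echo 'common_pid == 1' > events/<system>/<event>/filter
 *
 * Parsing and evaluation live in apply_event_filter(), defined in
 * trace_events_filter.c.
 */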
847 
848 static LIST_HEAD(event_subsystems);
849 
850 static int subsystem_open(struct inode *inode, struct file *filp)
851 {
852 	struct event_subsystem *system = NULL;
853 	int ret;
854 
855 	if (!inode->i_private)
856 		goto skip_search;
857 
858 	/* Make sure the system still exists */
859 	mutex_lock(&event_mutex);
860 	list_for_each_entry(system, &event_subsystems, list) {
861 		if (system == inode->i_private) {
862 			/* Don't open systems with no events */
863 			if (!system->nr_events) {
864 				system = NULL;
865 				break;
866 			}
867 			__get_system(system);
868 			break;
869 		}
870 	}
871 	mutex_unlock(&event_mutex);
872 
873 	if (system != inode->i_private)
874 		return -ENODEV;
875 
876  skip_search:
877 	ret = tracing_open_generic(inode, filp);
878 	if (ret < 0 && system)
879 		put_system(system);
880 
881 	return ret;
882 }
883 
884 static int subsystem_release(struct inode *inode, struct file *file)
885 {
886 	struct event_subsystem *system = inode->i_private;
887 
888 	if (system)
889 		put_system(system);
890 
891 	return 0;
892 }
893 
894 static ssize_t
895 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
896 		      loff_t *ppos)
897 {
898 	struct event_subsystem *system = filp->private_data;
899 	struct trace_seq *s;
900 	int r;
901 
902 	if (*ppos)
903 		return 0;
904 
905 	s = kmalloc(sizeof(*s), GFP_KERNEL);
906 	if (!s)
907 		return -ENOMEM;
908 
909 	trace_seq_init(s);
910 
911 	print_subsystem_event_filter(system, s);
912 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
913 
914 	kfree(s);
915 
916 	return r;
917 }
918 
919 static ssize_t
920 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
921 		       loff_t *ppos)
922 {
923 	struct event_subsystem *system = filp->private_data;
924 	char *buf;
925 	int err;
926 
927 	if (cnt >= PAGE_SIZE)
928 		return -EINVAL;
929 
930 	buf = (char *)__get_free_page(GFP_TEMPORARY);
931 	if (!buf)
932 		return -ENOMEM;
933 
934 	if (copy_from_user(buf, ubuf, cnt)) {
935 		free_page((unsigned long) buf);
936 		return -EFAULT;
937 	}
938 	buf[cnt] = '\0';
939 
940 	err = apply_subsystem_event_filter(system, buf);
941 	free_page((unsigned long) buf);
942 	if (err < 0)
943 		return err;
944 
945 	*ppos += cnt;
946 
947 	return cnt;
948 }
949 
950 static ssize_t
951 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
952 {
953 	int (*func)(struct trace_seq *s) = filp->private_data;
954 	struct trace_seq *s;
955 	int r;
956 
957 	if (*ppos)
958 		return 0;
959 
960 	s = kmalloc(sizeof(*s), GFP_KERNEL);
961 	if (!s)
962 		return -ENOMEM;
963 
964 	trace_seq_init(s);
965 
966 	func(s);
967 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
968 
969 	kfree(s);
970 
971 	return r;
972 }
973 
974 static const struct seq_operations show_event_seq_ops = {
975 	.start = t_start,
976 	.next = t_next,
977 	.show = t_show,
978 	.stop = t_stop,
979 };
980 
981 static const struct seq_operations show_set_event_seq_ops = {
982 	.start = s_start,
983 	.next = s_next,
984 	.show = t_show,
985 	.stop = t_stop,
986 };
987 
988 static const struct file_operations ftrace_avail_fops = {
989 	.open = ftrace_event_seq_open,
990 	.read = seq_read,
991 	.llseek = seq_lseek,
992 	.release = seq_release,
993 };
994 
995 static const struct file_operations ftrace_set_event_fops = {
996 	.open = ftrace_event_seq_open,
997 	.read = seq_read,
998 	.write = ftrace_event_write,
999 	.llseek = seq_lseek,
1000 	.release = seq_release,
1001 };
1002 
1003 static const struct file_operations ftrace_enable_fops = {
1004 	.open = tracing_open_generic,
1005 	.read = event_enable_read,
1006 	.write = event_enable_write,
1007 	.llseek = default_llseek,
1008 };
1009 
1010 static const struct file_operations ftrace_event_format_fops = {
1011 	.open = trace_format_open,
1012 	.read = seq_read,
1013 	.llseek = seq_lseek,
1014 	.release = seq_release,
1015 };
1016 
1017 static const struct file_operations ftrace_event_id_fops = {
1018 	.open = tracing_open_generic,
1019 	.read = event_id_read,
1020 	.llseek = default_llseek,
1021 };
1022 
1023 static const struct file_operations ftrace_event_filter_fops = {
1024 	.open = tracing_open_generic,
1025 	.read = event_filter_read,
1026 	.write = event_filter_write,
1027 	.llseek = default_llseek,
1028 };
1029 
1030 static const struct file_operations ftrace_subsystem_filter_fops = {
1031 	.open = subsystem_open,
1032 	.read = subsystem_filter_read,
1033 	.write = subsystem_filter_write,
1034 	.llseek = default_llseek,
1035 	.release = subsystem_release,
1036 };
1037 
1038 static const struct file_operations ftrace_system_enable_fops = {
1039 	.open = subsystem_open,
1040 	.read = system_enable_read,
1041 	.write = system_enable_write,
1042 	.llseek = default_llseek,
1043 	.release = subsystem_release,
1044 };
1045 
1046 static const struct file_operations ftrace_show_header_fops = {
1047 	.open = tracing_open_generic,
1048 	.read = show_header,
1049 	.llseek = default_llseek,
1050 };
1051 
1052 static struct dentry *event_trace_events_dir(void)
1053 {
1054 	static struct dentry *d_tracer;
1055 	static struct dentry *d_events;
1056 
1057 	if (d_events)
1058 		return d_events;
1059 
1060 	d_tracer = tracing_init_dentry();
1061 	if (!d_tracer)
1062 		return NULL;
1063 
1064 	d_events = debugfs_create_dir("events", d_tracer);
1065 	if (!d_events)
1066 		pr_warning("Could not create debugfs "
1067 			   "'events' directory\n");
1068 
1069 	return d_events;
1070 }
1071 
1072 static struct dentry *
1073 event_subsystem_dir(const char *name, struct dentry *d_events)
1074 {
1075 	struct event_subsystem *system;
1076 	struct dentry *entry;
1077 
1078 	/* First see if we already created this dir */
1079 	list_for_each_entry(system, &event_subsystems, list) {
1080 		if (strcmp(system->name, name) == 0) {
1081 			__get_system(system);
1082 			system->nr_events++;
1083 			return system->entry;
1084 		}
1085 	}
1086 
1087 	/* need to create new entry */
1088 	system = kmalloc(sizeof(*system), GFP_KERNEL);
1089 	if (!system) {
1090 		pr_warning("No memory to create event subsystem %s\n",
1091 			   name);
1092 		return d_events;
1093 	}
1094 
1095 	system->entry = debugfs_create_dir(name, d_events);
1096 	if (!system->entry) {
1097 		pr_warning("Could not create event subsystem %s\n",
1098 			   name);
1099 		kfree(system);
1100 		return d_events;
1101 	}
1102 
1103 	system->nr_events = 1;
1104 	system->ref_count = 1;
1105 	system->name = kstrdup(name, GFP_KERNEL);
1106 	if (!system->name) {
1107 		debugfs_remove(system->entry);
1108 		kfree(system);
1109 		return d_events;
1110 	}
1111 
1112 	list_add(&system->list, &event_subsystems);
1113 
1114 	system->filter = NULL;
1115 
1116 	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1117 	if (!system->filter) {
1118 		pr_warning("Could not allocate filter for subsystem "
1119 			   "'%s'\n", name);
1120 		return system->entry;
1121 	}
1122 
1123 	entry = debugfs_create_file("filter", 0644, system->entry, system,
1124 				    &ftrace_subsystem_filter_fops);
1125 	if (!entry) {
1126 		kfree(system->filter);
1127 		system->filter = NULL;
1128 		pr_warning("Could not create debugfs "
1129 			   "'%s/filter' entry\n", name);
1130 	}
1131 
1132 	trace_create_file("enable", 0644, system->entry, system,
1133 			  &ftrace_system_enable_fops);
1134 
1135 	return system->entry;
1136 }
1137 
1138 static int
1139 event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
1140 		 const struct file_operations *id,
1141 		 const struct file_operations *enable,
1142 		 const struct file_operations *filter,
1143 		 const struct file_operations *format)
1144 {
1145 	struct list_head *head;
1146 	int ret;
1147 
1148 	/*
1149 	 * If the trace point header did not define TRACE_SYSTEM
1150 	 * then the system would be called "TRACE_SYSTEM".
1151 	 */
1152 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
1153 		d_events = event_subsystem_dir(call->class->system, d_events);
1154 
1155 	call->dir = debugfs_create_dir(call->name, d_events);
1156 	if (!call->dir) {
1157 		pr_warning("Could not create debugfs "
1158 			   "'%s' directory\n", call->name);
1159 		return -1;
1160 	}
1161 
1162 	if (call->class->reg)
1163 		trace_create_file("enable", 0644, call->dir, call,
1164 				  enable);
1165 
1166 #ifdef CONFIG_PERF_EVENTS
1167 	if (call->event.type && call->class->reg)
1168 		trace_create_file("id", 0444, call->dir, call,
1169 				  id);
1170 #endif
1171 
1172 	/*
1173 	 * Other events may have the same class. Only update
1174 	 * the fields if they are not already defined.
1175 	 */
1176 	head = trace_get_fields(call);
1177 	if (list_empty(head)) {
1178 		ret = call->class->define_fields(call);
1179 		if (ret < 0) {
1180 			pr_warning("Could not initialize trace point"
1181 				   " events/%s\n", call->name);
1182 			return ret;
1183 		}
1184 	}
1185 	trace_create_file("filter", 0644, call->dir, call,
1186 			  filter);
1187 
1188 	trace_create_file("format", 0444, call->dir, call,
1189 			  format);
1190 
1191 	return 0;
1192 }
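/*
 * After event_create_dir(), each events/<system>/<event>/ directory holds
 * up to four files: "enable", "id" (only when CONFIG_PERF_EVENTS is set
 * and the event has a type), "filter" and "format".
 */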
1193 
1194 static int
1195 __trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
1196 		       const struct file_operations *id,
1197 		       const struct file_operations *enable,
1198 		       const struct file_operations *filter,
1199 		       const struct file_operations *format)
1200 {
1201 	struct dentry *d_events;
1202 	int ret;
1203 
1204 	/* The linker may leave blanks */
1205 	if (!call->name)
1206 		return -EINVAL;
1207 
1208 	if (call->class->raw_init) {
1209 		ret = call->class->raw_init(call);
1210 		if (ret < 0) {
1211 			if (ret != -ENOSYS)
1212 				pr_warning("Could not initialize trace events/%s\n",
1213 					   call->name);
1214 			return ret;
1215 		}
1216 	}
1217 
1218 	d_events = event_trace_events_dir();
1219 	if (!d_events)
1220 		return -ENOENT;
1221 
1222 	ret = event_create_dir(call, d_events, id, enable, filter, format);
1223 	if (!ret)
1224 		list_add(&call->list, &ftrace_events);
1225 	call->mod = mod;
1226 
1227 	return ret;
1228 }
1229 
1230 /* Add an additional event_call dynamically */
1231 int trace_add_event_call(struct ftrace_event_call *call)
1232 {
1233 	int ret;
1234 	mutex_lock(&event_mutex);
1235 	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
1236 				     &ftrace_enable_fops,
1237 				     &ftrace_event_filter_fops,
1238 				     &ftrace_event_format_fops);
1239 	mutex_unlock(&event_mutex);
1240 	return ret;
1241 }
1242 
1243 static void remove_subsystem_dir(const char *name)
1244 {
1245 	struct event_subsystem *system;
1246 
1247 	if (strcmp(name, TRACE_SYSTEM) == 0)
1248 		return;
1249 
1250 	list_for_each_entry(system, &event_subsystems, list) {
1251 		if (strcmp(system->name, name) == 0) {
1252 			if (!--system->nr_events) {
1253 				debugfs_remove_recursive(system->entry);
1254 				list_del(&system->list);
1255 				__put_system(system);
1256 			}
1257 			break;
1258 		}
1259 	}
1260 }
1261 
1262 /*
1263  * Must be called under locking both of event_mutex and trace_event_mutex.
1264  */
1265 static void __trace_remove_event_call(struct ftrace_event_call *call)
1266 {
1267 	ftrace_event_enable_disable(call, 0);
1268 	if (call->event.funcs)
1269 		__unregister_ftrace_event(&call->event);
1270 	debugfs_remove_recursive(call->dir);
1271 	list_del(&call->list);
1272 	trace_destroy_fields(call);
1273 	destroy_preds(call);
1274 	remove_subsystem_dir(call->class->system);
1275 }
1276 
1277 /* Remove an event_call */
1278 void trace_remove_event_call(struct ftrace_event_call *call)
1279 {
1280 	mutex_lock(&event_mutex);
1281 	down_write(&trace_event_mutex);
1282 	__trace_remove_event_call(call);
1283 	up_write(&trace_event_mutex);
1284 	mutex_unlock(&event_mutex);
1285 }
1286 
1287 #define for_each_event(event, start, end)			\
1288 	for (event = start;					\
1289 	     (unsigned long)event < (unsigned long)end;		\
1290 	     event++)
1291 
1292 #ifdef CONFIG_MODULES
1293 
1294 static LIST_HEAD(ftrace_module_file_list);
1295 
1296 /*
1297  * Modules must own their file_operations so that module
1298  * reference counting works correctly.
1299  */
1300 struct ftrace_module_file_ops {
1301 	struct list_head		list;
1302 	struct module			*mod;
1303 	struct file_operations		id;
1304 	struct file_operations		enable;
1305 	struct file_operations		format;
1306 	struct file_operations		filter;
1307 };
1308 
1309 static struct ftrace_module_file_ops *
1310 trace_create_file_ops(struct module *mod)
1311 {
1312 	struct ftrace_module_file_ops *file_ops;
1313 
1314 	/*
1315 	 * This is a bit of a PITA. To allow for correct reference
1316 	 * counting, modules must "own" their file_operations.
1317 	 * To do this, we allocate the file operations that will be
1318 	 * used in the event directory.
1319 	 */
1320 
1321 	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1322 	if (!file_ops)
1323 		return NULL;
1324 
1325 	file_ops->mod = mod;
1326 
1327 	file_ops->id = ftrace_event_id_fops;
1328 	file_ops->id.owner = mod;
1329 
1330 	file_ops->enable = ftrace_enable_fops;
1331 	file_ops->enable.owner = mod;
1332 
1333 	file_ops->filter = ftrace_event_filter_fops;
1334 	file_ops->filter.owner = mod;
1335 
1336 	file_ops->format = ftrace_event_format_fops;
1337 	file_ops->format.owner = mod;
1338 
1339 	list_add(&file_ops->list, &ftrace_module_file_list);
1340 
1341 	return file_ops;
1342 }
1343 
1344 static void trace_module_add_events(struct module *mod)
1345 {
1346 	struct ftrace_module_file_ops *file_ops = NULL;
1347 	struct ftrace_event_call **call, **start, **end;
1348 
1349 	start = mod->trace_events;
1350 	end = mod->trace_events + mod->num_trace_events;
1351 
1352 	if (start == end)
1353 		return;
1354 
1355 	file_ops = trace_create_file_ops(mod);
1356 	if (!file_ops)
1357 		return;
1358 
1359 	for_each_event(call, start, end) {
1360 		__trace_add_event_call(*call, mod,
1361 				       &file_ops->id, &file_ops->enable,
1362 				       &file_ops->filter, &file_ops->format);
1363 	}
1364 }
1365 
1366 static void trace_module_remove_events(struct module *mod)
1367 {
1368 	struct ftrace_module_file_ops *file_ops;
1369 	struct ftrace_event_call *call, *p;
1370 	bool found = false;
1371 
1372 	down_write(&trace_event_mutex);
1373 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
1374 		if (call->mod == mod) {
1375 			found = true;
1376 			__trace_remove_event_call(call);
1377 		}
1378 	}
1379 
1380 	/* Now free the file_operations */
1381 	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1382 		if (file_ops->mod == mod)
1383 			break;
1384 	}
1385 	if (&file_ops->list != &ftrace_module_file_list) {
1386 		list_del(&file_ops->list);
1387 		kfree(file_ops);
1388 	}
1389 
1390 	/*
1391 	 * It is safest to reset the ring buffer if the module being unloaded
1392 	 * registered any events.
1393 	 */
1394 	if (found)
1395 		tracing_reset_current_online_cpus();
1396 	up_write(&trace_event_mutex);
1397 }
1398 
1399 static int trace_module_notify(struct notifier_block *self,
1400 			       unsigned long val, void *data)
1401 {
1402 	struct module *mod = data;
1403 
1404 	mutex_lock(&event_mutex);
1405 	switch (val) {
1406 	case MODULE_STATE_COMING:
1407 		trace_module_add_events(mod);
1408 		break;
1409 	case MODULE_STATE_GOING:
1410 		trace_module_remove_events(mod);
1411 		break;
1412 	}
1413 	mutex_unlock(&event_mutex);
1414 
1415 	return 0;
1416 }
1417 #else
1418 static int trace_module_notify(struct notifier_block *self,
1419 			       unsigned long val, void *data)
1420 {
1421 	return 0;
1422 }
1423 #endif /* CONFIG_MODULES */
1424 
1425 static struct notifier_block trace_module_nb = {
1426 	.notifier_call = trace_module_notify,
1427 	.priority = 0,
1428 };
1429 
1430 extern struct ftrace_event_call *__start_ftrace_events[];
1431 extern struct ftrace_event_call *__stop_ftrace_events[];
1432 
1433 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
1434 
1435 static __init int setup_trace_event(char *str)
1436 {
1437 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
1438 	ring_buffer_expanded = 1;
1439 	tracing_selftest_disabled = 1;
1440 
1441 	return 1;
1442 }
1443 __setup("trace_event=", setup_trace_event);
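/*
 * Boot command line example (event names are illustrative):
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * The list is stored in bootup_event_buf and the events are enabled when
 * event_trace_init() runs at fs_initcall time.
 */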
1444 
1445 static __init int event_trace_init(void)
1446 {
1447 	struct ftrace_event_call **call;
1448 	struct dentry *d_tracer;
1449 	struct dentry *entry;
1450 	struct dentry *d_events;
1451 	int ret;
1452 	char *buf = bootup_event_buf;
1453 	char *token;
1454 
1455 	d_tracer = tracing_init_dentry();
1456 	if (!d_tracer)
1457 		return 0;
1458 
1459 	entry = debugfs_create_file("available_events", 0444, d_tracer,
1460 				    (void *)&show_event_seq_ops,
1461 				    &ftrace_avail_fops);
1462 	if (!entry)
1463 		pr_warning("Could not create debugfs "
1464 			   "'available_events' entry\n");
1465 
1466 	entry = debugfs_create_file("set_event", 0644, d_tracer,
1467 				    (void *)&show_set_event_seq_ops,
1468 				    &ftrace_set_event_fops);
1469 	if (!entry)
1470 		pr_warning("Could not create debugfs "
1471 			   "'set_event' entry\n");
1472 
1473 	d_events = event_trace_events_dir();
1474 	if (!d_events)
1475 		return 0;
1476 
1477 	/* ring buffer internal formats */
1478 	trace_create_file("header_page", 0444, d_events,
1479 			  ring_buffer_print_page_header,
1480 			  &ftrace_show_header_fops);
1481 
1482 	trace_create_file("header_event", 0444, d_events,
1483 			  ring_buffer_print_entry_header,
1484 			  &ftrace_show_header_fops);
1485 
1486 	trace_create_file("enable", 0644, d_events,
1487 			  NULL, &ftrace_system_enable_fops);
1488 
1489 	if (trace_define_common_fields())
1490 		pr_warning("tracing: Failed to allocate common fields\n");
1491 
1492 	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
1493 		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
1494 				       &ftrace_enable_fops,
1495 				       &ftrace_event_filter_fops,
1496 				       &ftrace_event_format_fops);
1497 	}
1498 
1499 	while (true) {
1500 		token = strsep(&buf, ",");
1501 
1502 		if (!token)
1503 			break;
1504 		if (!*token)
1505 			continue;
1506 
1507 		ret = ftrace_set_clr_event(token, 1);
1508 		if (ret)
1509 			pr_warning("Failed to enable trace event: %s\n", token);
1510 	}
1511 
1512 	ret = register_module_notifier(&trace_module_nb);
1513 	if (ret)
1514 		pr_warning("Failed to register trace events module notifier\n");
1515 
1516 	return 0;
1517 }
1518 fs_initcall(event_trace_init);
1519 
1520 #ifdef CONFIG_FTRACE_STARTUP_TEST
1521 
1522 static DEFINE_SPINLOCK(test_spinlock);
1523 static DEFINE_SPINLOCK(test_spinlock_irq);
1524 static DEFINE_MUTEX(test_mutex);
1525 
1526 static __init void test_work(struct work_struct *dummy)
1527 {
1528 	spin_lock(&test_spinlock);
1529 	spin_lock_irq(&test_spinlock_irq);
1530 	udelay(1);
1531 	spin_unlock_irq(&test_spinlock_irq);
1532 	spin_unlock(&test_spinlock);
1533 
1534 	mutex_lock(&test_mutex);
1535 	msleep(1);
1536 	mutex_unlock(&test_mutex);
1537 }
1538 
1539 static __init int event_test_thread(void *unused)
1540 {
1541 	void *test_malloc;
1542 
1543 	test_malloc = kmalloc(1234, GFP_KERNEL);
1544 	if (!test_malloc)
1545 		pr_info("failed to kmalloc\n");
1546 
1547 	schedule_on_each_cpu(test_work);
1548 
1549 	kfree(test_malloc);
1550 
1551 	set_current_state(TASK_INTERRUPTIBLE);
1552 	while (!kthread_should_stop())
1553 		schedule();
1554 
1555 	return 0;
1556 }
1557 
1558 /*
1559  * Do various things that may trigger events.
1560  */
1561 static __init void event_test_stuff(void)
1562 {
1563 	struct task_struct *test_thread;
1564 
1565 	test_thread = kthread_run(event_test_thread, NULL, "test-events");
1566 	msleep(1);
1567 	kthread_stop(test_thread);
1568 }
1569 
1570 /*
1571  * For every trace event defined, we will test each trace point separately,
1572  * then by subsystem groups, and finally all trace points together.
1573  */
1574 static __init void event_trace_self_tests(void)
1575 {
1576 	struct ftrace_event_call *call;
1577 	struct event_subsystem *system;
1578 	int ret;
1579 
1580 	pr_info("Running tests on trace events:\n");
1581 
1582 	list_for_each_entry(call, &ftrace_events, list) {
1583 
1584 		/* Only test those that have a probe */
1585 		if (!call->class || !call->class->probe)
1586 			continue;
1587 
1588 /*
1589  * Testing syscall events here is pretty useless, but
1590  * we still do it if configured, even though it is time consuming.
1591  * What we really need is a user thread to perform the
1592  * syscalls as we test.
1593  */
1594 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
1595 		if (call->class->system &&
1596 		    strcmp(call->class->system, "syscalls") == 0)
1597 			continue;
1598 #endif
1599 
1600 		pr_info("Testing event %s: ", call->name);
1601 
1602 		/*
1603 		 * If an event is already enabled, someone is using
1604 		 * it and the self test should not be on.
1605 		 */
1606 		if (call->flags & TRACE_EVENT_FL_ENABLED) {
1607 			pr_warning("Enabled event during self test!\n");
1608 			WARN_ON_ONCE(1);
1609 			continue;
1610 		}
1611 
1612 		ftrace_event_enable_disable(call, 1);
1613 		event_test_stuff();
1614 		ftrace_event_enable_disable(call, 0);
1615 
1616 		pr_cont("OK\n");
1617 	}
1618 
1619 	/* Now test at the sub system level */
1620 
1621 	pr_info("Running tests on trace event systems:\n");
1622 
1623 	list_for_each_entry(system, &event_subsystems, list) {
1624 
1625 		/* the ftrace system is special, skip it */
1626 		if (strcmp(system->name, "ftrace") == 0)
1627 			continue;
1628 
1629 		pr_info("Testing event system %s: ", system->name);
1630 
1631 		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
1632 		if (WARN_ON_ONCE(ret)) {
1633 			pr_warning("error enabling system %s\n",
1634 				   system->name);
1635 			continue;
1636 		}
1637 
1638 		event_test_stuff();
1639 
1640 		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
1641 		if (WARN_ON_ONCE(ret))
1642 			pr_warning("error disabling system %s\n",
1643 				   system->name);
1644 
1645 		pr_cont("OK\n");
1646 	}
1647 
1648 	/* Test with all events enabled */
1649 
1650 	pr_info("Running tests on all trace events:\n");
1651 	pr_info("Testing all events: ");
1652 
1653 	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
1654 	if (WARN_ON_ONCE(ret)) {
1655 		pr_warning("error enabling all events\n");
1656 		return;
1657 	}
1658 
1659 	event_test_stuff();
1660 
1661 	/* now disable all events again */
1662 	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
1663 	if (WARN_ON_ONCE(ret)) {
1664 		pr_warning("error disabling all events\n");
1665 		return;
1666 	}
1667 
1668 	pr_cont("OK\n");
1669 }
1670 
1671 #ifdef CONFIG_FUNCTION_TRACER
1672 
1673 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
1674 
1675 static void
1676 function_test_events_call(unsigned long ip, unsigned long parent_ip)
1677 {
1678 	struct ring_buffer_event *event;
1679 	struct ring_buffer *buffer;
1680 	struct ftrace_entry *entry;
1681 	unsigned long flags;
1682 	long disabled;
1683 	int cpu;
1684 	int pc;
1685 
1686 	pc = preempt_count();
1687 	preempt_disable_notrace();
1688 	cpu = raw_smp_processor_id();
1689 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
1690 
1691 	if (disabled != 1)
1692 		goto out;
1693 
1694 	local_save_flags(flags);
1695 
1696 	event = trace_current_buffer_lock_reserve(&buffer,
1697 						  TRACE_FN, sizeof(*entry),
1698 						  flags, pc);
1699 	if (!event)
1700 		goto out;
1701 	entry	= ring_buffer_event_data(event);
1702 	entry->ip			= ip;
1703 	entry->parent_ip		= parent_ip;
1704 
1705 	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
1706 
1707  out:
1708 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
1709 	preempt_enable_notrace();
1710 }
1711 
1712 static struct ftrace_ops trace_ops __initdata  =
1713 {
1714 	.func = function_test_events_call,
1715 };
1716 
1717 static __init void event_trace_self_test_with_function(void)
1718 {
1719 	int ret;
1720 	ret = register_ftrace_function(&trace_ops);
1721 	if (WARN_ON(ret < 0)) {
1722 		pr_info("Failed to enable function tracer for event tests\n");
1723 		return;
1724 	}
1725 	pr_info("Running tests again, along with the function tracer\n");
1726 	event_trace_self_tests();
1727 	unregister_ftrace_function(&trace_ops);
1728 }
1729 #else
1730 static __init void event_trace_self_test_with_function(void)
1731 {
1732 }
1733 #endif
1734 
1735 static __init int event_trace_self_tests_init(void)
1736 {
1737 	if (!tracing_selftest_disabled) {
1738 		event_trace_self_tests();
1739 		event_trace_self_test_with_function();
1740 	}
1741 
1742 	return 0;
1743 }
1744 
1745 late_initcall(event_trace_self_tests_init);
1746 
1747 #endif
1748