xref: /linux/kernel/trace/trace_events.c (revision c39b9fd728d8173ecda993524089fbc38211a17f)
1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of fields of the trace point.
7  *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10 
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 
21 #include <asm/setup.h>
22 
23 #include "trace_output.h"
24 
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27 
28 DEFINE_MUTEX(event_mutex);
29 
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32 
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35 
36 LIST_HEAD(ftrace_events);
37 static LIST_HEAD(ftrace_common_fields);
38 
39 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
40 
41 static struct kmem_cache *field_cachep;
42 static struct kmem_cache *file_cachep;
43 
44 /* Double loops, do not use break, only gotos work */
45 #define do_for_each_event_file(tr, file)			\
46 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
47 		list_for_each_entry(file, &tr->events, list)
48 
49 #define do_for_each_event_file_safe(tr, file)			\
50 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
51 		struct ftrace_event_file *___n;				\
52 		list_for_each_entry_safe(file, ___n, &tr->events, list)
53 
54 #define while_for_each_event_file()		\
55 	}
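/*
 * Illustrative usage: to stop the walk early, jump out with a goto,
 * since a break would only leave the inner loop:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (done)
 *			goto out;
 *	} while_for_each_event_file();
 *  out:
 */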
56 
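/*
 * Most event classes keep their fields in class->fields directly;
 * classes that share one field list among many events (the syscall
 * entry/exit classes, for instance) supply a get_fields() callback
 * that trace_get_fields() defers to.
 */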
57 static struct list_head *
58 trace_get_fields(struct ftrace_event_call *event_call)
59 {
60 	if (!event_call->class->get_fields)
61 		return &event_call->class->fields;
62 	return event_call->class->get_fields(event_call);
63 }
64 
65 static struct ftrace_event_field *
66 __find_event_field(struct list_head *head, char *name)
67 {
68 	struct ftrace_event_field *field;
69 
70 	list_for_each_entry(field, head, link) {
71 		if (!strcmp(field->name, name))
72 			return field;
73 	}
74 
75 	return NULL;
76 }
77 
78 struct ftrace_event_field *
79 trace_find_event_field(struct ftrace_event_call *call, char *name)
80 {
81 	struct ftrace_event_field *field;
82 	struct list_head *head;
83 
84 	field = __find_event_field(&ftrace_common_fields, name);
85 	if (field)
86 		return field;
87 
88 	head = trace_get_fields(call);
89 	return __find_event_field(head, name);
90 }
91 
92 static int __trace_define_field(struct list_head *head, const char *type,
93 				const char *name, int offset, int size,
94 				int is_signed, int filter_type)
95 {
96 	struct ftrace_event_field *field;
97 
98 	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
99 	if (!field)
100 		return -ENOMEM;
101 
102 	field->name = name;
103 	field->type = type;
104 
105 	if (filter_type == FILTER_OTHER)
106 		field->filter_type = filter_assign_type(type);
107 	else
108 		field->filter_type = filter_type;
109 
110 	field->offset = offset;
111 	field->size = size;
112 	field->is_signed = is_signed;
113 
114 	list_add(&field->link, head);
115 
116 	return 0;
122 }
123 
124 int trace_define_field(struct ftrace_event_call *call, const char *type,
125 		       const char *name, int offset, int size, int is_signed,
126 		       int filter_type)
127 {
128 	struct list_head *head;
129 
130 	if (WARN_ON(!call->class))
131 		return 0;
132 
133 	head = trace_get_fields(call);
134 	return __trace_define_field(head, type, name, offset, size,
135 				    is_signed, filter_type);
136 }
137 EXPORT_SYMBOL_GPL(trace_define_field);
138 
139 #define __common_field(type, item)					\
140 	ret = __trace_define_field(&ftrace_common_fields, #type,	\
141 				   "common_" #item,			\
142 				   offsetof(typeof(ent), item),		\
143 				   sizeof(ent.item),			\
144 				   is_signed_type(type), FILTER_OTHER);	\
145 	if (ret)							\
146 		return ret;
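/*
 * For reference, __common_field(int, pid) roughly expands to:
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int",
 *				   "common_pid", offsetof(typeof(ent), pid),
 *				   sizeof(ent.pid), is_signed_type(int),
 *				   FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */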
147 
148 static int trace_define_common_fields(void)
149 {
150 	int ret;
151 	struct trace_entry ent;
152 
153 	__common_field(unsigned short, type);
154 	__common_field(unsigned char, flags);
155 	__common_field(unsigned char, preempt_count);
156 	__common_field(int, pid);
157 
158 	return ret;
159 }
160 
161 static void trace_destroy_fields(struct ftrace_event_call *call)
162 {
163 	struct ftrace_event_field *field, *next;
164 	struct list_head *head;
165 
166 	head = trace_get_fields(call);
167 	list_for_each_entry_safe(field, next, head, link) {
168 		list_del(&field->link);
169 		kmem_cache_free(field_cachep, field);
170 	}
171 }
172 
173 int trace_event_raw_init(struct ftrace_event_call *call)
174 {
175 	int id;
176 
177 	id = register_ftrace_event(&call->event);
178 	if (!id)
179 		return -ENODEV;
180 
181 	return 0;
182 }
183 EXPORT_SYMBOL_GPL(trace_event_raw_init);
184 
185 int ftrace_event_reg(struct ftrace_event_call *call,
186 		     enum trace_reg type, void *data)
187 {
188 	struct ftrace_event_file *file = data;
189 
190 	switch (type) {
191 	case TRACE_REG_REGISTER:
192 		return tracepoint_probe_register(call->name,
193 						 call->class->probe,
194 						 file);
195 	case TRACE_REG_UNREGISTER:
196 		tracepoint_probe_unregister(call->name,
197 					    call->class->probe,
198 					    file);
199 		return 0;
200 
201 #ifdef CONFIG_PERF_EVENTS
202 	case TRACE_REG_PERF_REGISTER:
203 		return tracepoint_probe_register(call->name,
204 						 call->class->perf_probe,
205 						 call);
206 	case TRACE_REG_PERF_UNREGISTER:
207 		tracepoint_probe_unregister(call->name,
208 					    call->class->perf_probe,
209 					    call);
210 		return 0;
211 	case TRACE_REG_PERF_OPEN:
212 	case TRACE_REG_PERF_CLOSE:
213 	case TRACE_REG_PERF_ADD:
214 	case TRACE_REG_PERF_DEL:
215 		return 0;
216 #endif
217 	}
218 	return 0;
219 }
220 EXPORT_SYMBOL_GPL(ftrace_event_reg);
221 
222 void trace_event_enable_cmd_record(bool enable)
223 {
224 	struct ftrace_event_file *file;
225 	struct trace_array *tr;
226 
227 	mutex_lock(&event_mutex);
228 	do_for_each_event_file(tr, file) {
229 
230 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
231 			continue;
232 
233 		if (enable) {
234 			tracing_start_cmdline_record();
235 			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
236 		} else {
237 			tracing_stop_cmdline_record();
238 			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
239 		}
240 	} while_for_each_event_file();
241 	mutex_unlock(&event_mutex);
242 }
243 
244 static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
245 					 int enable, int soft_disable)
246 {
247 	struct ftrace_event_call *call = file->event_call;
248 	int ret = 0;
249 	int disable;
250 
251 	switch (enable) {
252 	case 0:
253 		/*
254 		 * When soft_disable is set and enable is cleared, we want
255 		 * to clear the SOFT_DISABLED flag but leave the event in the
256 		 * state that it was. That is, if the event was enabled and
257 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
258 		 * is set we do not want the event to be enabled before we
259 		 * clear the bit.
260 		 *
261 		 * When soft_disable is not set but the SOFT_MODE flag is,
262 		 * we do nothing. Do not disable the tracepoint, otherwise
263 		 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
264 		 */
265 		if (soft_disable) {
266 			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
267 			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
268 		} else
269 			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
270 
271 		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
272 			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
273 			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
274 				tracing_stop_cmdline_record();
275 				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
276 			}
277 			call->class->reg(call, TRACE_REG_UNREGISTER, file);
278 		}
279 		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
280 		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
281 			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
282 		break;
283 	case 1:
284 		/*
285 		 * When soft_disable is set and enable is set, we want to
286 		 * register the tracepoint for the event, but leave the event
287 		 * as is. That means, if the event was already enabled, we do
288 		 * nothing (but set SOFT_MODE). If the event is disabled, we
289 		 * set SOFT_DISABLED before enabling the event tracepoint, so
290 		 * it still seems to be disabled.
291 		 */
292 		if (!soft_disable)
293 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
294 		else
295 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
296 
297 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
298 
299 			/* Keep the event disabled when going to SOFT_MODE. */
300 			if (soft_disable)
301 				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
302 
303 			if (trace_flags & TRACE_ITER_RECORD_CMD) {
304 				tracing_start_cmdline_record();
305 				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
306 			}
307 			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
308 			if (ret) {
309 				tracing_stop_cmdline_record();
310 				pr_info("event trace: Could not enable event %s\n", call->name);
312 				break;
313 			}
314 			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
315 
316 			/* WAS_ENABLED gets set but never cleared. */
317 			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
318 		}
319 		break;
320 	}
321 
322 	return ret;
323 }
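/*
 * In short: a soft enable (enable == 1, soft_disable == 1) registers
 * the tracepoint but leaves FL_SOFT_DISABLED set, so the event still
 * reads as disabled until a probe clears that bit; a soft disable
 * (0, 1) drops SOFT_MODE and only unregisters the event if it was
 * never more than soft-enabled. With soft_disable == 0 the event is
 * toggled outright.
 */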
324 
325 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
326 				       int enable)
327 {
328 	return __ftrace_event_enable_disable(file, enable, 0);
329 }
330 
331 static void ftrace_clear_events(struct trace_array *tr)
332 {
333 	struct ftrace_event_file *file;
334 
335 	mutex_lock(&event_mutex);
336 	list_for_each_entry(file, &tr->events, list) {
337 		ftrace_event_enable_disable(file, 0);
338 	}
339 	mutex_unlock(&event_mutex);
340 }
341 
342 static void __put_system(struct event_subsystem *system)
343 {
344 	struct event_filter *filter = system->filter;
345 
346 	WARN_ON_ONCE(system->ref_count == 0);
347 	if (--system->ref_count)
348 		return;
349 
350 	list_del(&system->list);
351 
352 	if (filter) {
353 		kfree(filter->filter_string);
354 		kfree(filter);
355 	}
356 	kfree(system);
357 }
358 
359 static void __get_system(struct event_subsystem *system)
360 {
361 	WARN_ON_ONCE(system->ref_count == 0);
362 	system->ref_count++;
363 }
364 
365 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
366 {
367 	WARN_ON_ONCE(dir->ref_count == 0);
368 	dir->ref_count++;
369 	__get_system(dir->subsystem);
370 }
371 
372 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
373 {
374 	WARN_ON_ONCE(dir->ref_count == 0);
375 	/* If the subsystem is about to be freed, the dir must be too */
376 	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);
377 
378 	__put_system(dir->subsystem);
379 	if (!--dir->ref_count)
380 		kfree(dir);
381 }
382 
383 static void put_system(struct ftrace_subsystem_dir *dir)
384 {
385 	mutex_lock(&event_mutex);
386 	__put_system_dir(dir);
387 	mutex_unlock(&event_mutex);
388 }
389 
390 /*
391  * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
392  */
393 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
394 				  const char *sub, const char *event, int set)
395 {
396 	struct ftrace_event_file *file;
397 	struct ftrace_event_call *call;
398 	int ret = -EINVAL;
399 
400 	mutex_lock(&event_mutex);
401 	list_for_each_entry(file, &tr->events, list) {
402 
403 		call = file->event_call;
404 
405 		if (!call->name || !call->class || !call->class->reg)
406 			continue;
407 
408 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
409 			continue;
410 
411 		if (match &&
412 		    strcmp(match, call->name) != 0 &&
413 		    strcmp(match, call->class->system) != 0)
414 			continue;
415 
416 		if (sub && strcmp(sub, call->class->system) != 0)
417 			continue;
418 
419 		if (event && strcmp(event, call->name) != 0)
420 			continue;
421 
422 		ftrace_event_enable_disable(file, set);
423 
424 		ret = 0;
425 	}
426 	mutex_unlock(&event_mutex);
427 
428 	return ret;
429 }
430 
431 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
432 {
433 	char *event = NULL, *sub = NULL, *match;
434 
435 	/*
436 	 * The buf format can be <subsystem>:<event-name>
437 	 *  *:<event-name> means any event by that name.
438 	 *  :<event-name> is the same.
439 	 *
440 	 *  <subsystem>:* means all events in that subsystem
441 	 *  <subsystem>: means the same.
442 	 *
443 	 *  <name> (no ':') means all events in a subsystem with
444 	 *  the name <name> or any event that matches <name>
445 	 */
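	/*
	 * E.g. (illustrative): "sched:sched_switch" names one event and
	 * "sched:" a whole subsystem; a leading '!' (already stripped by
	 * ftrace_event_write() before we are called) clears instead of sets.
	 */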
446 
447 	match = strsep(&buf, ":");
448 	if (buf) {
449 		sub = match;
450 		event = buf;
451 		match = NULL;
452 
453 		if (!strlen(sub) || strcmp(sub, "*") == 0)
454 			sub = NULL;
455 		if (!strlen(event) || strcmp(event, "*") == 0)
456 			event = NULL;
457 	}
458 
459 	return __ftrace_set_clr_event(tr, match, sub, event, set);
460 }
461 
462 /**
463  * trace_set_clr_event - enable or disable an event
464  * @system: system name to match (NULL for any system)
465  * @event: event name to match (NULL for all events, within system)
466  * @set: 1 to enable, 0 to disable
467  *
468  * This is a way for other parts of the kernel to enable or disable
469  * event recording.
470  *
471  * Returns 0 on success, -EINVAL if the parameters do not match any
472  * registered events.
473  */
474 int trace_set_clr_event(const char *system, const char *event, int set)
475 {
476 	struct trace_array *tr = top_trace_array();
477 
478 	return __ftrace_set_clr_event(tr, NULL, system, event, set);
479 }
480 EXPORT_SYMBOL_GPL(trace_set_clr_event);
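/*
 * Example (illustrative): a driver could turn on every "sched" event
 * around a test run with trace_set_clr_event("sched", NULL, 1) and
 * turn them off again with trace_set_clr_event("sched", NULL, 0).
 */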
481 
482 /* 127 chars plus the terminating NUL gives 128, much more than enough */
483 #define EVENT_BUF_SIZE		127
484 
485 static ssize_t
486 ftrace_event_write(struct file *file, const char __user *ubuf,
487 		   size_t cnt, loff_t *ppos)
488 {
489 	struct trace_parser parser;
490 	struct seq_file *m = file->private_data;
491 	struct trace_array *tr = m->private;
492 	ssize_t read, ret;
493 
494 	if (!cnt)
495 		return 0;
496 
497 	ret = tracing_update_buffers();
498 	if (ret < 0)
499 		return ret;
500 
501 	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
502 		return -ENOMEM;
503 
504 	read = trace_get_user(&parser, ubuf, cnt, ppos);
505 
506 	if (read >= 0 && trace_parser_loaded(&parser)) {
507 		int set = 1;
508 
509 		if (*parser.buffer == '!')
510 			set = 0;
511 
512 		parser.buffer[parser.idx] = 0;
513 
514 		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
515 		if (ret)
516 			goto out_put;
517 	}
518 
519 	ret = read;
520 
521  out_put:
522 	trace_parser_put(&parser);
523 
524 	return ret;
525 }
526 
527 static void *
528 t_next(struct seq_file *m, void *v, loff_t *pos)
529 {
530 	struct ftrace_event_file *file = v;
531 	struct ftrace_event_call *call;
532 	struct trace_array *tr = m->private;
533 
534 	(*pos)++;
535 
536 	list_for_each_entry_continue(file, &tr->events, list) {
537 		call = file->event_call;
538 		/*
539 		 * Events in the "ftrace" subsystem are for showing formats
540 		 * only. They cannot be enabled or disabled via the event files.
541 		 */
542 		if (call->class && call->class->reg)
543 			return file;
544 	}
545 
546 	return NULL;
547 }
548 
549 static void *t_start(struct seq_file *m, loff_t *pos)
550 {
551 	struct ftrace_event_file *file;
552 	struct trace_array *tr = m->private;
553 	loff_t l;
554 
555 	mutex_lock(&event_mutex);
556 
557 	file = list_entry(&tr->events, struct ftrace_event_file, list);
558 	for (l = 0; l <= *pos; ) {
559 		file = t_next(m, file, &l);
560 		if (!file)
561 			break;
562 	}
563 	return file;
564 }
565 
566 static void *
567 s_next(struct seq_file *m, void *v, loff_t *pos)
568 {
569 	struct ftrace_event_file *file = v;
570 	struct trace_array *tr = m->private;
571 
572 	(*pos)++;
573 
574 	list_for_each_entry_continue(file, &tr->events, list) {
575 		if (file->flags & FTRACE_EVENT_FL_ENABLED)
576 			return file;
577 	}
578 
579 	return NULL;
580 }
581 
582 static void *s_start(struct seq_file *m, loff_t *pos)
583 {
584 	struct ftrace_event_file *file;
585 	struct trace_array *tr = m->private;
586 	loff_t l;
587 
588 	mutex_lock(&event_mutex);
589 
590 	file = list_entry(&tr->events, struct ftrace_event_file, list);
591 	for (l = 0; l <= *pos; ) {
592 		file = s_next(m, file, &l);
593 		if (!file)
594 			break;
595 	}
596 	return file;
597 }
598 
599 static int t_show(struct seq_file *m, void *v)
600 {
601 	struct ftrace_event_file *file = v;
602 	struct ftrace_event_call *call = file->event_call;
603 
604 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
605 		seq_printf(m, "%s:", call->class->system);
606 	seq_printf(m, "%s\n", call->name);
607 
608 	return 0;
609 }
610 
611 static void t_stop(struct seq_file *m, void *p)
612 {
613 	mutex_unlock(&event_mutex);
614 }
615 
616 static ssize_t
617 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
618 		  loff_t *ppos)
619 {
620 	struct ftrace_event_file *file = filp->private_data;
621 	char *buf;
622 
623 	if (file->flags & FTRACE_EVENT_FL_ENABLED) {
624 		if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
625 			buf = "0*\n";
626 		else
627 			buf = "1\n";
628 	} else
629 		buf = "0\n";
630 
631 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
632 }
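/*
 * Note: reading "0*" from an "enable" file (illustrative) means the
 * tracepoint is registered but soft-disabled, e.g. held in SOFT_MODE
 * by an enable_event/disable_event probe, rather than truly enabled.
 */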
633 
634 static ssize_t
635 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
636 		   loff_t *ppos)
637 {
638 	struct ftrace_event_file *file = filp->private_data;
639 	unsigned long val;
640 	int ret;
641 
642 	if (!file)
643 		return -EINVAL;
644 
645 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
646 	if (ret)
647 		return ret;
648 
649 	ret = tracing_update_buffers();
650 	if (ret < 0)
651 		return ret;
652 
653 	switch (val) {
654 	case 0:
655 	case 1:
656 		mutex_lock(&event_mutex);
657 		ret = ftrace_event_enable_disable(file, val);
658 		mutex_unlock(&event_mutex);
659 		break;
660 
661 	default:
662 		return -EINVAL;
663 	}
664 
665 	*ppos += cnt;
666 
667 	return ret ? ret : cnt;
668 }
669 
670 static ssize_t
671 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
672 		   loff_t *ppos)
673 {
674 	const char set_to_char[4] = { '?', '0', '1', 'X' };
675 	struct ftrace_subsystem_dir *dir = filp->private_data;
676 	struct event_subsystem *system = dir->subsystem;
677 	struct ftrace_event_call *call;
678 	struct ftrace_event_file *file;
679 	struct trace_array *tr = dir->tr;
680 	char buf[2];
681 	int set = 0;
682 	int ret;
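	/*
	 * "set" is used as a two-bit mask: bit 0 gets set once a disabled
	 * event is seen, bit 1 once an enabled one is seen. The final value
	 * indexes set_to_char[] above: '?' no events, '0' all disabled,
	 * '1' all enabled, 'X' mixed.
	 */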
683 
684 	mutex_lock(&event_mutex);
685 	list_for_each_entry(file, &tr->events, list) {
686 		call = file->event_call;
687 		if (!call->name || !call->class || !call->class->reg)
688 			continue;
689 
690 		if (system && strcmp(call->class->system, system->name) != 0)
691 			continue;
692 
693 		/*
694 		 * We need to find out if all the events are set,
695 		 * or if all events are cleared, or if we have
696 		 * a mixture.
697 		 */
698 		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
699 
700 		/*
701 		 * If we have a mixture, no need to look further.
702 		 */
703 		if (set == 3)
704 			break;
705 	}
706 	mutex_unlock(&event_mutex);
707 
708 	buf[0] = set_to_char[set];
709 	buf[1] = '\n';
710 
711 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
712 
713 	return ret;
714 }
715 
716 static ssize_t
717 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
718 		    loff_t *ppos)
719 {
720 	struct ftrace_subsystem_dir *dir = filp->private_data;
721 	struct event_subsystem *system = dir->subsystem;
722 	const char *name = NULL;
723 	unsigned long val;
724 	ssize_t ret;
725 
726 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
727 	if (ret)
728 		return ret;
729 
730 	ret = tracing_update_buffers();
731 	if (ret < 0)
732 		return ret;
733 
734 	if (val != 0 && val != 1)
735 		return -EINVAL;
736 
737 	/*
738 	 * Opening of "enable" adds a ref count to system,
739 	 * so the name is safe to use.
740 	 */
741 	if (system)
742 		name = system->name;
743 
744 	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
745 	if (ret)
746 		goto out;
747 
748 	ret = cnt;
749 
750 out:
751 	*ppos += cnt;
752 
753 	return ret;
754 }
755 
756 enum {
757 	FORMAT_HEADER		= 1,
758 	FORMAT_FIELD_SEPARATOR	= 2,
759 	FORMAT_PRINTFMT		= 3,
760 };
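/*
 * f_next() walks the "format" file as a virtual sequence: FORMAT_HEADER,
 * then the common fields in definition order, FORMAT_FIELD_SEPARATOR,
 * the event's own fields, and finally FORMAT_PRINTFMT.
 */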
761 
762 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
763 {
764 	struct ftrace_event_call *call = m->private;
765 	struct ftrace_event_field *field;
766 	struct list_head *common_head = &ftrace_common_fields;
767 	struct list_head *head = trace_get_fields(call);
768 
769 	(*pos)++;
770 
771 	switch ((unsigned long)v) {
772 	case FORMAT_HEADER:
773 		if (unlikely(list_empty(common_head)))
774 			return NULL;
775 
776 		field = list_entry(common_head->prev,
777 				   struct ftrace_event_field, link);
778 		return field;
779 
780 	case FORMAT_FIELD_SEPARATOR:
781 		if (unlikely(list_empty(head)))
782 			return NULL;
783 
784 		field = list_entry(head->prev, struct ftrace_event_field, link);
785 		return field;
786 
787 	case FORMAT_PRINTFMT:
788 		/* all done */
789 		return NULL;
790 	}
791 
792 	field = v;
793 	if (field->link.prev == common_head)
794 		return (void *)FORMAT_FIELD_SEPARATOR;
795 	else if (field->link.prev == head)
796 		return (void *)FORMAT_PRINTFMT;
797 
798 	field = list_entry(field->link.prev, struct ftrace_event_field, link);
799 
800 	return field;
801 }
802 
803 static void *f_start(struct seq_file *m, loff_t *pos)
804 {
805 	loff_t l = 0;
806 	void *p;
807 
808 	/* Start by showing the header */
809 	if (!*pos)
810 		return (void *)FORMAT_HEADER;
811 
812 	p = (void *)FORMAT_HEADER;
813 	do {
814 		p = f_next(m, p, &l);
815 	} while (p && l < *pos);
816 
817 	return p;
818 }
819 
820 static int f_show(struct seq_file *m, void *v)
821 {
822 	struct ftrace_event_call *call = m->private;
823 	struct ftrace_event_field *field;
824 	const char *array_descriptor;
825 
826 	switch ((unsigned long)v) {
827 	case FORMAT_HEADER:
828 		seq_printf(m, "name: %s\n", call->name);
829 		seq_printf(m, "ID: %d\n", call->event.type);
830 		seq_printf(m, "format:\n");
831 		return 0;
832 
833 	case FORMAT_FIELD_SEPARATOR:
834 		seq_putc(m, '\n');
835 		return 0;
836 
837 	case FORMAT_PRINTFMT:
838 		seq_printf(m, "\nprint fmt: %s\n",
839 			   call->print_fmt);
840 		return 0;
841 	}
842 
843 	field = v;
844 
845 	/*
846 	 * Smartly shows the array type (except dynamic arrays).
847 	 * Normal:
848 	 *	field:TYPE VAR
849 	 * If TYPE := TYPE[LEN], it is shown:
850 	 *	field:TYPE VAR[LEN]
851 	 */
852 	array_descriptor = strchr(field->type, '[');
853 
854 	if (!strncmp(field->type, "__data_loc", 10))
855 		array_descriptor = NULL;
856 
857 	if (!array_descriptor)
858 		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
859 			   field->type, field->name, field->offset,
860 			   field->size, !!field->is_signed);
861 	else
862 		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
863 			   (int)(array_descriptor - field->type),
864 			   field->type, field->name,
865 			   array_descriptor, field->offset,
866 			   field->size, !!field->is_signed);
867 
868 	return 0;
869 }
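/*
 * Roughly what f_show() renders (illustrative; fields and the ID vary
 * per event):
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *	print fmt: "prev_comm=%s prev_pid=%d ...", ...
 */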
870 
871 static void f_stop(struct seq_file *m, void *p)
872 {
873 }
874 
875 static const struct seq_operations trace_format_seq_ops = {
876 	.start		= f_start,
877 	.next		= f_next,
878 	.stop		= f_stop,
879 	.show		= f_show,
880 };
881 
882 static int trace_format_open(struct inode *inode, struct file *file)
883 {
884 	struct ftrace_event_call *call = inode->i_private;
885 	struct seq_file *m;
886 	int ret;
887 
888 	ret = seq_open(file, &trace_format_seq_ops);
889 	if (ret < 0)
890 		return ret;
891 
892 	m = file->private_data;
893 	m->private = call;
894 
895 	return 0;
896 }
897 
898 static ssize_t
899 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
900 {
901 	struct ftrace_event_call *call = filp->private_data;
902 	struct trace_seq *s;
903 	int r;
904 
905 	if (*ppos)
906 		return 0;
907 
908 	s = kmalloc(sizeof(*s), GFP_KERNEL);
909 	if (!s)
910 		return -ENOMEM;
911 
912 	trace_seq_init(s);
913 	trace_seq_printf(s, "%d\n", call->event.type);
914 
915 	r = simple_read_from_buffer(ubuf, cnt, ppos,
916 				    s->buffer, s->len);
917 	kfree(s);
918 	return r;
919 }
920 
921 static ssize_t
922 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
923 		  loff_t *ppos)
924 {
925 	struct ftrace_event_call *call = filp->private_data;
926 	struct trace_seq *s;
927 	int r;
928 
929 	if (*ppos)
930 		return 0;
931 
932 	s = kmalloc(sizeof(*s), GFP_KERNEL);
933 	if (!s)
934 		return -ENOMEM;
935 
936 	trace_seq_init(s);
937 
938 	print_event_filter(call, s);
939 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
940 
941 	kfree(s);
942 
943 	return r;
944 }
945 
946 static ssize_t
947 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
948 		   loff_t *ppos)
949 {
950 	struct ftrace_event_call *call = filp->private_data;
951 	char *buf;
952 	int err;
953 
954 	if (cnt >= PAGE_SIZE)
955 		return -EINVAL;
956 
957 	buf = (char *)__get_free_page(GFP_TEMPORARY);
958 	if (!buf)
959 		return -ENOMEM;
960 
961 	if (copy_from_user(buf, ubuf, cnt)) {
962 		free_page((unsigned long) buf);
963 		return -EFAULT;
964 	}
965 	buf[cnt] = '\0';
966 
967 	err = apply_event_filter(call, buf);
968 	free_page((unsigned long) buf);
969 	if (err < 0)
970 		return err;
971 
972 	*ppos += cnt;
973 
974 	return cnt;
975 }
976 
977 static LIST_HEAD(event_subsystems);
978 
979 static int subsystem_open(struct inode *inode, struct file *filp)
980 {
981 	struct event_subsystem *system = NULL;
982 	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
983 	struct trace_array *tr;
984 	int ret;
985 
986 	/* Make sure the system still exists */
987 	mutex_lock(&event_mutex);
988 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
989 		list_for_each_entry(dir, &tr->systems, list) {
990 			if (dir == inode->i_private) {
991 				/* Don't open systems with no events */
992 				if (dir->nr_events) {
993 					__get_system_dir(dir);
994 					system = dir->subsystem;
995 				}
996 				goto exit_loop;
997 			}
998 		}
999 	}
1000  exit_loop:
1001 	mutex_unlock(&event_mutex);
1002 
1003 	if (!system)
1004 		return -ENODEV;
1005 
1006 	/* Some versions of gcc think dir can be uninitialized here */
1007 	WARN_ON(!dir);
1008 
1009 	ret = tracing_open_generic(inode, filp);
1010 	if (ret < 0)
1011 		put_system(dir);
1012 
1013 	return ret;
1014 }
1015 
1016 static int system_tr_open(struct inode *inode, struct file *filp)
1017 {
1018 	struct ftrace_subsystem_dir *dir;
1019 	struct trace_array *tr = inode->i_private;
1020 	int ret;
1021 
1022 	/* Make a temporary dir that has no system but points to tr */
1023 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1024 	if (!dir)
1025 		return -ENOMEM;
1026 
1027 	dir->tr = tr;
1028 
1029 	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;
1034 
1035 	return ret;
1036 }
1037 
1038 static int subsystem_release(struct inode *inode, struct file *file)
1039 {
1040 	struct ftrace_subsystem_dir *dir = file->private_data;
1041 
1042 	/*
1043 	 * If dir->subsystem is NULL, then this is a temporary
1044 	 * descriptor that was made for a trace_array to enable
1045 	 * all subsystems.
1046 	 */
1047 	if (dir->subsystem)
1048 		put_system(dir);
1049 	else
1050 		kfree(dir);
1051 
1052 	return 0;
1053 }
1054 
1055 static ssize_t
1056 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1057 		      loff_t *ppos)
1058 {
1059 	struct ftrace_subsystem_dir *dir = filp->private_data;
1060 	struct event_subsystem *system = dir->subsystem;
1061 	struct trace_seq *s;
1062 	int r;
1063 
1064 	if (*ppos)
1065 		return 0;
1066 
1067 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1068 	if (!s)
1069 		return -ENOMEM;
1070 
1071 	trace_seq_init(s);
1072 
1073 	print_subsystem_event_filter(system, s);
1074 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1075 
1076 	kfree(s);
1077 
1078 	return r;
1079 }
1080 
1081 static ssize_t
1082 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1083 		       loff_t *ppos)
1084 {
1085 	struct ftrace_subsystem_dir *dir = filp->private_data;
1086 	char *buf;
1087 	int err;
1088 
1089 	if (cnt >= PAGE_SIZE)
1090 		return -EINVAL;
1091 
1092 	buf = (char *)__get_free_page(GFP_TEMPORARY);
1093 	if (!buf)
1094 		return -ENOMEM;
1095 
1096 	if (copy_from_user(buf, ubuf, cnt)) {
1097 		free_page((unsigned long) buf);
1098 		return -EFAULT;
1099 	}
1100 	buf[cnt] = '\0';
1101 
1102 	err = apply_subsystem_event_filter(dir, buf);
1103 	free_page((unsigned long) buf);
1104 	if (err < 0)
1105 		return err;
1106 
1107 	*ppos += cnt;
1108 
1109 	return cnt;
1110 }
1111 
1112 static ssize_t
1113 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1114 {
1115 	int (*func)(struct trace_seq *s) = filp->private_data;
1116 	struct trace_seq *s;
1117 	int r;
1118 
1119 	if (*ppos)
1120 		return 0;
1121 
1122 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1123 	if (!s)
1124 		return -ENOMEM;
1125 
1126 	trace_seq_init(s);
1127 
1128 	func(s);
1129 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1130 
1131 	kfree(s);
1132 
1133 	return r;
1134 }
1135 
1136 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1137 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1138 
1139 static const struct seq_operations show_event_seq_ops = {
1140 	.start = t_start,
1141 	.next = t_next,
1142 	.show = t_show,
1143 	.stop = t_stop,
1144 };
1145 
1146 static const struct seq_operations show_set_event_seq_ops = {
1147 	.start = s_start,
1148 	.next = s_next,
1149 	.show = t_show,
1150 	.stop = t_stop,
1151 };
1152 
1153 static const struct file_operations ftrace_avail_fops = {
1154 	.open = ftrace_event_avail_open,
1155 	.read = seq_read,
1156 	.llseek = seq_lseek,
1157 	.release = seq_release,
1158 };
1159 
1160 static const struct file_operations ftrace_set_event_fops = {
1161 	.open = ftrace_event_set_open,
1162 	.read = seq_read,
1163 	.write = ftrace_event_write,
1164 	.llseek = seq_lseek,
1165 	.release = seq_release,
1166 };
1167 
1168 static const struct file_operations ftrace_enable_fops = {
1169 	.open = tracing_open_generic,
1170 	.read = event_enable_read,
1171 	.write = event_enable_write,
1172 	.llseek = default_llseek,
1173 };
1174 
1175 static const struct file_operations ftrace_event_format_fops = {
1176 	.open = trace_format_open,
1177 	.read = seq_read,
1178 	.llseek = seq_lseek,
1179 	.release = seq_release,
1180 };
1181 
1182 static const struct file_operations ftrace_event_id_fops = {
1183 	.open = tracing_open_generic,
1184 	.read = event_id_read,
1185 	.llseek = default_llseek,
1186 };
1187 
1188 static const struct file_operations ftrace_event_filter_fops = {
1189 	.open = tracing_open_generic,
1190 	.read = event_filter_read,
1191 	.write = event_filter_write,
1192 	.llseek = default_llseek,
1193 };
1194 
1195 static const struct file_operations ftrace_subsystem_filter_fops = {
1196 	.open = subsystem_open,
1197 	.read = subsystem_filter_read,
1198 	.write = subsystem_filter_write,
1199 	.llseek = default_llseek,
1200 	.release = subsystem_release,
1201 };
1202 
1203 static const struct file_operations ftrace_system_enable_fops = {
1204 	.open = subsystem_open,
1205 	.read = system_enable_read,
1206 	.write = system_enable_write,
1207 	.llseek = default_llseek,
1208 	.release = subsystem_release,
1209 };
1210 
1211 static const struct file_operations ftrace_tr_enable_fops = {
1212 	.open = system_tr_open,
1213 	.read = system_enable_read,
1214 	.write = system_enable_write,
1215 	.llseek = default_llseek,
1216 	.release = subsystem_release,
1217 };
1218 
1219 static const struct file_operations ftrace_show_header_fops = {
1220 	.open = tracing_open_generic,
1221 	.read = show_header,
1222 	.llseek = default_llseek,
1223 };
1224 
1225 static int
1226 ftrace_event_open(struct inode *inode, struct file *file,
1227 		  const struct seq_operations *seq_ops)
1228 {
1229 	struct seq_file *m;
1230 	int ret;
1231 
1232 	ret = seq_open(file, seq_ops);
1233 	if (ret < 0)
1234 		return ret;
1235 	m = file->private_data;
1236 	/* copy tr over to seq ops */
1237 	m->private = inode->i_private;
1238 
1239 	return ret;
1240 }
1241 
1242 static int
1243 ftrace_event_avail_open(struct inode *inode, struct file *file)
1244 {
1245 	const struct seq_operations *seq_ops = &show_event_seq_ops;
1246 
1247 	return ftrace_event_open(inode, file, seq_ops);
1248 }
1249 
1250 static int
1251 ftrace_event_set_open(struct inode *inode, struct file *file)
1252 {
1253 	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1254 	struct trace_array *tr = inode->i_private;
1255 
1256 	if ((file->f_mode & FMODE_WRITE) &&
1257 	    (file->f_flags & O_TRUNC))
1258 		ftrace_clear_events(tr);
1259 
1260 	return ftrace_event_open(inode, file, seq_ops);
1261 }
1262 
1263 static struct event_subsystem *
1264 create_new_subsystem(const char *name)
1265 {
1266 	struct event_subsystem *system;
1267 
1268 	/* need to create new entry */
1269 	system = kmalloc(sizeof(*system), GFP_KERNEL);
1270 	if (!system)
1271 		return NULL;
1272 
1273 	system->ref_count = 1;
1274 	system->name = name;
1275 
1278 	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1279 	if (!system->filter)
1280 		goto out_free;
1281 
1282 	list_add(&system->list, &event_subsystems);
1283 
1284 	return system;
1285 
1286  out_free:
1287 	kfree(system);
1288 	return NULL;
1289 }
1290 
1291 static struct dentry *
1292 event_subsystem_dir(struct trace_array *tr, const char *name,
1293 		    struct ftrace_event_file *file, struct dentry *parent)
1294 {
1295 	struct ftrace_subsystem_dir *dir;
1296 	struct event_subsystem *system;
1297 	struct dentry *entry;
1298 
1299 	/* First see if we already created this dir */
1300 	list_for_each_entry(dir, &tr->systems, list) {
1301 		system = dir->subsystem;
1302 		if (strcmp(system->name, name) == 0) {
1303 			dir->nr_events++;
1304 			file->system = dir;
1305 			return dir->entry;
1306 		}
1307 	}
1308 
1309 	/* Now see if the system itself exists. */
1310 	list_for_each_entry(system, &event_subsystems, list) {
1311 		if (strcmp(system->name, name) == 0)
1312 			break;
1313 	}
1314 	/* Reset system variable when not found */
1315 	if (&system->list == &event_subsystems)
1316 		system = NULL;
1317 
1318 	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1319 	if (!dir)
1320 		goto out_fail;
1321 
1322 	if (!system) {
1323 		system = create_new_subsystem(name);
1324 		if (!system)
1325 			goto out_free;
1326 	} else
1327 		__get_system(system);
1328 
1329 	dir->entry = debugfs_create_dir(name, parent);
1330 	if (!dir->entry) {
1331 		pr_warning("Failed to create system directory %s\n", name);
1332 		__put_system(system);
1333 		goto out_free;
1334 	}
1335 
1336 	dir->tr = tr;
1337 	dir->ref_count = 1;
1338 	dir->nr_events = 1;
1339 	dir->subsystem = system;
1340 	file->system = dir;
1341 
1342 	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1343 				    &ftrace_subsystem_filter_fops);
1344 	if (!entry) {
1345 		kfree(system->filter);
1346 		system->filter = NULL;
1347 		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
1348 	}
1349 
1350 	trace_create_file("enable", 0644, dir->entry, dir,
1351 			  &ftrace_system_enable_fops);
1352 
1353 	list_add(&dir->list, &tr->systems);
1354 
1355 	return dir->entry;
1356 
1357  out_free:
1358 	kfree(dir);
1359  out_fail:
1360 	/* Only print this message if we failed on memory allocation */
1361 	if (!dir || !system)
1362 		pr_warning("No memory to create event subsystem %s\n",
1363 			   name);
1364 	return NULL;
1365 }
1366 
1367 static int
1368 event_create_dir(struct dentry *parent,
1369 		 struct ftrace_event_file *file,
1370 		 const struct file_operations *id,
1371 		 const struct file_operations *enable,
1372 		 const struct file_operations *filter,
1373 		 const struct file_operations *format)
1374 {
1375 	struct ftrace_event_call *call = file->event_call;
1376 	struct trace_array *tr = file->tr;
1377 	struct list_head *head;
1378 	struct dentry *d_events;
1379 	int ret;
1380 
1381 	/*
1382 	 * If the trace point header did not define TRACE_SYSTEM
1383 	 * then the system would be called "TRACE_SYSTEM".
1384 	 */
1385 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1386 		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1387 		if (!d_events)
1388 			return -ENOMEM;
1389 	} else
1390 		d_events = parent;
1391 
1392 	file->dir = debugfs_create_dir(call->name, d_events);
1393 	if (!file->dir) {
1394 		pr_warning("Could not create debugfs '%s' directory\n",
1395 			   call->name);
1396 		return -1;
1397 	}
1398 
1399 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1400 		trace_create_file("enable", 0644, file->dir, file,
1401 				  enable);
1402 
1403 #ifdef CONFIG_PERF_EVENTS
1404 	if (call->event.type && call->class->reg)
1405 		trace_create_file("id", 0444, file->dir, call,
1406 				  id);
1407 #endif
1408 
1409 	/*
1410 	 * Other events may have the same class. Only update
1411 	 * the fields if they are not already defined.
1412 	 */
1413 	head = trace_get_fields(call);
1414 	if (list_empty(head)) {
1415 		ret = call->class->define_fields(call);
1416 		if (ret < 0) {
1417 			pr_warning("Could not initialize trace point"
1418 				   " events/%s\n", call->name);
1419 			return -1;
1420 		}
1421 	}
1422 	trace_create_file("filter", 0644, file->dir, call,
1423 			  filter);
1424 
1425 	trace_create_file("format", 0444, file->dir, call,
1426 			  format);
1427 
1428 	return 0;
1429 }
1430 
1431 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
1432 {
1433 	if (!dir)
1434 		return;
1435 
1436 	if (!--dir->nr_events) {
1437 		debugfs_remove_recursive(dir->entry);
1438 		list_del(&dir->list);
1439 		__put_system_dir(dir);
1440 	}
1441 }
1442 
1443 static void remove_event_from_tracers(struct ftrace_event_call *call)
1444 {
1445 	struct ftrace_event_file *file;
1446 	struct trace_array *tr;
1447 
1448 	do_for_each_event_file_safe(tr, file) {
1449 
1450 		if (file->event_call != call)
1451 			continue;
1452 
1453 		list_del(&file->list);
1454 		debugfs_remove_recursive(file->dir);
1455 		remove_subsystem(file->system);
1456 		kmem_cache_free(file_cachep, file);
1457 
1458 		/*
1459 		 * The do_for_each_event_file_safe() is
1460 		 * a double loop. After finding the call for this
1461 		 * trace_array, we use break to jump to the next
1462 		 * trace_array.
1463 		 */
1464 		break;
1465 	} while_for_each_event_file();
1466 }
1467 
1468 static void event_remove(struct ftrace_event_call *call)
1469 {
1470 	struct trace_array *tr;
1471 	struct ftrace_event_file *file;
1472 
1473 	do_for_each_event_file(tr, file) {
1474 		if (file->event_call != call)
1475 			continue;
1476 		ftrace_event_enable_disable(file, 0);
1477 		/*
1478 		 * The do_for_each_event_file() is
1479 		 * a double loop. After finding the call for this
1480 		 * trace_array, we use break to jump to the next
1481 		 * trace_array.
1482 		 */
1483 		break;
1484 	} while_for_each_event_file();
1485 
1486 	if (call->event.funcs)
1487 		__unregister_ftrace_event(&call->event);
1488 	remove_event_from_tracers(call);
1489 	list_del(&call->list);
1490 }
1491 
1492 static int event_init(struct ftrace_event_call *call)
1493 {
1494 	int ret = 0;
1495 
1496 	if (WARN_ON(!call->name))
1497 		return -EINVAL;
1498 
1499 	if (call->class->raw_init) {
1500 		ret = call->class->raw_init(call);
1501 		if (ret < 0 && ret != -ENOSYS)
1502 			pr_warn("Could not initialize trace events/%s\n",
1503 				call->name);
1504 	}
1505 
1506 	return ret;
1507 }
1508 
1509 static int
1510 __register_event(struct ftrace_event_call *call, struct module *mod)
1511 {
1512 	int ret;
1513 
1514 	ret = event_init(call);
1515 	if (ret < 0)
1516 		return ret;
1517 
1518 	list_add(&call->list, &ftrace_events);
1519 	call->mod = mod;
1520 
1521 	return 0;
1522 }
1523 
1524 /* Add an event to a trace directory */
1525 static int
1526 __trace_add_new_event(struct ftrace_event_call *call,
1527 		      struct trace_array *tr,
1528 		      const struct file_operations *id,
1529 		      const struct file_operations *enable,
1530 		      const struct file_operations *filter,
1531 		      const struct file_operations *format)
1532 {
1533 	struct ftrace_event_file *file;
1534 
1535 	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1536 	if (!file)
1537 		return -ENOMEM;
1538 
1539 	file->event_call = call;
1540 	file->tr = tr;
1541 	list_add(&file->list, &tr->events);
1542 
1543 	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1544 }
1545 
1546 /*
1547  * Just create a descriptor for early init. A descriptor is required
1548  * for enabling events at boot. We want to enable events before
1549  * the filesystem is initialized.
1550  */
1551 static __init int
1552 __trace_early_add_new_event(struct ftrace_event_call *call,
1553 			    struct trace_array *tr)
1554 {
1555 	struct ftrace_event_file *file;
1556 
1557 	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1558 	if (!file)
1559 		return -ENOMEM;
1560 
1561 	file->event_call = call;
1562 	file->tr = tr;
1563 	list_add(&file->list, &tr->events);
1564 
1565 	return 0;
1566 }
1567 
1568 struct ftrace_module_file_ops;
1569 static void __add_event_to_tracers(struct ftrace_event_call *call,
1570 				   struct ftrace_module_file_ops *file_ops);
1571 
1572 /* Add an additional event_call dynamically */
1573 int trace_add_event_call(struct ftrace_event_call *call)
1574 {
1575 	int ret;

1576 	mutex_lock(&event_mutex);
1577 
1578 	ret = __register_event(call, NULL);
1579 	if (ret >= 0)
1580 		__add_event_to_tracers(call, NULL);
1581 
1582 	mutex_unlock(&event_mutex);
1583 	return ret;
1584 }
1585 
1586 /*
1587  * Must be called with both event_mutex and trace_event_sem held.
1588  */
1589 static void __trace_remove_event_call(struct ftrace_event_call *call)
1590 {
1591 	event_remove(call);
1592 	trace_destroy_fields(call);
1593 	destroy_preds(call);
1594 }
1595 
1596 /* Remove an event_call */
1597 void trace_remove_event_call(struct ftrace_event_call *call)
1598 {
1599 	mutex_lock(&event_mutex);
1600 	down_write(&trace_event_sem);
1601 	__trace_remove_event_call(call);
1602 	up_write(&trace_event_sem);
1603 	mutex_unlock(&event_mutex);
1604 }
1605 
1606 #define for_each_event(event, start, end)			\
1607 	for (event = start;					\
1608 	     (unsigned long)event < (unsigned long)end;		\
1609 	     event++)
1610 
1611 #ifdef CONFIG_MODULES
1612 
1613 static LIST_HEAD(ftrace_module_file_list);
1614 
1615 /*
1616  * Modules must own their file_operations to keep up with
1617  * reference counting.
1618  */
1619 struct ftrace_module_file_ops {
1620 	struct list_head		list;
1621 	struct module			*mod;
1622 	struct file_operations		id;
1623 	struct file_operations		enable;
1624 	struct file_operations		format;
1625 	struct file_operations		filter;
1626 };
1627 
1628 static struct ftrace_module_file_ops *
1629 find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
1630 {
1631 	/*
1632 	 * As event_calls are added in groups by module,
1633 	 * when we find one file_ops, we don't need to search for
1634 	 * each call in that module, as the rest should be the
1635 	 * same. Only search for a new one if the last one did
1636 	 * not match.
1637 	 */
1638 	if (file_ops && mod == file_ops->mod)
1639 		return file_ops;
1640 
1641 	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1642 		if (file_ops->mod == mod)
1643 			return file_ops;
1644 	}
1645 	return NULL;
1646 }
1647 
1648 static struct ftrace_module_file_ops *
1649 trace_create_file_ops(struct module *mod)
1650 {
1651 	struct ftrace_module_file_ops *file_ops;
1652 
1653 	/*
1654 	 * This is a bit of a PITA. To allow for correct reference
1655 	 * counting, modules must "own" their file_operations.
1656 	 * To do this, we allocate the file operations that will be
1657 	 * used in the event directory.
1658 	 */
1659 
1660 	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1661 	if (!file_ops)
1662 		return NULL;
1663 
1664 	file_ops->mod = mod;
1665 
1666 	file_ops->id = ftrace_event_id_fops;
1667 	file_ops->id.owner = mod;
1668 
1669 	file_ops->enable = ftrace_enable_fops;
1670 	file_ops->enable.owner = mod;
1671 
1672 	file_ops->filter = ftrace_event_filter_fops;
1673 	file_ops->filter.owner = mod;
1674 
1675 	file_ops->format = ftrace_event_format_fops;
1676 	file_ops->format.owner = mod;
1677 
1678 	list_add(&file_ops->list, &ftrace_module_file_list);
1679 
1680 	return file_ops;
1681 }
1682 
1683 static void trace_module_add_events(struct module *mod)
1684 {
1685 	struct ftrace_module_file_ops *file_ops = NULL;
1686 	struct ftrace_event_call **call, **start, **end;
1687 
1688 	start = mod->trace_events;
1689 	end = mod->trace_events + mod->num_trace_events;
1690 
1691 	if (start == end)
1692 		return;
1693 
1694 	file_ops = trace_create_file_ops(mod);
1695 	if (!file_ops)
1696 		return;
1697 
1698 	for_each_event(call, start, end) {
1699 		__register_event(*call, mod);
1700 		__add_event_to_tracers(*call, file_ops);
1701 	}
1702 }
1703 
1704 static void trace_module_remove_events(struct module *mod)
1705 {
1706 	struct ftrace_module_file_ops *file_ops;
1707 	struct ftrace_event_call *call, *p;
1708 	bool clear_trace = false;
1709 
1710 	down_write(&trace_event_sem);
1711 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
1712 		if (call->mod == mod) {
1713 			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1714 				clear_trace = true;
1715 			__trace_remove_event_call(call);
1716 		}
1717 	}
1718 
1719 	/* Now free the file_operations */
1720 	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1721 		if (file_ops->mod == mod)
1722 			break;
1723 	}
1724 	if (&file_ops->list != &ftrace_module_file_list) {
1725 		list_del(&file_ops->list);
1726 		kfree(file_ops);
1727 	}
1728 	up_write(&trace_event_sem);
1729 
1730 	/*
1731 	 * It is safest to reset the ring buffer if the module being unloaded
1732 	 * registered any events that were used. The only worry is if
1733 	 * a new module gets loaded, and takes on the same id as the events
1734 	 * of this module. When printing out the buffer, traced events left
1735 	 * over from this module may be passed to the new module events and
1736 	 * unexpected results may occur.
1737 	 */
1738 	if (clear_trace)
1739 		tracing_reset_all_online_cpus();
1740 }
1741 
1742 static int trace_module_notify(struct notifier_block *self,
1743 			       unsigned long val, void *data)
1744 {
1745 	struct module *mod = data;
1746 
1747 	mutex_lock(&event_mutex);
1748 	switch (val) {
1749 	case MODULE_STATE_COMING:
1750 		trace_module_add_events(mod);
1751 		break;
1752 	case MODULE_STATE_GOING:
1753 		trace_module_remove_events(mod);
1754 		break;
1755 	}
1756 	mutex_unlock(&event_mutex);
1757 
1758 	return 0;
1759 }
1760 
1761 static int
1762 __trace_add_new_mod_event(struct ftrace_event_call *call,
1763 			  struct trace_array *tr,
1764 			  struct ftrace_module_file_ops *file_ops)
1765 {
1766 	return __trace_add_new_event(call, tr,
1767 				     &file_ops->id, &file_ops->enable,
1768 				     &file_ops->filter, &file_ops->format);
1769 }
1770 
1771 #else
1772 static inline struct ftrace_module_file_ops *
1773 find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
1774 {
1775 	return NULL;
1776 }
1777 static inline int trace_module_notify(struct notifier_block *self,
1778 				      unsigned long val, void *data)
1779 {
1780 	return 0;
1781 }
1782 static inline int
1783 __trace_add_new_mod_event(struct ftrace_event_call *call,
1784 			  struct trace_array *tr,
1785 			  struct ftrace_module_file_ops *file_ops)
1786 {
1787 	return -ENODEV;
1788 }
1789 #endif /* CONFIG_MODULES */
1790 
1791 /* Create a new event directory structure for a trace directory. */
1792 static void
1793 __trace_add_event_dirs(struct trace_array *tr)
1794 {
1795 	struct ftrace_module_file_ops *file_ops = NULL;
1796 	struct ftrace_event_call *call;
1797 	int ret;
1798 
1799 	list_for_each_entry(call, &ftrace_events, list) {
1800 		if (call->mod) {
1801 			/*
1802 			 * Directories for events by modules need to
1803 			 * keep module ref counts when opened (as we don't
1804 			 * want the module to disappear when reading one
1805 			 * of these files). The file_ops keep track of
1806 			 * the module ref count.
1807 			 */
1808 			file_ops = find_ftrace_file_ops(file_ops, call->mod);
1809 			if (!file_ops)
1810 				continue; /* Warn? */
1811 			ret = __trace_add_new_mod_event(call, tr, file_ops);
1812 			if (ret < 0)
1813 				pr_warning("Could not create directory for event %s\n",
1814 					   call->name);
1815 			continue;
1816 		}
1817 		ret = __trace_add_new_event(call, tr,
1818 					    &ftrace_event_id_fops,
1819 					    &ftrace_enable_fops,
1820 					    &ftrace_event_filter_fops,
1821 					    &ftrace_event_format_fops);
1822 		if (ret < 0)
1823 			pr_warning("Could not create directory for event %s\n",
1824 				   call->name);
1825 	}
1826 }
1827 
1828 #ifdef CONFIG_DYNAMIC_FTRACE
1829 
1830 /* Avoid typos */
1831 #define ENABLE_EVENT_STR	"enable_event"
1832 #define DISABLE_EVENT_STR	"disable_event"
1833 
1834 struct event_probe_data {
1835 	struct ftrace_event_file	*file;
1836 	unsigned long			count;
1837 	int				ref;
1838 	bool				enable;
1839 };
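/*
 * These probes implement the set_ftrace_filter commands of the form
 *
 *	<function>:enable_event:<system>:<event>[:count]
 *	<function>:disable_event:<system>:<event>[:count]
 *
 * parsed by event_enable_func() below.
 */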
1840 
1841 static struct ftrace_event_file *
1842 find_event_file(struct trace_array *tr, const char *system,  const char *event)
1843 {
1844 	struct ftrace_event_file *file;
1845 	struct ftrace_event_call *call;
1846 
1847 	list_for_each_entry(file, &tr->events, list) {
1848 
1849 		call = file->event_call;
1850 
1851 		if (!call->name || !call->class || !call->class->reg)
1852 			continue;
1853 
1854 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1855 			continue;
1856 
1857 		if (strcmp(event, call->name) == 0 &&
1858 		    strcmp(system, call->class->system) == 0)
1859 			return file;
1860 	}
1861 	return NULL;
1862 }
1863 
1864 static void
1865 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1866 {
1867 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1868 	struct event_probe_data *data = *pdata;
1869 
1870 	if (!data)
1871 		return;
1872 
1873 	if (data->enable)
1874 		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1875 	else
1876 		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1877 }
1878 
1879 static void
1880 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1881 {
1882 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1883 	struct event_probe_data *data = *pdata;
1884 
1885 	if (!data)
1886 		return;
1887 
1888 	if (!data->count)
1889 		return;
1890 
1891 	/* Skip if the event is in a state we want to switch to */
1892 	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
1893 		return;
1894 
1895 	if (data->count != -1)
1896 		(data->count)--;
1897 
1898 	event_enable_probe(ip, parent_ip, _data);
1899 }
1900 
1901 static int
1902 event_enable_print(struct seq_file *m, unsigned long ip,
1903 		      struct ftrace_probe_ops *ops, void *_data)
1904 {
1905 	struct event_probe_data *data = _data;
1906 
1907 	seq_printf(m, "%ps:", (void *)ip);
1908 
1909 	seq_printf(m, "%s:%s:%s",
1910 		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1911 		   data->file->event_call->class->system,
1912 		   data->file->event_call->name);
1913 
1914 	if (data->count == -1)
1915 		seq_printf(m, ":unlimited\n");
1916 	else
1917 		seq_printf(m, ":count=%ld\n", data->count);
1918 
1919 	return 0;
1920 }
1921 
1922 static int
1923 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
1924 		  void **_data)
1925 {
1926 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1927 	struct event_probe_data *data = *pdata;
1928 
1929 	data->ref++;
1930 	return 0;
1931 }
1932 
1933 static void
1934 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
1935 		  void **_data)
1936 {
1937 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1938 	struct event_probe_data *data = *pdata;
1939 
1940 	if (WARN_ON_ONCE(data->ref <= 0))
1941 		return;
1942 
1943 	data->ref--;
1944 	if (!data->ref) {
1945 		/* Remove the SOFT_MODE flag */
1946 		__ftrace_event_enable_disable(data->file, 0, 1);
1947 		module_put(data->file->event_call->mod);
1948 		kfree(data);
1949 	}
1950 	*pdata = NULL;
1951 }
1952 
1953 static struct ftrace_probe_ops event_enable_probe_ops = {
1954 	.func			= event_enable_probe,
1955 	.print			= event_enable_print,
1956 	.init			= event_enable_init,
1957 	.free			= event_enable_free,
1958 };
1959 
1960 static struct ftrace_probe_ops event_enable_count_probe_ops = {
1961 	.func			= event_enable_count_probe,
1962 	.print			= event_enable_print,
1963 	.init			= event_enable_init,
1964 	.free			= event_enable_free,
1965 };
1966 
1967 static struct ftrace_probe_ops event_disable_probe_ops = {
1968 	.func			= event_enable_probe,
1969 	.print			= event_enable_print,
1970 	.init			= event_enable_init,
1971 	.free			= event_enable_free,
1972 };
1973 
1974 static struct ftrace_probe_ops event_disable_count_probe_ops = {
1975 	.func			= event_enable_count_probe,
1976 	.print			= event_enable_print,
1977 	.init			= event_enable_init,
1978 	.free			= event_enable_free,
1979 };
1980 
1981 static int
1982 event_enable_func(struct ftrace_hash *hash,
1983 		  char *glob, char *cmd, char *param, int enabled)
1984 {
1985 	struct trace_array *tr = top_trace_array();
1986 	struct ftrace_event_file *file;
1987 	struct ftrace_probe_ops *ops;
1988 	struct event_probe_data *data;
1989 	const char *system;
1990 	const char *event;
1991 	char *number;
1992 	bool enable;
1993 	int ret;
1994 
1995 	/* hash funcs only work with set_ftrace_filter */
1996 	if (!enabled)
1997 		return -EINVAL;
1998 
1999 	if (!param)
2000 		return -EINVAL;
2001 
2002 	system = strsep(&param, ":");
2003 	if (!param)
2004 		return -EINVAL;
2005 
2006 	event = strsep(&param, ":");
2007 
2008 	mutex_lock(&event_mutex);
2009 
2010 	ret = -EINVAL;
2011 	file = find_event_file(tr, system, event);
2012 	if (!file)
2013 		goto out;
2014 
2015 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2016 
2017 	if (enable)
2018 		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2019 	else
2020 		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2021 
2022 	if (glob[0] == '!') {
2023 		unregister_ftrace_function_probe_func(glob+1, ops);
2024 		ret = 0;
2025 		goto out;
2026 	}
2027 
2028 	ret = -ENOMEM;
2029 	data = kzalloc(sizeof(*data), GFP_KERNEL);
2030 	if (!data)
2031 		goto out;
2032 
2033 	data->enable = enable;
2034 	data->count = -1;
2035 	data->file = file;
2036 
2037 	if (!param)
2038 		goto out_reg;
2039 
2040 	number = strsep(&param, ":");
2041 
2042 	ret = -EINVAL;
2043 	if (!strlen(number))
2044 		goto out_free;
2045 
2046 	/*
2047 	 * We use the callback data field (which is a pointer)
2048 	 * as our counter.
2049 	 */
2050 	ret = kstrtoul(number, 0, &data->count);
2051 	if (ret)
2052 		goto out_free;
2053 
2054  out_reg:
2055 	/* Don't let event modules unload while probe registered */
2056 	ret = try_module_get(file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}
2059 
2060 	ret = __ftrace_event_enable_disable(file, 1, 1);
2061 	if (ret < 0)
2062 		goto out_put;
	ret = register_ftrace_function_probe(glob, ops, data);
	/*
	 * register_ftrace_function_probe() returns the number of functions
	 * it enabled on success and zero if it matched none; treat zero
	 * as a failure too, and return 0 (not the count) on success.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	ret = 0;
2066  out:
2067 	mutex_unlock(&event_mutex);
2068 	return ret;
2069 
2070  out_disable:
2071 	__ftrace_event_enable_disable(file, 0, 1);
2072  out_put:
2073 	module_put(file->event_call->mod);
2074  out_free:
2075 	kfree(data);
2076 	goto out;
2077 }
2078 
2079 static struct ftrace_func_command event_enable_cmd = {
2080 	.name			= ENABLE_EVENT_STR,
2081 	.func			= event_enable_func,
2082 };
2083 
2084 static struct ftrace_func_command event_disable_cmd = {
2085 	.name			= DISABLE_EVENT_STR,
2086 	.func			= event_enable_func,
2087 };
2088 
2089 static __init int register_event_cmds(void)
2090 {
2091 	int ret;
2092 
2093 	ret = register_ftrace_command(&event_enable_cmd);
2094 	if (WARN_ON(ret < 0))
2095 		return ret;
2096 	ret = register_ftrace_command(&event_disable_cmd);
2097 	if (WARN_ON(ret < 0))
2098 		unregister_ftrace_command(&event_enable_cmd);
2099 	return ret;
2100 }
2101 #else
2102 static inline int register_event_cmds(void) { return 0; }
2103 #endif /* CONFIG_DYNAMIC_FTRACE */
2104 
2105 /*
2106  * The top level array has already had its ftrace_event_file
2107  * descriptors created in order to allow for early events to
2108  * be recorded. This function is called after debugfs has been
2109  * initialized, and we now have to create the files associated
2110  * with the events.
2111  */
2112 static __init void
2113 __trace_early_add_event_dirs(struct trace_array *tr)
2114 {
2115 	struct ftrace_event_file *file;
2116 	int ret;
2117 
2119 	list_for_each_entry(file, &tr->events, list) {
2120 		ret = event_create_dir(tr->event_dir, file,
2121 				       &ftrace_event_id_fops,
2122 				       &ftrace_enable_fops,
2123 				       &ftrace_event_filter_fops,
2124 				       &ftrace_event_format_fops);
2125 		if (ret < 0)
2126 			pr_warning("Could not create directory for event %s\n",
2127 				   file->event_call->name);
2128 	}
2129 }
2130 
2131 /*
2132  * For early boot up, the top trace array needs to have
2133  * a list of events that can be enabled. This must be done before
2134  * the filesystem is set up in order to allow events to be traced
2135  * early.
2136  */
2137 static __init void
2138 __trace_early_add_events(struct trace_array *tr)
2139 {
2140 	struct ftrace_event_call *call;
2141 	int ret;
2142 
2143 	list_for_each_entry(call, &ftrace_events, list) {
2144 		/* Early boot up should not have any modules loaded */
2145 		if (WARN_ON_ONCE(call->mod))
2146 			continue;
2147 
2148 		ret = __trace_early_add_new_event(call, tr);
2149 		if (ret < 0)
2150 			pr_warning("Could not create early event %s\n",
2151 				   call->name);
2152 	}
2153 }
2154 
2155 /* Remove the event directory structure for a trace directory. */
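/* Called with event_mutex held and trace_event_sem held for writing. */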
2156 static void
2157 __trace_remove_event_dirs(struct trace_array *tr)
2158 {
2159 	struct ftrace_event_file *file, *next;
2160 
2161 	list_for_each_entry_safe(file, next, &tr->events, list) {
2162 		list_del(&file->list);
2163 		debugfs_remove_recursive(file->dir);
2164 		remove_subsystem(file->system);
2165 		kmem_cache_free(file_cachep, file);
2166 	}
2167 }
2168 
2169 static void
2170 __add_event_to_tracers(struct ftrace_event_call *call,
2171 		       struct ftrace_module_file_ops *file_ops)
2172 {
2173 	struct trace_array *tr;
2174 
2175 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2176 		if (file_ops)
2177 			__trace_add_new_mod_event(call, tr, file_ops);
2178 		else
2179 			__trace_add_new_event(call, tr,
2180 					      &ftrace_event_id_fops,
2181 					      &ftrace_enable_fops,
2182 					      &ftrace_event_filter_fops,
2183 					      &ftrace_event_format_fops);
2184 	}
2185 }
2186 
2187 static struct notifier_block trace_module_nb = {
2188 	.notifier_call = trace_module_notify,
2189 	.priority = 0,
2190 };
2191 
2192 extern struct ftrace_event_call *__start_ftrace_events[];
2193 extern struct ftrace_event_call *__stop_ftrace_events[];
2194 
2195 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2196 
2197 static __init int setup_trace_event(char *str)
2198 {
2199 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2200 	ring_buffer_expanded = true;
2201 	tracing_selftest_disabled = true;
2202 
2203 	return 1;
2204 }
2205 __setup("trace_event=", setup_trace_event);
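
/*
 * For illustration (a sketch of the boot-time usage): events can be
 * enabled before any filesystem exists via the kernel command line,
 * e.g.
 *
 *	trace_event=sched:sched_switch,sched:sched_wakeup
 *
 * The comma-separated list is saved in bootup_event_buf above and
 * consumed by event_trace_enable() below.
 */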
2206 
2207 /* Expects to have event_mutex held when called */
2208 static int
2209 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2210 {
2211 	struct dentry *d_events;
2212 	struct dentry *entry;
2213 
2214 	entry = debugfs_create_file("set_event", 0644, parent,
2215 				    tr, &ftrace_set_event_fops);
2216 	if (!entry) {
2217 		pr_warning("Could not create debugfs 'set_event' entry\n");
2218 		return -ENOMEM;
2219 	}
2220 
2221 	d_events = debugfs_create_dir("events", parent);
2222 	if (!d_events) {
2223 		pr_warning("Could not create debugfs 'events' directory\n");
2224 		return -ENOMEM;
2225 	}
2226 
2227 	/* ring buffer internal formats */
2228 	trace_create_file("header_page", 0444, d_events,
2229 			  ring_buffer_print_page_header,
2230 			  &ftrace_show_header_fops);
2231 
2232 	trace_create_file("header_event", 0444, d_events,
2233 			  ring_buffer_print_entry_header,
2234 			  &ftrace_show_header_fops);
2235 
2236 	trace_create_file("enable", 0644, d_events,
2237 			  tr, &ftrace_tr_enable_fops);
2238 
2239 	tr->event_dir = d_events;
2240 
2241 	return 0;
2242 }
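
/*
 * The resulting top-level layout under @parent is then (sketch):
 *
 *	set_event
 *	events/
 *		enable
 *		header_page
 *		header_event
 *
 * with per-event subdirectories added later by __trace_add_event_dirs()
 * or __trace_early_add_event_dirs().
 */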
2243 
2244 /**
2245  * event_trace_add_tracer - add an instance of a trace_array to events
2246  * @parent: The parent dentry to place the files/directories for events in
2247  * @tr: The trace array associated with these events
2248  *
2249  * When a new instance is created, it needs to set up its events
2250  * directory, as well as other files associated with events. It also
2251  * creates the event hierarchy in the @parent/events directory.
2252  *
2253  * Returns 0 on success.
2254  */
2255 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2256 {
2257 	int ret;
2258 
2259 	mutex_lock(&event_mutex);
2260 
2261 	ret = create_event_toplevel_files(parent, tr);
2262 	if (ret)
2263 		goto out_unlock;
2264 
2265 	down_write(&trace_event_sem);
2266 	__trace_add_event_dirs(tr);
2267 	up_write(&trace_event_sem);
2268 
2269  out_unlock:
2270 	mutex_unlock(&event_mutex);
2271 
2272 	return ret;
2273 }
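
/*
 * A sketch of how this is reached from user space (assuming the
 * instances directory is available under the tracing debugfs dir):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *
 * creates a new trace_array and calls here to populate
 * instances/foo/events/.
 */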
2274 
2275 /*
2276  * The top trace array already had its file descriptors created.
2277  * Now the files themselves need to be created.
2278  */
2279 static __init int
2280 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2281 {
2282 	int ret;
2283 
2284 	mutex_lock(&event_mutex);
2285 
2286 	ret = create_event_toplevel_files(parent, tr);
2287 	if (ret)
2288 		goto out_unlock;
2289 
2290 	down_write(&trace_event_sem);
2291 	__trace_early_add_event_dirs(tr);
2292 	up_write(&trace_event_sem);
2293 
2294  out_unlock:
2295 	mutex_unlock(&event_mutex);
2296 
2297 	return ret;
2298 }
2299 
2300 int event_trace_del_tracer(struct trace_array *tr)
2301 {
2302 	/* Disable any running events */
2303 	__ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2304 
2305 	mutex_lock(&event_mutex);
2306 
2307 	down_write(&trace_event_sem);
2308 	__trace_remove_event_dirs(tr);
2309 	debugfs_remove_recursive(tr->event_dir);
2310 	up_write(&trace_event_sem);
2311 
2312 	tr->event_dir = NULL;
2313 
2314 	mutex_unlock(&event_mutex);
2315 
2316 	return 0;
2317 }
2318 
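/*
 * SLAB_PANIC makes a failed cache creation panic the kernel, so code
 * using field_cachep and file_cachep never has to check that the
 * caches themselves exist.
 */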
2319 static __init int event_trace_memsetup(void)
2320 {
2321 	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2322 	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2323 	return 0;
2324 }
2325 
2326 static __init int event_trace_enable(void)
2327 {
2328 	struct trace_array *tr = top_trace_array();
2329 	struct ftrace_event_call **iter, *call;
2330 	char *buf = bootup_event_buf;
2331 	char *token;
2332 	int ret;
2333 
2334 	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2335 
2336 		call = *iter;
2337 		ret = event_init(call);
2338 		if (!ret)
2339 			list_add(&call->list, &ftrace_events);
2340 	}
2341 
2342 	/*
2343 	 * We need the top trace array to have a working set of trace
2344 	 * points at early init, before the debug files and directories
2345 	 * are created. Create the file entries now, and attach them
2346 	 * to the actual file dentries later.
2347 	 */
2348 	__trace_early_add_events(tr);
2349 
2350 	while (true) {
2351 		token = strsep(&buf, ",");
2352 
2353 		if (!token)
2354 			break;
2355 		if (!*token)
2356 			continue;
2357 
2358 		ret = ftrace_set_clr_event(tr, token, 1);
2359 		if (ret)
2360 			pr_warn("Failed to enable trace event: %s\n", token);
2361 	}
2362 
2363 	trace_printk_start_comm();
2364 
2365 	register_event_cmds();
2366 
2367 	return 0;
2368 }
2369 
2370 static __init int event_trace_init(void)
2371 {
2372 	struct trace_array *tr;
2373 	struct dentry *d_tracer;
2374 	struct dentry *entry;
2375 	int ret;
2376 
2377 	tr = top_trace_array();
2378 
2379 	d_tracer = tracing_init_dentry();
2380 	if (!d_tracer)
2381 		return 0;
2382 
2383 	entry = debugfs_create_file("available_events", 0444, d_tracer,
2384 				    tr, &ftrace_avail_fops);
2385 	if (!entry)
2386 		pr_warning("Could not create debugfs 'available_events' entry\n");
2388 
2389 	if (trace_define_common_fields())
2390 		pr_warning("tracing: Failed to allocate common fields\n");
2391 
2392 	ret = early_event_add_tracer(d_tracer, tr);
2393 	if (ret)
2394 		return ret;
2395 
2396 	ret = register_module_notifier(&trace_module_nb);
2397 	if (ret)
2398 		pr_warning("Failed to register trace events module notifier\n");
2399 
2400 	return 0;
2401 }
2402 early_initcall(event_trace_memsetup);
2403 core_initcall(event_trace_enable);
2404 fs_initcall(event_trace_init);
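
/*
 * Initcall ordering matters here: event_trace_memsetup() runs early to
 * create the slab caches, event_trace_enable() runs at core init to
 * register compiled-in events and apply the trace_event= boot option,
 * and event_trace_init() runs once the filesystem code is up so the
 * debugfs entries can be created.
 */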
2405 
2406 #ifdef CONFIG_FTRACE_STARTUP_TEST
2407 
2408 static DEFINE_SPINLOCK(test_spinlock);
2409 static DEFINE_SPINLOCK(test_spinlock_irq);
2410 static DEFINE_MUTEX(test_mutex);
2411 
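/*
 * Exercise spinlock, irq-off, mutex and scheduling paths so that any
 * tracepoints hooked into them have a chance to fire while the self
 * test has their events enabled.
 */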
2412 static __init void test_work(struct work_struct *dummy)
2413 {
2414 	spin_lock(&test_spinlock);
2415 	spin_lock_irq(&test_spinlock_irq);
2416 	udelay(1);
2417 	spin_unlock_irq(&test_spinlock_irq);
2418 	spin_unlock(&test_spinlock);
2419 
2420 	mutex_lock(&test_mutex);
2421 	msleep(1);
2422 	mutex_unlock(&test_mutex);
2423 }
2424 
2425 static __init int event_test_thread(void *unused)
2426 {
2427 	void *test_malloc;
2428 
2429 	test_malloc = kmalloc(1234, GFP_KERNEL);
2430 	if (!test_malloc)
2431 		pr_info("failed to kmalloc\n");
2432 
2433 	schedule_on_each_cpu(test_work);
2434 
2435 	kfree(test_malloc);
2436 
	/* Re-arm the sleep state each iteration or we busy-loop after a wakeup */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
2440 
2441 	return 0;
2442 }
2443 
2444 /*
2445  * Do various things that may trigger events.
2446  */
2447 static __init void event_test_stuff(void)
2448 {
2449 	struct task_struct *test_thread;
2450 
	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	/* kthread_run() returns an ERR_PTR on failure, never NULL */
	if (WARN_ON(IS_ERR(test_thread)))
		return;
	msleep(1);
	kthread_stop(test_thread);
2454 }
2455 
2456 /*
2457  * For every trace event defined, we will test each trace point separately,
2458  * and then by groups, and finally all trace points.
2459  */
2460 static __init void event_trace_self_tests(void)
2461 {
2462 	struct ftrace_subsystem_dir *dir;
2463 	struct ftrace_event_file *file;
2464 	struct ftrace_event_call *call;
2465 	struct event_subsystem *system;
2466 	struct trace_array *tr;
2467 	int ret;
2468 
2469 	tr = top_trace_array();
2470 
2471 	pr_info("Running tests on trace events:\n");
2472 
2473 	list_for_each_entry(file, &tr->events, list) {
2474 
2475 		call = file->event_call;
2476 
2477 		/* Only test those that have a probe */
2478 		if (!call->class || !call->class->probe)
2479 			continue;
2480 
2481 /*
2482  * Testing syscall events here is pretty useless, but
2483  * we still do it if configured, even though it is time
2484  * consuming. What we really need is a user thread to
2485  * perform the syscalls as we test.
2486  */
2487 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2488 		if (call->class->system &&
2489 		    strcmp(call->class->system, "syscalls") == 0)
2490 			continue;
2491 #endif
2492 
2493 		pr_info("Testing event %s: ", call->name);
2494 
2495 		/*
2496 		 * If an event is already enabled, someone is using
2497 		 * it and the self test should not be on.
2498 		 */
2499 		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2500 			pr_warning("Enabled event during self test!\n");
2501 			WARN_ON_ONCE(1);
2502 			continue;
2503 		}
2504 
2505 		ftrace_event_enable_disable(file, 1);
2506 		event_test_stuff();
2507 		ftrace_event_enable_disable(file, 0);
2508 
2509 		pr_cont("OK\n");
2510 	}
2511 
2512 	/* Now test at the sub system level */
2513 
2514 	pr_info("Running tests on trace event systems:\n");
2515 
2516 	list_for_each_entry(dir, &tr->systems, list) {
2517 
2518 		system = dir->subsystem;
2519 
2520 		/* the ftrace system is special, skip it */
2521 		if (strcmp(system->name, "ftrace") == 0)
2522 			continue;
2523 
2524 		pr_info("Testing event system %s: ", system->name);
2525 
2526 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2527 		if (WARN_ON_ONCE(ret)) {
2528 			pr_warning("error enabling system %s\n",
2529 				   system->name);
2530 			continue;
2531 		}
2532 
2533 		event_test_stuff();
2534 
2535 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2536 		if (WARN_ON_ONCE(ret)) {
2537 			pr_warning("error disabling system %s\n",
2538 				   system->name);
2539 			continue;
2540 		}
2541 
2542 		pr_cont("OK\n");
2543 	}
2544 
2545 	/* Test with all events enabled */
2546 
2547 	pr_info("Running tests on all trace events:\n");
2548 	pr_info("Testing all events: ");
2549 
2550 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2551 	if (WARN_ON_ONCE(ret)) {
2552 		pr_warning("error enabling all events\n");
2553 		return;
2554 	}
2555 
2556 	event_test_stuff();
2557 
2558 	/* Disable all events again */
2559 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2560 	if (WARN_ON_ONCE(ret)) {
2561 		pr_warning("error disabling all events\n");
2562 		return;
2563 	}
2564 
2565 	pr_cont("OK\n");
2566 }
2567 
2568 #ifdef CONFIG_FUNCTION_TRACER
2569 
2570 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2571 
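/*
 * The function probe below can itself hit traced functions, so a
 * per-cpu nesting counter lets only the outermost invocation on each
 * cpu write to the ring buffer.
 */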
2572 static void
2573 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2574 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
2575 {
2576 	struct ring_buffer_event *event;
2577 	struct ring_buffer *buffer;
2578 	struct ftrace_entry *entry;
2579 	unsigned long flags;
2580 	long disabled;
2581 	int cpu;
2582 	int pc;
2583 
2584 	pc = preempt_count();
2585 	preempt_disable_notrace();
2586 	cpu = raw_smp_processor_id();
2587 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2588 
2589 	if (disabled != 1)
2590 		goto out;
2591 
2592 	local_save_flags(flags);
2593 
2594 	event = trace_current_buffer_lock_reserve(&buffer,
2595 						  TRACE_FN, sizeof(*entry),
2596 						  flags, pc);
2597 	if (!event)
2598 		goto out;
2599 	entry	= ring_buffer_event_data(event);
2600 	entry->ip			= ip;
2601 	entry->parent_ip		= parent_ip;
2602 
2603 	trace_buffer_unlock_commit(buffer, event, flags, pc);
2604 
2605  out:
2606 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2607 	preempt_enable_notrace();
2608 }
2609 
2610 static struct ftrace_ops trace_ops __initdata = {
2612 	.func = function_test_events_call,
2613 	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
2614 };
2615 
2616 static __init void event_trace_self_test_with_function(void)
2617 {
2618 	int ret;
2619 	ret = register_ftrace_function(&trace_ops);
2620 	if (WARN_ON(ret < 0)) {
2621 		pr_info("Failed to enable function tracer for event tests\n");
2622 		return;
2623 	}
2624 	pr_info("Running tests again, along with the function tracer\n");
2625 	event_trace_self_tests();
2626 	unregister_ftrace_function(&trace_ops);
2627 }
2628 #else
2629 static __init void event_trace_self_test_with_function(void)
2630 {
2631 }
2632 #endif
2633 
2634 static __init int event_trace_self_tests_init(void)
2635 {
2636 	if (!tracing_selftest_disabled) {
2637 		event_trace_self_tests();
2638 		event_trace_self_test_with_function();
2639 	}
2640 
2641 	return 0;
2642 }
2643 
2644 late_initcall(event_trace_self_tests_init);
2645 
2646 #endif
2647