xref: /linux/kernel/trace/trace_events.c (revision 9538aa46c2427d6782aa10036c4da4c541605e0e)
1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of fields of the trace point.
7  *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10 
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 
21 #include <asm/setup.h>
22 
23 #include "trace_output.h"
24 
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27 
28 DEFINE_MUTEX(event_mutex);
29 
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32 
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35 
36 LIST_HEAD(ftrace_events);
37 static LIST_HEAD(ftrace_common_fields);
38 
39 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
40 
41 static struct kmem_cache *field_cachep;
42 static struct kmem_cache *file_cachep;
43 
44 #define SYSTEM_FL_FREE_NAME		(1 << 31)
45 
46 static inline int system_refcount(struct event_subsystem *system)
47 {
48 	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
49 }
50 
51 static int system_refcount_inc(struct event_subsystem *system)
52 {
53 	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
54 }
55 
56 static int system_refcount_dec(struct event_subsystem *system)
57 {
58 	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
59 }
60 
61 /* Double loops, do not use break, only goto's work */
62 #define do_for_each_event_file(tr, file)			\
63 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
64 		list_for_each_entry(file, &tr->events, list)
65 
66 #define do_for_each_event_file_safe(tr, file)			\
67 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
68 		struct ftrace_event_file *___n;				\
69 		list_for_each_entry_safe(file, ___n, &tr->events, list)
70 
71 #define while_for_each_event_file()		\
72 	}
73 
74 static struct list_head *
75 trace_get_fields(struct ftrace_event_call *event_call)
76 {
77 	if (!event_call->class->get_fields)
78 		return &event_call->class->fields;
79 	return event_call->class->get_fields(event_call);
80 }
81 
82 static struct ftrace_event_field *
83 __find_event_field(struct list_head *head, char *name)
84 {
85 	struct ftrace_event_field *field;
86 
87 	list_for_each_entry(field, head, link) {
88 		if (!strcmp(field->name, name))
89 			return field;
90 	}
91 
92 	return NULL;
93 }
94 
95 struct ftrace_event_field *
96 trace_find_event_field(struct ftrace_event_call *call, char *name)
97 {
98 	struct ftrace_event_field *field;
99 	struct list_head *head;
100 
101 	field = __find_event_field(&ftrace_common_fields, name);
102 	if (field)
103 		return field;
104 
105 	head = trace_get_fields(call);
106 	return __find_event_field(head, name);
107 }
108 
109 static int __trace_define_field(struct list_head *head, const char *type,
110 				const char *name, int offset, int size,
111 				int is_signed, int filter_type)
112 {
113 	struct ftrace_event_field *field;
114 
115 	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
116 	if (!field)
117 		return -ENOMEM;
118 
119 	field->name = name;
120 	field->type = type;
121 
122 	if (filter_type == FILTER_OTHER)
123 		field->filter_type = filter_assign_type(type);
124 	else
125 		field->filter_type = filter_type;
126 
127 	field->offset = offset;
128 	field->size = size;
129 	field->is_signed = is_signed;
130 
131 	list_add(&field->link, head);
132 
133 	return 0;
134 }
135 
136 int trace_define_field(struct ftrace_event_call *call, const char *type,
137 		       const char *name, int offset, int size, int is_signed,
138 		       int filter_type)
139 {
140 	struct list_head *head;
141 
142 	if (WARN_ON(!call->class))
143 		return 0;
144 
145 	head = trace_get_fields(call);
146 	return __trace_define_field(head, type, name, offset, size,
147 				    is_signed, filter_type);
148 }
149 EXPORT_SYMBOL_GPL(trace_define_field);
150 
151 #define __common_field(type, item)					\
152 	ret = __trace_define_field(&ftrace_common_fields, #type,	\
153 				   "common_" #item,			\
154 				   offsetof(typeof(ent), item),		\
155 				   sizeof(ent.item),			\
156 				   is_signed_type(type), FILTER_OTHER);	\
157 	if (ret)							\
158 		return ret;
159 
160 static int trace_define_common_fields(void)
161 {
162 	int ret;
163 	struct trace_entry ent;
164 
165 	__common_field(unsigned short, type);
166 	__common_field(unsigned char, flags);
167 	__common_field(unsigned char, preempt_count);
168 	__common_field(int, pid);
169 
170 	return ret;
171 }
172 
173 static void trace_destroy_fields(struct ftrace_event_call *call)
174 {
175 	struct ftrace_event_field *field, *next;
176 	struct list_head *head;
177 
178 	head = trace_get_fields(call);
179 	list_for_each_entry_safe(field, next, head, link) {
180 		list_del(&field->link);
181 		kmem_cache_free(field_cachep, field);
182 	}
183 }
184 
185 int trace_event_raw_init(struct ftrace_event_call *call)
186 {
187 	int id;
188 
189 	id = register_ftrace_event(&call->event);
190 	if (!id)
191 		return -ENODEV;
192 
193 	return 0;
194 }
195 EXPORT_SYMBOL_GPL(trace_event_raw_init);
196 
197 int ftrace_event_reg(struct ftrace_event_call *call,
198 		     enum trace_reg type, void *data)
199 {
200 	struct ftrace_event_file *file = data;
201 
202 	switch (type) {
203 	case TRACE_REG_REGISTER:
204 		return tracepoint_probe_register(call->name,
205 						 call->class->probe,
206 						 file);
207 	case TRACE_REG_UNREGISTER:
208 		tracepoint_probe_unregister(call->name,
209 					    call->class->probe,
210 					    file);
211 		return 0;
212 
213 #ifdef CONFIG_PERF_EVENTS
214 	case TRACE_REG_PERF_REGISTER:
215 		return tracepoint_probe_register(call->name,
216 						 call->class->perf_probe,
217 						 call);
218 	case TRACE_REG_PERF_UNREGISTER:
219 		tracepoint_probe_unregister(call->name,
220 					    call->class->perf_probe,
221 					    call);
222 		return 0;
223 	case TRACE_REG_PERF_OPEN:
224 	case TRACE_REG_PERF_CLOSE:
225 	case TRACE_REG_PERF_ADD:
226 	case TRACE_REG_PERF_DEL:
227 		return 0;
228 #endif
229 	}
230 	return 0;
231 }
232 EXPORT_SYMBOL_GPL(ftrace_event_reg);
233 
234 void trace_event_enable_cmd_record(bool enable)
235 {
236 	struct ftrace_event_file *file;
237 	struct trace_array *tr;
238 
239 	mutex_lock(&event_mutex);
240 	do_for_each_event_file(tr, file) {
241 
242 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
243 			continue;
244 
245 		if (enable) {
246 			tracing_start_cmdline_record();
247 			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
248 		} else {
249 			tracing_stop_cmdline_record();
250 			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
251 		}
252 	} while_for_each_event_file();
253 	mutex_unlock(&event_mutex);
254 }
255 
256 static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
257 					 int enable, int soft_disable)
258 {
259 	struct ftrace_event_call *call = file->event_call;
260 	int ret = 0;
261 	int disable;
262 
263 	switch (enable) {
264 	case 0:
265 		/*
266 		 * When soft_disable is set and enable is cleared, the sm_ref
267 		 * reference counter is decremented. If it reaches 0, we want
268 		 * to clear the SOFT_DISABLED flag but leave the event in the
269 		 * state that it was. That is, if the event was enabled and
270 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
271 		 * is set we do not want the event to be enabled before we
272 		 * clear the bit.
273 		 *
274 		 * When soft_disable is not set but the SOFT_MODE flag is,
275 		 * we do nothing. Do not disable the tracepoint, otherwise
276 		 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
277 		 */
278 		if (soft_disable) {
279 			if (atomic_dec_return(&file->sm_ref) > 0)
280 				break;
281 			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
282 			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
283 		} else
284 			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
285 
286 		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
287 			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
288 			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
289 				tracing_stop_cmdline_record();
290 				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
291 			}
292 			call->class->reg(call, TRACE_REG_UNREGISTER, file);
293 		}
294 		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
295 		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
296 			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
297 		else
298 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
299 		break;
300 	case 1:
301 		/*
302 		 * When soft_disable is set and enable is set, we want to
303 		 * register the tracepoint for the event, but leave the event
304 		 * as is. That means, if the event was already enabled, we do
305 		 * nothing (but set SOFT_MODE). If the event is disabled, we
306 		 * set SOFT_DISABLED before enabling the event tracepoint, so
307 		 * it still seems to be disabled.
308 		 */
309 		if (!soft_disable)
310 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
311 		else {
312 			if (atomic_inc_return(&file->sm_ref) > 1)
313 				break;
314 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
315 		}
316 
317 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
318 
319 			/* Keep the event disabled, when going to SOFT_MODE. */
320 			if (soft_disable)
321 				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
322 
323 			if (trace_flags & TRACE_ITER_RECORD_CMD) {
324 				tracing_start_cmdline_record();
325 				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
326 			}
327 			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
328 			if (ret) {
329 				tracing_stop_cmdline_record();
330 				pr_info("event trace: Could not enable event "
331 					"%s\n", call->name);
332 				break;
333 			}
334 			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
335 
336 			/* WAS_ENABLED gets set but never cleared. */
337 			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
338 		}
339 		break;
340 	}
341 
342 	return ret;
343 }
344 
345 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
346 				       int enable)
347 {
348 	return __ftrace_event_enable_disable(file, enable, 0);
349 }
350 
351 static void ftrace_clear_events(struct trace_array *tr)
352 {
353 	struct ftrace_event_file *file;
354 
355 	mutex_lock(&event_mutex);
356 	list_for_each_entry(file, &tr->events, list) {
357 		ftrace_event_enable_disable(file, 0);
358 	}
359 	mutex_unlock(&event_mutex);
360 }
361 
362 static void __put_system(struct event_subsystem *system)
363 {
364 	struct event_filter *filter = system->filter;
365 
366 	WARN_ON_ONCE(system_refcount(system) == 0);
367 	if (system_refcount_dec(system))
368 		return;
369 
370 	list_del(&system->list);
371 
372 	if (filter) {
373 		kfree(filter->filter_string);
374 		kfree(filter);
375 	}
376 	if (system->ref_count & SYSTEM_FL_FREE_NAME)
377 		kfree(system->name);
378 	kfree(system);
379 }
380 
381 static void __get_system(struct event_subsystem *system)
382 {
383 	WARN_ON_ONCE(system_refcount(system) == 0);
384 	system_refcount_inc(system);
385 }
386 
387 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
388 {
389 	WARN_ON_ONCE(dir->ref_count == 0);
390 	dir->ref_count++;
391 	__get_system(dir->subsystem);
392 }
393 
394 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
395 {
396 	WARN_ON_ONCE(dir->ref_count == 0);
397 	/* If the subsystem is about to be freed, the dir must be too */
398 	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
399 
400 	__put_system(dir->subsystem);
401 	if (!--dir->ref_count)
402 		kfree(dir);
403 }
404 
405 static void put_system(struct ftrace_subsystem_dir *dir)
406 {
407 	mutex_lock(&event_mutex);
408 	__put_system_dir(dir);
409 	mutex_unlock(&event_mutex);
410 }
411 
412 /*
413  * Open and update trace_array ref count.
414  * Must have the current trace_array passed to it.
415  */
416 static int tracing_open_generic_file(struct inode *inode, struct file *filp)
417 {
418 	struct ftrace_event_file *file = inode->i_private;
419 	struct trace_array *tr = file->tr;
420 	int ret;
421 
422 	if (trace_array_get(tr) < 0)
423 		return -ENODEV;
424 
425 	ret = tracing_open_generic(inode, filp);
426 	if (ret < 0)
427 		trace_array_put(tr);
428 	return ret;
429 }
430 
431 static int tracing_release_generic_file(struct inode *inode, struct file *filp)
432 {
433 	struct ftrace_event_file *file = inode->i_private;
434 	struct trace_array *tr = file->tr;
435 
436 	trace_array_put(tr);
437 
438 	return 0;
439 }
440 
441 /*
442  * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
443  */
444 static int
445 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
446 			      const char *sub, const char *event, int set)
447 {
448 	struct ftrace_event_file *file;
449 	struct ftrace_event_call *call;
450 	int ret = -EINVAL;
451 
452 	list_for_each_entry(file, &tr->events, list) {
453 
454 		call = file->event_call;
455 
456 		if (!call->name || !call->class || !call->class->reg)
457 			continue;
458 
459 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
460 			continue;
461 
462 		if (match &&
463 		    strcmp(match, call->name) != 0 &&
464 		    strcmp(match, call->class->system) != 0)
465 			continue;
466 
467 		if (sub && strcmp(sub, call->class->system) != 0)
468 			continue;
469 
470 		if (event && strcmp(event, call->name) != 0)
471 			continue;
472 
473 		ftrace_event_enable_disable(file, set);
474 
475 		ret = 0;
476 	}
477 
478 	return ret;
479 }
480 
481 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
482 				  const char *sub, const char *event, int set)
483 {
484 	int ret;
485 
486 	mutex_lock(&event_mutex);
487 	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
488 	mutex_unlock(&event_mutex);
489 
490 	return ret;
491 }
492 
493 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
494 {
495 	char *event = NULL, *sub = NULL, *match;
496 
497 	/*
498 	 * The buf format can be <subsystem>:<event-name>
499 	 *  *:<event-name> means any event by that name.
500 	 *  :<event-name> is the same.
501 	 *
502 	 *  <subsystem>:* means all events in that subsystem
503 	 *  <subsystem>: means the same.
504 	 *
505 	 *  <name> (no ':') means all events in a subsystem with
506 	 *  the name <name> or any event that matches <name>
507 	 */
508 
509 	match = strsep(&buf, ":");
510 	if (buf) {
511 		sub = match;
512 		event = buf;
513 		match = NULL;
514 
515 		if (!strlen(sub) || strcmp(sub, "*") == 0)
516 			sub = NULL;
517 		if (!strlen(event) || strcmp(event, "*") == 0)
518 			event = NULL;
519 	}
520 
521 	return __ftrace_set_clr_event(tr, match, sub, event, set);
522 }
523 
524 /**
525  * trace_set_clr_event - enable or disable an event
526  * @system: system name to match (NULL for any system)
527  * @event: event name to match (NULL for all events, within system)
528  * @set: 1 to enable, 0 to disable
529  *
530  * This is a way for other parts of the kernel to enable or disable
531  * event recording.
532  *
533  * Returns 0 on success, -EINVAL if the parameters do not match any
534  * registered events.
535  */
536 int trace_set_clr_event(const char *system, const char *event, int set)
537 {
538 	struct trace_array *tr = top_trace_array();
539 
540 	return __ftrace_set_clr_event(tr, NULL, system, event, set);
541 }
542 EXPORT_SYMBOL_GPL(trace_set_clr_event);
543 
544 /* 128 should be much more than enough */
545 #define EVENT_BUF_SIZE		127
546 
547 static ssize_t
548 ftrace_event_write(struct file *file, const char __user *ubuf,
549 		   size_t cnt, loff_t *ppos)
550 {
551 	struct trace_parser parser;
552 	struct seq_file *m = file->private_data;
553 	struct trace_array *tr = m->private;
554 	ssize_t read, ret;
555 
556 	if (!cnt)
557 		return 0;
558 
559 	ret = tracing_update_buffers();
560 	if (ret < 0)
561 		return ret;
562 
563 	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
564 		return -ENOMEM;
565 
566 	read = trace_get_user(&parser, ubuf, cnt, ppos);
567 
568 	if (read >= 0 && trace_parser_loaded((&parser))) {
569 		int set = 1;
570 
571 		if (*parser.buffer == '!')
572 			set = 0;
573 
574 		parser.buffer[parser.idx] = 0;
575 
576 		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
577 		if (ret)
578 			goto out_put;
579 	}
580 
581 	ret = read;
582 
583  out_put:
584 	trace_parser_put(&parser);
585 
586 	return ret;
587 }
588 
589 static void *
590 t_next(struct seq_file *m, void *v, loff_t *pos)
591 {
592 	struct ftrace_event_file *file = v;
593 	struct ftrace_event_call *call;
594 	struct trace_array *tr = m->private;
595 
596 	(*pos)++;
597 
598 	list_for_each_entry_continue(file, &tr->events, list) {
599 		call = file->event_call;
600 		/*
601 		 * The ftrace subsystem is for showing formats only.
602 		 * They can not be enabled or disabled via the event files.
603 		 */
604 		if (call->class && call->class->reg)
605 			return file;
606 	}
607 
608 	return NULL;
609 }
610 
611 static void *t_start(struct seq_file *m, loff_t *pos)
612 {
613 	struct ftrace_event_file *file;
614 	struct trace_array *tr = m->private;
615 	loff_t l;
616 
617 	mutex_lock(&event_mutex);
618 
619 	file = list_entry(&tr->events, struct ftrace_event_file, list);
620 	for (l = 0; l <= *pos; ) {
621 		file = t_next(m, file, &l);
622 		if (!file)
623 			break;
624 	}
625 	return file;
626 }
627 
628 static void *
629 s_next(struct seq_file *m, void *v, loff_t *pos)
630 {
631 	struct ftrace_event_file *file = v;
632 	struct trace_array *tr = m->private;
633 
634 	(*pos)++;
635 
636 	list_for_each_entry_continue(file, &tr->events, list) {
637 		if (file->flags & FTRACE_EVENT_FL_ENABLED)
638 			return file;
639 	}
640 
641 	return NULL;
642 }
643 
644 static void *s_start(struct seq_file *m, loff_t *pos)
645 {
646 	struct ftrace_event_file *file;
647 	struct trace_array *tr = m->private;
648 	loff_t l;
649 
650 	mutex_lock(&event_mutex);
651 
652 	file = list_entry(&tr->events, struct ftrace_event_file, list);
653 	for (l = 0; l <= *pos; ) {
654 		file = s_next(m, file, &l);
655 		if (!file)
656 			break;
657 	}
658 	return file;
659 }
660 
661 static int t_show(struct seq_file *m, void *v)
662 {
663 	struct ftrace_event_file *file = v;
664 	struct ftrace_event_call *call = file->event_call;
665 
666 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
667 		seq_printf(m, "%s:", call->class->system);
668 	seq_printf(m, "%s\n", call->name);
669 
670 	return 0;
671 }
672 
673 static void t_stop(struct seq_file *m, void *p)
674 {
675 	mutex_unlock(&event_mutex);
676 }
677 
678 static ssize_t
679 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
680 		  loff_t *ppos)
681 {
682 	struct ftrace_event_file *file = filp->private_data;
683 	char buf[4] = "0";
684 
685 	if (file->flags & FTRACE_EVENT_FL_ENABLED &&
686 	    !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
687 		strcpy(buf, "1");
688 
689 	if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
690 	    file->flags & FTRACE_EVENT_FL_SOFT_MODE)
691 		strcat(buf, "*");
692 
693 	strcat(buf, "\n");
694 
695 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
696 }
697 
698 static ssize_t
699 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
700 		   loff_t *ppos)
701 {
702 	struct ftrace_event_file *file = filp->private_data;
703 	unsigned long val;
704 	int ret;
705 
706 	if (!file)
707 		return -EINVAL;
708 
709 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
710 	if (ret)
711 		return ret;
712 
713 	ret = tracing_update_buffers();
714 	if (ret < 0)
715 		return ret;
716 
717 	switch (val) {
718 	case 0:
719 	case 1:
720 		mutex_lock(&event_mutex);
721 		ret = ftrace_event_enable_disable(file, val);
722 		mutex_unlock(&event_mutex);
723 		break;
724 
725 	default:
726 		return -EINVAL;
727 	}
728 
729 	*ppos += cnt;
730 
731 	return ret ? ret : cnt;
732 }
733 
734 static ssize_t
735 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
736 		   loff_t *ppos)
737 {
738 	const char set_to_char[4] = { '?', '0', '1', 'X' };
739 	struct ftrace_subsystem_dir *dir = filp->private_data;
740 	struct event_subsystem *system = dir->subsystem;
741 	struct ftrace_event_call *call;
742 	struct ftrace_event_file *file;
743 	struct trace_array *tr = dir->tr;
744 	char buf[2];
745 	int set = 0;
746 	int ret;
747 
748 	mutex_lock(&event_mutex);
749 	list_for_each_entry(file, &tr->events, list) {
750 		call = file->event_call;
751 		if (!call->name || !call->class || !call->class->reg)
752 			continue;
753 
754 		if (system && strcmp(call->class->system, system->name) != 0)
755 			continue;
756 
757 		/*
758 		 * We need to find out if all the events are set
759 		 * or if all events or cleared, or if we have
760 		 * a mixture.
761 		 */
762 		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
763 
764 		/*
765 		 * If we have a mixture, no need to look further.
766 		 */
767 		if (set == 3)
768 			break;
769 	}
770 	mutex_unlock(&event_mutex);
771 
772 	buf[0] = set_to_char[set];
773 	buf[1] = '\n';
774 
775 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
776 
777 	return ret;
778 }
779 
780 static ssize_t
781 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
782 		    loff_t *ppos)
783 {
784 	struct ftrace_subsystem_dir *dir = filp->private_data;
785 	struct event_subsystem *system = dir->subsystem;
786 	const char *name = NULL;
787 	unsigned long val;
788 	ssize_t ret;
789 
790 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
791 	if (ret)
792 		return ret;
793 
794 	ret = tracing_update_buffers();
795 	if (ret < 0)
796 		return ret;
797 
798 	if (val != 0 && val != 1)
799 		return -EINVAL;
800 
801 	/*
802 	 * Opening of "enable" adds a ref count to system,
803 	 * so the name is safe to use.
804 	 */
805 	if (system)
806 		name = system->name;
807 
808 	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
809 	if (ret)
810 		goto out;
811 
812 	ret = cnt;
813 
814 out:
815 	*ppos += cnt;
816 
817 	return ret;
818 }
819 
820 enum {
821 	FORMAT_HEADER		= 1,
822 	FORMAT_FIELD_SEPERATOR	= 2,
823 	FORMAT_PRINTFMT		= 3,
824 };
825 
826 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
827 {
828 	struct ftrace_event_call *call = m->private;
829 	struct ftrace_event_field *field;
830 	struct list_head *common_head = &ftrace_common_fields;
831 	struct list_head *head = trace_get_fields(call);
832 
833 	(*pos)++;
834 
835 	switch ((unsigned long)v) {
836 	case FORMAT_HEADER:
837 		if (unlikely(list_empty(common_head)))
838 			return NULL;
839 
840 		field = list_entry(common_head->prev,
841 				   struct ftrace_event_field, link);
842 		return field;
843 
844 	case FORMAT_FIELD_SEPERATOR:
845 		if (unlikely(list_empty(head)))
846 			return NULL;
847 
848 		field = list_entry(head->prev, struct ftrace_event_field, link);
849 		return field;
850 
851 	case FORMAT_PRINTFMT:
852 		/* all done */
853 		return NULL;
854 	}
855 
856 	field = v;
857 	if (field->link.prev == common_head)
858 		return (void *)FORMAT_FIELD_SEPERATOR;
859 	else if (field->link.prev == head)
860 		return (void *)FORMAT_PRINTFMT;
861 
862 	field = list_entry(field->link.prev, struct ftrace_event_field, link);
863 
864 	return field;
865 }
866 
867 static void *f_start(struct seq_file *m, loff_t *pos)
868 {
869 	loff_t l = 0;
870 	void *p;
871 
872 	/* Start by showing the header */
873 	if (!*pos)
874 		return (void *)FORMAT_HEADER;
875 
876 	p = (void *)FORMAT_HEADER;
877 	do {
878 		p = f_next(m, p, &l);
879 	} while (p && l < *pos);
880 
881 	return p;
882 }
883 
884 static int f_show(struct seq_file *m, void *v)
885 {
886 	struct ftrace_event_call *call = m->private;
887 	struct ftrace_event_field *field;
888 	const char *array_descriptor;
889 
890 	switch ((unsigned long)v) {
891 	case FORMAT_HEADER:
892 		seq_printf(m, "name: %s\n", call->name);
893 		seq_printf(m, "ID: %d\n", call->event.type);
894 		seq_printf(m, "format:\n");
895 		return 0;
896 
897 	case FORMAT_FIELD_SEPERATOR:
898 		seq_putc(m, '\n');
899 		return 0;
900 
901 	case FORMAT_PRINTFMT:
902 		seq_printf(m, "\nprint fmt: %s\n",
903 			   call->print_fmt);
904 		return 0;
905 	}
906 
907 	field = v;
908 
909 	/*
910 	 * Smartly shows the array type(except dynamic array).
911 	 * Normal:
912 	 *	field:TYPE VAR
913 	 * If TYPE := TYPE[LEN], it is shown:
914 	 *	field:TYPE VAR[LEN]
915 	 */
916 	array_descriptor = strchr(field->type, '[');
917 
918 	if (!strncmp(field->type, "__data_loc", 10))
919 		array_descriptor = NULL;
920 
921 	if (!array_descriptor)
922 		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
923 			   field->type, field->name, field->offset,
924 			   field->size, !!field->is_signed);
925 	else
926 		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
927 			   (int)(array_descriptor - field->type),
928 			   field->type, field->name,
929 			   array_descriptor, field->offset,
930 			   field->size, !!field->is_signed);
931 
932 	return 0;
933 }
934 
935 static void f_stop(struct seq_file *m, void *p)
936 {
937 }
938 
939 static const struct seq_operations trace_format_seq_ops = {
940 	.start		= f_start,
941 	.next		= f_next,
942 	.stop		= f_stop,
943 	.show		= f_show,
944 };
945 
946 static int trace_format_open(struct inode *inode, struct file *file)
947 {
948 	struct ftrace_event_call *call = inode->i_private;
949 	struct seq_file *m;
950 	int ret;
951 
952 	ret = seq_open(file, &trace_format_seq_ops);
953 	if (ret < 0)
954 		return ret;
955 
956 	m = file->private_data;
957 	m->private = call;
958 
959 	return 0;
960 }
961 
962 static ssize_t
963 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
964 {
965 	struct ftrace_event_call *call = filp->private_data;
966 	struct trace_seq *s;
967 	int r;
968 
969 	if (*ppos)
970 		return 0;
971 
972 	s = kmalloc(sizeof(*s), GFP_KERNEL);
973 	if (!s)
974 		return -ENOMEM;
975 
976 	trace_seq_init(s);
977 	trace_seq_printf(s, "%d\n", call->event.type);
978 
979 	r = simple_read_from_buffer(ubuf, cnt, ppos,
980 				    s->buffer, s->len);
981 	kfree(s);
982 	return r;
983 }
984 
985 static ssize_t
986 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
987 		  loff_t *ppos)
988 {
989 	struct ftrace_event_call *call = filp->private_data;
990 	struct trace_seq *s;
991 	int r;
992 
993 	if (*ppos)
994 		return 0;
995 
996 	s = kmalloc(sizeof(*s), GFP_KERNEL);
997 	if (!s)
998 		return -ENOMEM;
999 
1000 	trace_seq_init(s);
1001 
1002 	print_event_filter(call, s);
1003 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1004 
1005 	kfree(s);
1006 
1007 	return r;
1008 }
1009 
1010 static ssize_t
1011 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1012 		   loff_t *ppos)
1013 {
1014 	struct ftrace_event_call *call = filp->private_data;
1015 	char *buf;
1016 	int err;
1017 
1018 	if (cnt >= PAGE_SIZE)
1019 		return -EINVAL;
1020 
1021 	buf = (char *)__get_free_page(GFP_TEMPORARY);
1022 	if (!buf)
1023 		return -ENOMEM;
1024 
1025 	if (copy_from_user(buf, ubuf, cnt)) {
1026 		free_page((unsigned long) buf);
1027 		return -EFAULT;
1028 	}
1029 	buf[cnt] = '\0';
1030 
1031 	err = apply_event_filter(call, buf);
1032 	free_page((unsigned long) buf);
1033 	if (err < 0)
1034 		return err;
1035 
1036 	*ppos += cnt;
1037 
1038 	return cnt;
1039 }
1040 
1041 static LIST_HEAD(event_subsystems);
1042 
1043 static int subsystem_open(struct inode *inode, struct file *filp)
1044 {
1045 	struct event_subsystem *system = NULL;
1046 	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1047 	struct trace_array *tr;
1048 	int ret;
1049 
1050 	/* Make sure the system still exists */
1051 	mutex_lock(&trace_types_lock);
1052 	mutex_lock(&event_mutex);
1053 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1054 		list_for_each_entry(dir, &tr->systems, list) {
1055 			if (dir == inode->i_private) {
1056 				/* Don't open systems with no events */
1057 				if (dir->nr_events) {
1058 					__get_system_dir(dir);
1059 					system = dir->subsystem;
1060 				}
1061 				goto exit_loop;
1062 			}
1063 		}
1064 	}
1065  exit_loop:
1066 	mutex_unlock(&event_mutex);
1067 	mutex_unlock(&trace_types_lock);
1068 
1069 	if (!system)
1070 		return -ENODEV;
1071 
1072 	/* Some versions of gcc think dir can be uninitialized here */
1073 	WARN_ON(!dir);
1074 
1075 	/* Still need to increment the ref count of the system */
1076 	if (trace_array_get(tr) < 0) {
1077 		put_system(dir);
1078 		return -ENODEV;
1079 	}
1080 
1081 	ret = tracing_open_generic(inode, filp);
1082 	if (ret < 0) {
1083 		trace_array_put(tr);
1084 		put_system(dir);
1085 	}
1086 
1087 	return ret;
1088 }
1089 
1090 static int system_tr_open(struct inode *inode, struct file *filp)
1091 {
1092 	struct ftrace_subsystem_dir *dir;
1093 	struct trace_array *tr = inode->i_private;
1094 	int ret;
1095 
1096 	if (trace_array_get(tr) < 0)
1097 		return -ENODEV;
1098 
1099 	/* Make a temporary dir that has no system but points to tr */
1100 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1101 	if (!dir) {
1102 		trace_array_put(tr);
1103 		return -ENOMEM;
1104 	}
1105 
1106 	dir->tr = tr;
1107 
1108 	ret = tracing_open_generic(inode, filp);
1109 	if (ret < 0) {
1110 		trace_array_put(tr);
1111 		kfree(dir);
1112 	}
1113 
1114 	filp->private_data = dir;
1115 
1116 	return ret;
1117 }
1118 
1119 static int subsystem_release(struct inode *inode, struct file *file)
1120 {
1121 	struct ftrace_subsystem_dir *dir = file->private_data;
1122 
1123 	trace_array_put(dir->tr);
1124 
1125 	/*
1126 	 * If dir->subsystem is NULL, then this is a temporary
1127 	 * descriptor that was made for a trace_array to enable
1128 	 * all subsystems.
1129 	 */
1130 	if (dir->subsystem)
1131 		put_system(dir);
1132 	else
1133 		kfree(dir);
1134 
1135 	return 0;
1136 }
1137 
1138 static ssize_t
1139 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1140 		      loff_t *ppos)
1141 {
1142 	struct ftrace_subsystem_dir *dir = filp->private_data;
1143 	struct event_subsystem *system = dir->subsystem;
1144 	struct trace_seq *s;
1145 	int r;
1146 
1147 	if (*ppos)
1148 		return 0;
1149 
1150 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1151 	if (!s)
1152 		return -ENOMEM;
1153 
1154 	trace_seq_init(s);
1155 
1156 	print_subsystem_event_filter(system, s);
1157 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1158 
1159 	kfree(s);
1160 
1161 	return r;
1162 }
1163 
1164 static ssize_t
1165 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1166 		       loff_t *ppos)
1167 {
1168 	struct ftrace_subsystem_dir *dir = filp->private_data;
1169 	char *buf;
1170 	int err;
1171 
1172 	if (cnt >= PAGE_SIZE)
1173 		return -EINVAL;
1174 
1175 	buf = (char *)__get_free_page(GFP_TEMPORARY);
1176 	if (!buf)
1177 		return -ENOMEM;
1178 
1179 	if (copy_from_user(buf, ubuf, cnt)) {
1180 		free_page((unsigned long) buf);
1181 		return -EFAULT;
1182 	}
1183 	buf[cnt] = '\0';
1184 
1185 	err = apply_subsystem_event_filter(dir, buf);
1186 	free_page((unsigned long) buf);
1187 	if (err < 0)
1188 		return err;
1189 
1190 	*ppos += cnt;
1191 
1192 	return cnt;
1193 }
1194 
1195 static ssize_t
1196 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1197 {
1198 	int (*func)(struct trace_seq *s) = filp->private_data;
1199 	struct trace_seq *s;
1200 	int r;
1201 
1202 	if (*ppos)
1203 		return 0;
1204 
1205 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1206 	if (!s)
1207 		return -ENOMEM;
1208 
1209 	trace_seq_init(s);
1210 
1211 	func(s);
1212 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1213 
1214 	kfree(s);
1215 
1216 	return r;
1217 }
1218 
1219 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1220 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1221 
1222 static const struct seq_operations show_event_seq_ops = {
1223 	.start = t_start,
1224 	.next = t_next,
1225 	.show = t_show,
1226 	.stop = t_stop,
1227 };
1228 
1229 static const struct seq_operations show_set_event_seq_ops = {
1230 	.start = s_start,
1231 	.next = s_next,
1232 	.show = t_show,
1233 	.stop = t_stop,
1234 };
1235 
1236 static const struct file_operations ftrace_avail_fops = {
1237 	.open = ftrace_event_avail_open,
1238 	.read = seq_read,
1239 	.llseek = seq_lseek,
1240 	.release = seq_release,
1241 };
1242 
1243 static const struct file_operations ftrace_set_event_fops = {
1244 	.open = ftrace_event_set_open,
1245 	.read = seq_read,
1246 	.write = ftrace_event_write,
1247 	.llseek = seq_lseek,
1248 	.release = seq_release,
1249 };
1250 
1251 static const struct file_operations ftrace_enable_fops = {
1252 	.open = tracing_open_generic_file,
1253 	.read = event_enable_read,
1254 	.write = event_enable_write,
1255 	.release = tracing_release_generic_file,
1256 	.llseek = default_llseek,
1257 };
1258 
1259 static const struct file_operations ftrace_event_format_fops = {
1260 	.open = trace_format_open,
1261 	.read = seq_read,
1262 	.llseek = seq_lseek,
1263 	.release = seq_release,
1264 };
1265 
1266 static const struct file_operations ftrace_event_id_fops = {
1267 	.open = tracing_open_generic,
1268 	.read = event_id_read,
1269 	.llseek = default_llseek,
1270 };
1271 
1272 static const struct file_operations ftrace_event_filter_fops = {
1273 	.open = tracing_open_generic,
1274 	.read = event_filter_read,
1275 	.write = event_filter_write,
1276 	.llseek = default_llseek,
1277 };
1278 
1279 static const struct file_operations ftrace_subsystem_filter_fops = {
1280 	.open = subsystem_open,
1281 	.read = subsystem_filter_read,
1282 	.write = subsystem_filter_write,
1283 	.llseek = default_llseek,
1284 	.release = subsystem_release,
1285 };
1286 
1287 static const struct file_operations ftrace_system_enable_fops = {
1288 	.open = subsystem_open,
1289 	.read = system_enable_read,
1290 	.write = system_enable_write,
1291 	.llseek = default_llseek,
1292 	.release = subsystem_release,
1293 };
1294 
1295 static const struct file_operations ftrace_tr_enable_fops = {
1296 	.open = system_tr_open,
1297 	.read = system_enable_read,
1298 	.write = system_enable_write,
1299 	.llseek = default_llseek,
1300 	.release = subsystem_release,
1301 };
1302 
1303 static const struct file_operations ftrace_show_header_fops = {
1304 	.open = tracing_open_generic,
1305 	.read = show_header,
1306 	.llseek = default_llseek,
1307 };
1308 
1309 static int
1310 ftrace_event_open(struct inode *inode, struct file *file,
1311 		  const struct seq_operations *seq_ops)
1312 {
1313 	struct seq_file *m;
1314 	int ret;
1315 
1316 	ret = seq_open(file, seq_ops);
1317 	if (ret < 0)
1318 		return ret;
1319 	m = file->private_data;
1320 	/* copy tr over to seq ops */
1321 	m->private = inode->i_private;
1322 
1323 	return ret;
1324 }
1325 
1326 static int
1327 ftrace_event_avail_open(struct inode *inode, struct file *file)
1328 {
1329 	const struct seq_operations *seq_ops = &show_event_seq_ops;
1330 
1331 	return ftrace_event_open(inode, file, seq_ops);
1332 }
1333 
1334 static int
1335 ftrace_event_set_open(struct inode *inode, struct file *file)
1336 {
1337 	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1338 	struct trace_array *tr = inode->i_private;
1339 
1340 	if ((file->f_mode & FMODE_WRITE) &&
1341 	    (file->f_flags & O_TRUNC))
1342 		ftrace_clear_events(tr);
1343 
1344 	return ftrace_event_open(inode, file, seq_ops);
1345 }
1346 
1347 static struct event_subsystem *
1348 create_new_subsystem(const char *name)
1349 {
1350 	struct event_subsystem *system;
1351 
1352 	/* need to create new entry */
1353 	system = kmalloc(sizeof(*system), GFP_KERNEL);
1354 	if (!system)
1355 		return NULL;
1356 
1357 	system->ref_count = 1;
1358 
1359 	/* Only allocate if dynamic (kprobes and modules) */
1360 	if (!core_kernel_data((unsigned long)name)) {
1361 		system->ref_count |= SYSTEM_FL_FREE_NAME;
1362 		system->name = kstrdup(name, GFP_KERNEL);
1363 		if (!system->name)
1364 			goto out_free;
1365 	} else
1366 		system->name = name;
1367 
1368 	system->filter = NULL;
1369 
1370 	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1371 	if (!system->filter)
1372 		goto out_free;
1373 
1374 	list_add(&system->list, &event_subsystems);
1375 
1376 	return system;
1377 
1378  out_free:
1379 	if (system->ref_count & SYSTEM_FL_FREE_NAME)
1380 		kfree(system->name);
1381 	kfree(system);
1382 	return NULL;
1383 }
1384 
1385 static struct dentry *
1386 event_subsystem_dir(struct trace_array *tr, const char *name,
1387 		    struct ftrace_event_file *file, struct dentry *parent)
1388 {
1389 	struct ftrace_subsystem_dir *dir;
1390 	struct event_subsystem *system;
1391 	struct dentry *entry;
1392 
1393 	/* First see if we did not already create this dir */
1394 	list_for_each_entry(dir, &tr->systems, list) {
1395 		system = dir->subsystem;
1396 		if (strcmp(system->name, name) == 0) {
1397 			dir->nr_events++;
1398 			file->system = dir;
1399 			return dir->entry;
1400 		}
1401 	}
1402 
1403 	/* Now see if the system itself exists. */
1404 	list_for_each_entry(system, &event_subsystems, list) {
1405 		if (strcmp(system->name, name) == 0)
1406 			break;
1407 	}
1408 	/* Reset system variable when not found */
1409 	if (&system->list == &event_subsystems)
1410 		system = NULL;
1411 
1412 	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1413 	if (!dir)
1414 		goto out_fail;
1415 
1416 	if (!system) {
1417 		system = create_new_subsystem(name);
1418 		if (!system)
1419 			goto out_free;
1420 	} else
1421 		__get_system(system);
1422 
1423 	dir->entry = debugfs_create_dir(name, parent);
1424 	if (!dir->entry) {
1425 		pr_warning("Failed to create system directory %s\n", name);
1426 		__put_system(system);
1427 		goto out_free;
1428 	}
1429 
1430 	dir->tr = tr;
1431 	dir->ref_count = 1;
1432 	dir->nr_events = 1;
1433 	dir->subsystem = system;
1434 	file->system = dir;
1435 
1436 	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1437 				    &ftrace_subsystem_filter_fops);
1438 	if (!entry) {
1439 		kfree(system->filter);
1440 		system->filter = NULL;
1441 		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
1442 	}
1443 
1444 	trace_create_file("enable", 0644, dir->entry, dir,
1445 			  &ftrace_system_enable_fops);
1446 
1447 	list_add(&dir->list, &tr->systems);
1448 
1449 	return dir->entry;
1450 
1451  out_free:
1452 	kfree(dir);
1453  out_fail:
1454 	/* Only print this message if failed on memory allocation */
1455 	if (!dir || !system)
1456 		pr_warning("No memory to create event subsystem %s\n",
1457 			   name);
1458 	return NULL;
1459 }
1460 
1461 static int
1462 event_create_dir(struct dentry *parent,
1463 		 struct ftrace_event_file *file,
1464 		 const struct file_operations *id,
1465 		 const struct file_operations *enable,
1466 		 const struct file_operations *filter,
1467 		 const struct file_operations *format)
1468 {
1469 	struct ftrace_event_call *call = file->event_call;
1470 	struct trace_array *tr = file->tr;
1471 	struct list_head *head;
1472 	struct dentry *d_events;
1473 	int ret;
1474 
1475 	/*
1476 	 * If the trace point header did not define TRACE_SYSTEM
1477 	 * then the system would be called "TRACE_SYSTEM".
1478 	 */
1479 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1480 		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1481 		if (!d_events)
1482 			return -ENOMEM;
1483 	} else
1484 		d_events = parent;
1485 
1486 	file->dir = debugfs_create_dir(call->name, d_events);
1487 	if (!file->dir) {
1488 		pr_warning("Could not create debugfs '%s' directory\n",
1489 			   call->name);
1490 		return -1;
1491 	}
1492 
1493 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1494 		trace_create_file("enable", 0644, file->dir, file,
1495 				  enable);
1496 
1497 #ifdef CONFIG_PERF_EVENTS
1498 	if (call->event.type && call->class->reg)
1499 		trace_create_file("id", 0444, file->dir, call,
1500 		 		  id);
1501 #endif
1502 
1503 	/*
1504 	 * Other events may have the same class. Only update
1505 	 * the fields if they are not already defined.
1506 	 */
1507 	head = trace_get_fields(call);
1508 	if (list_empty(head)) {
1509 		ret = call->class->define_fields(call);
1510 		if (ret < 0) {
1511 			pr_warning("Could not initialize trace point"
1512 				   " events/%s\n", call->name);
1513 			return -1;
1514 		}
1515 	}
1516 	trace_create_file("filter", 0644, file->dir, call,
1517 			  filter);
1518 
1519 	trace_create_file("format", 0444, file->dir, call,
1520 			  format);
1521 
1522 	return 0;
1523 }
1524 
1525 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
1526 {
1527 	if (!dir)
1528 		return;
1529 
1530 	if (!--dir->nr_events) {
1531 		debugfs_remove_recursive(dir->entry);
1532 		list_del(&dir->list);
1533 		__put_system_dir(dir);
1534 	}
1535 }
1536 
1537 static void remove_event_from_tracers(struct ftrace_event_call *call)
1538 {
1539 	struct ftrace_event_file *file;
1540 	struct trace_array *tr;
1541 
1542 	do_for_each_event_file_safe(tr, file) {
1543 
1544 		if (file->event_call != call)
1545 			continue;
1546 
1547 		list_del(&file->list);
1548 		debugfs_remove_recursive(file->dir);
1549 		remove_subsystem(file->system);
1550 		kmem_cache_free(file_cachep, file);
1551 
1552 		/*
1553 		 * The do_for_each_event_file_safe() is
1554 		 * a double loop. After finding the call for this
1555 		 * trace_array, we use break to jump to the next
1556 		 * trace_array.
1557 		 */
1558 		break;
1559 	} while_for_each_event_file();
1560 }
1561 
1562 static void event_remove(struct ftrace_event_call *call)
1563 {
1564 	struct trace_array *tr;
1565 	struct ftrace_event_file *file;
1566 
1567 	do_for_each_event_file(tr, file) {
1568 		if (file->event_call != call)
1569 			continue;
1570 		ftrace_event_enable_disable(file, 0);
1571 		/*
1572 		 * The do_for_each_event_file() is
1573 		 * a double loop. After finding the call for this
1574 		 * trace_array, we use break to jump to the next
1575 		 * trace_array.
1576 		 */
1577 		break;
1578 	} while_for_each_event_file();
1579 
1580 	if (call->event.funcs)
1581 		__unregister_ftrace_event(&call->event);
1582 	remove_event_from_tracers(call);
1583 	list_del(&call->list);
1584 }
1585 
1586 static int event_init(struct ftrace_event_call *call)
1587 {
1588 	int ret = 0;
1589 
1590 	if (WARN_ON(!call->name))
1591 		return -EINVAL;
1592 
1593 	if (call->class->raw_init) {
1594 		ret = call->class->raw_init(call);
1595 		if (ret < 0 && ret != -ENOSYS)
1596 			pr_warn("Could not initialize trace events/%s\n",
1597 				call->name);
1598 	}
1599 
1600 	return ret;
1601 }
1602 
1603 static int
1604 __register_event(struct ftrace_event_call *call, struct module *mod)
1605 {
1606 	int ret;
1607 
1608 	ret = event_init(call);
1609 	if (ret < 0)
1610 		return ret;
1611 
1612 	list_add(&call->list, &ftrace_events);
1613 	call->mod = mod;
1614 
1615 	return 0;
1616 }
1617 
1618 static struct ftrace_event_file *
1619 trace_create_new_event(struct ftrace_event_call *call,
1620 		       struct trace_array *tr)
1621 {
1622 	struct ftrace_event_file *file;
1623 
1624 	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1625 	if (!file)
1626 		return NULL;
1627 
1628 	file->event_call = call;
1629 	file->tr = tr;
1630 	atomic_set(&file->sm_ref, 0);
1631 	list_add(&file->list, &tr->events);
1632 
1633 	return file;
1634 }
1635 
1636 /* Add an event to a trace directory */
1637 static int
1638 __trace_add_new_event(struct ftrace_event_call *call,
1639 		      struct trace_array *tr,
1640 		      const struct file_operations *id,
1641 		      const struct file_operations *enable,
1642 		      const struct file_operations *filter,
1643 		      const struct file_operations *format)
1644 {
1645 	struct ftrace_event_file *file;
1646 
1647 	file = trace_create_new_event(call, tr);
1648 	if (!file)
1649 		return -ENOMEM;
1650 
1651 	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1652 }
1653 
1654 /*
1655  * Just create a decriptor for early init. A descriptor is required
1656  * for enabling events at boot. We want to enable events before
1657  * the filesystem is initialized.
1658  */
1659 static __init int
1660 __trace_early_add_new_event(struct ftrace_event_call *call,
1661 			    struct trace_array *tr)
1662 {
1663 	struct ftrace_event_file *file;
1664 
1665 	file = trace_create_new_event(call, tr);
1666 	if (!file)
1667 		return -ENOMEM;
1668 
1669 	return 0;
1670 }
1671 
1672 struct ftrace_module_file_ops;
1673 static void __add_event_to_tracers(struct ftrace_event_call *call,
1674 				   struct ftrace_module_file_ops *file_ops);
1675 
1676 /* Add an additional event_call dynamically */
1677 int trace_add_event_call(struct ftrace_event_call *call)
1678 {
1679 	int ret;
1680 	mutex_lock(&trace_types_lock);
1681 	mutex_lock(&event_mutex);
1682 
1683 	ret = __register_event(call, NULL);
1684 	if (ret >= 0)
1685 		__add_event_to_tracers(call, NULL);
1686 
1687 	mutex_unlock(&event_mutex);
1688 	mutex_unlock(&trace_types_lock);
1689 	return ret;
1690 }
1691 
1692 /*
1693  * Must be called under locking of trace_types_lock, event_mutex and
1694  * trace_event_sem.
1695  */
1696 static void __trace_remove_event_call(struct ftrace_event_call *call)
1697 {
1698 	event_remove(call);
1699 	trace_destroy_fields(call);
1700 	destroy_preds(call);
1701 }
1702 
1703 /* Remove an event_call */
1704 void trace_remove_event_call(struct ftrace_event_call *call)
1705 {
1706 	mutex_lock(&trace_types_lock);
1707 	mutex_lock(&event_mutex);
1708 	down_write(&trace_event_sem);
1709 	__trace_remove_event_call(call);
1710 	up_write(&trace_event_sem);
1711 	mutex_unlock(&event_mutex);
1712 	mutex_unlock(&trace_types_lock);
1713 }
1714 
1715 #define for_each_event(event, start, end)			\
1716 	for (event = start;					\
1717 	     (unsigned long)event < (unsigned long)end;		\
1718 	     event++)
1719 
1720 #ifdef CONFIG_MODULES
1721 
1722 static LIST_HEAD(ftrace_module_file_list);
1723 
1724 /*
1725  * Modules must own their file_operations to keep up with
1726  * reference counting.
1727  */
1728 struct ftrace_module_file_ops {
1729 	struct list_head		list;
1730 	struct module			*mod;
1731 	struct file_operations		id;
1732 	struct file_operations		enable;
1733 	struct file_operations		format;
1734 	struct file_operations		filter;
1735 };
1736 
1737 static struct ftrace_module_file_ops *
1738 find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
1739 {
1740 	/*
1741 	 * As event_calls are added in groups by module,
1742 	 * when we find one file_ops, we don't need to search for
1743 	 * each call in that module, as the rest should be the
1744 	 * same. Only search for a new one if the last one did
1745 	 * not match.
1746 	 */
1747 	if (file_ops && mod == file_ops->mod)
1748 		return file_ops;
1749 
1750 	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1751 		if (file_ops->mod == mod)
1752 			return file_ops;
1753 	}
1754 	return NULL;
1755 }
1756 
1757 static struct ftrace_module_file_ops *
1758 trace_create_file_ops(struct module *mod)
1759 {
1760 	struct ftrace_module_file_ops *file_ops;
1761 
1762 	/*
1763 	 * This is a bit of a PITA. To allow for correct reference
1764 	 * counting, modules must "own" their file_operations.
1765 	 * To do this, we allocate the file operations that will be
1766 	 * used in the event directory.
1767 	 */
1768 
1769 	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1770 	if (!file_ops)
1771 		return NULL;
1772 
1773 	file_ops->mod = mod;
1774 
1775 	file_ops->id = ftrace_event_id_fops;
1776 	file_ops->id.owner = mod;
1777 
1778 	file_ops->enable = ftrace_enable_fops;
1779 	file_ops->enable.owner = mod;
1780 
1781 	file_ops->filter = ftrace_event_filter_fops;
1782 	file_ops->filter.owner = mod;
1783 
1784 	file_ops->format = ftrace_event_format_fops;
1785 	file_ops->format.owner = mod;
1786 
1787 	list_add(&file_ops->list, &ftrace_module_file_list);
1788 
1789 	return file_ops;
1790 }
1791 
1792 static void trace_module_add_events(struct module *mod)
1793 {
1794 	struct ftrace_module_file_ops *file_ops = NULL;
1795 	struct ftrace_event_call **call, **start, **end;
1796 
1797 	start = mod->trace_events;
1798 	end = mod->trace_events + mod->num_trace_events;
1799 
1800 	if (start == end)
1801 		return;
1802 
1803 	file_ops = trace_create_file_ops(mod);
1804 	if (!file_ops)
1805 		return;
1806 
1807 	for_each_event(call, start, end) {
1808 		__register_event(*call, mod);
1809 		__add_event_to_tracers(*call, file_ops);
1810 	}
1811 }
1812 
1813 static void trace_module_remove_events(struct module *mod)
1814 {
1815 	struct ftrace_module_file_ops *file_ops;
1816 	struct ftrace_event_call *call, *p;
1817 	bool clear_trace = false;
1818 
1819 	down_write(&trace_event_sem);
1820 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
1821 		if (call->mod == mod) {
1822 			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1823 				clear_trace = true;
1824 			__trace_remove_event_call(call);
1825 		}
1826 	}
1827 
1828 	/* Now free the file_operations */
1829 	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1830 		if (file_ops->mod == mod)
1831 			break;
1832 	}
1833 	if (&file_ops->list != &ftrace_module_file_list) {
1834 		list_del(&file_ops->list);
1835 		kfree(file_ops);
1836 	}
1837 	up_write(&trace_event_sem);
1838 
1839 	/*
1840 	 * It is safest to reset the ring buffer if the module being unloaded
1841 	 * registered any events that were used. The only worry is if
1842 	 * a new module gets loaded, and takes on the same id as the events
1843 	 * of this module. When printing out the buffer, traced events left
1844 	 * over from this module may be passed to the new module events and
1845 	 * unexpected results may occur.
1846 	 */
1847 	if (clear_trace)
1848 		tracing_reset_all_online_cpus();
1849 }
1850 
1851 static int trace_module_notify(struct notifier_block *self,
1852 			       unsigned long val, void *data)
1853 {
1854 	struct module *mod = data;
1855 
1856 	mutex_lock(&trace_types_lock);
1857 	mutex_lock(&event_mutex);
1858 	switch (val) {
1859 	case MODULE_STATE_COMING:
1860 		trace_module_add_events(mod);
1861 		break;
1862 	case MODULE_STATE_GOING:
1863 		trace_module_remove_events(mod);
1864 		break;
1865 	}
1866 	mutex_unlock(&event_mutex);
1867 	mutex_unlock(&trace_types_lock);
1868 
1869 	return 0;
1870 }
1871 
1872 static int
1873 __trace_add_new_mod_event(struct ftrace_event_call *call,
1874 			  struct trace_array *tr,
1875 			  struct ftrace_module_file_ops *file_ops)
1876 {
1877 	return __trace_add_new_event(call, tr,
1878 				     &file_ops->id, &file_ops->enable,
1879 				     &file_ops->filter, &file_ops->format);
1880 }
1881 
1882 #else
1883 static inline struct ftrace_module_file_ops *
1884 find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
1885 {
1886 	return NULL;
1887 }
1888 static inline int trace_module_notify(struct notifier_block *self,
1889 				      unsigned long val, void *data)
1890 {
1891 	return 0;
1892 }
1893 static inline int
1894 __trace_add_new_mod_event(struct ftrace_event_call *call,
1895 			  struct trace_array *tr,
1896 			  struct ftrace_module_file_ops *file_ops)
1897 {
1898 	return -ENODEV;
1899 }
1900 #endif /* CONFIG_MODULES */
1901 
1902 /* Create a new event directory structure for a trace directory. */
1903 static void
1904 __trace_add_event_dirs(struct trace_array *tr)
1905 {
1906 	struct ftrace_module_file_ops *file_ops = NULL;
1907 	struct ftrace_event_call *call;
1908 	int ret;
1909 
1910 	list_for_each_entry(call, &ftrace_events, list) {
1911 		if (call->mod) {
1912 			/*
1913 			 * Directories for events by modules need to
1914 			 * keep module ref counts when opened (as we don't
1915 			 * want the module to disappear when reading one
1916 			 * of these files). The file_ops keep account of
1917 			 * the module ref count.
1918 			 */
1919 			file_ops = find_ftrace_file_ops(file_ops, call->mod);
1920 			if (!file_ops)
1921 				continue; /* Warn? */
1922 			ret = __trace_add_new_mod_event(call, tr, file_ops);
1923 			if (ret < 0)
1924 				pr_warning("Could not create directory for event %s\n",
1925 					   call->name);
1926 			continue;
1927 		}
1928 		ret = __trace_add_new_event(call, tr,
1929 					    &ftrace_event_id_fops,
1930 					    &ftrace_enable_fops,
1931 					    &ftrace_event_filter_fops,
1932 					    &ftrace_event_format_fops);
1933 		if (ret < 0)
1934 			pr_warning("Could not create directory for event %s\n",
1935 				   call->name);
1936 	}
1937 }
1938 
1939 #ifdef CONFIG_DYNAMIC_FTRACE
1940 
1941 /* Avoid typos */
1942 #define ENABLE_EVENT_STR	"enable_event"
1943 #define DISABLE_EVENT_STR	"disable_event"
1944 
1945 struct event_probe_data {
1946 	struct ftrace_event_file	*file;
1947 	unsigned long			count;
1948 	int				ref;
1949 	bool				enable;
1950 };
1951 
1952 static struct ftrace_event_file *
1953 find_event_file(struct trace_array *tr, const char *system,  const char *event)
1954 {
1955 	struct ftrace_event_file *file;
1956 	struct ftrace_event_call *call;
1957 
1958 	list_for_each_entry(file, &tr->events, list) {
1959 
1960 		call = file->event_call;
1961 
1962 		if (!call->name || !call->class || !call->class->reg)
1963 			continue;
1964 
1965 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1966 			continue;
1967 
1968 		if (strcmp(event, call->name) == 0 &&
1969 		    strcmp(system, call->class->system) == 0)
1970 			return file;
1971 	}
1972 	return NULL;
1973 }
1974 
1975 static void
1976 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1977 {
1978 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1979 	struct event_probe_data *data = *pdata;
1980 
1981 	if (!data)
1982 		return;
1983 
1984 	if (data->enable)
1985 		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1986 	else
1987 		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1988 }
1989 
1990 static void
1991 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1992 {
1993 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1994 	struct event_probe_data *data = *pdata;
1995 
1996 	if (!data)
1997 		return;
1998 
1999 	if (!data->count)
2000 		return;
2001 
2002 	/* Skip if the event is in a state we want to switch to */
2003 	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
2004 		return;
2005 
2006 	if (data->count != -1)
2007 		(data->count)--;
2008 
2009 	event_enable_probe(ip, parent_ip, _data);
2010 }
2011 
2012 static int
2013 event_enable_print(struct seq_file *m, unsigned long ip,
2014 		      struct ftrace_probe_ops *ops, void *_data)
2015 {
2016 	struct event_probe_data *data = _data;
2017 
2018 	seq_printf(m, "%ps:", (void *)ip);
2019 
2020 	seq_printf(m, "%s:%s:%s",
2021 		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2022 		   data->file->event_call->class->system,
2023 		   data->file->event_call->name);
2024 
2025 	if (data->count == -1)
2026 		seq_printf(m, ":unlimited\n");
2027 	else
2028 		seq_printf(m, ":count=%ld\n", data->count);
2029 
2030 	return 0;
2031 }
2032 
2033 static int
2034 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2035 		  void **_data)
2036 {
2037 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
2038 	struct event_probe_data *data = *pdata;
2039 
2040 	data->ref++;
2041 	return 0;
2042 }
2043 
2044 static void
2045 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2046 		  void **_data)
2047 {
2048 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
2049 	struct event_probe_data *data = *pdata;
2050 
2051 	if (WARN_ON_ONCE(data->ref <= 0))
2052 		return;
2053 
2054 	data->ref--;
2055 	if (!data->ref) {
2056 		/* Remove the SOFT_MODE flag */
2057 		__ftrace_event_enable_disable(data->file, 0, 1);
2058 		module_put(data->file->event_call->mod);
2059 		kfree(data);
2060 	}
2061 	*pdata = NULL;
2062 }
2063 
2064 static struct ftrace_probe_ops event_enable_probe_ops = {
2065 	.func			= event_enable_probe,
2066 	.print			= event_enable_print,
2067 	.init			= event_enable_init,
2068 	.free			= event_enable_free,
2069 };
2070 
2071 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2072 	.func			= event_enable_count_probe,
2073 	.print			= event_enable_print,
2074 	.init			= event_enable_init,
2075 	.free			= event_enable_free,
2076 };
2077 
2078 static struct ftrace_probe_ops event_disable_probe_ops = {
2079 	.func			= event_enable_probe,
2080 	.print			= event_enable_print,
2081 	.init			= event_enable_init,
2082 	.free			= event_enable_free,
2083 };
2084 
2085 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2086 	.func			= event_enable_count_probe,
2087 	.print			= event_enable_print,
2088 	.init			= event_enable_init,
2089 	.free			= event_enable_free,
2090 };
2091 
2092 static int
2093 event_enable_func(struct ftrace_hash *hash,
2094 		  char *glob, char *cmd, char *param, int enabled)
2095 {
2096 	struct trace_array *tr = top_trace_array();
2097 	struct ftrace_event_file *file;
2098 	struct ftrace_probe_ops *ops;
2099 	struct event_probe_data *data;
2100 	const char *system;
2101 	const char *event;
2102 	char *number;
2103 	bool enable;
2104 	int ret;
2105 
2106 	/* hash funcs only work with set_ftrace_filter */
2107 	if (!enabled || !param)
2108 		return -EINVAL;
2109 
2110 	system = strsep(&param, ":");
2111 	if (!param)
2112 		return -EINVAL;
2113 
2114 	event = strsep(&param, ":");
2115 
2116 	mutex_lock(&event_mutex);
2117 
2118 	ret = -EINVAL;
2119 	file = find_event_file(tr, system, event);
2120 	if (!file)
2121 		goto out;
2122 
2123 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2124 
2125 	if (enable)
2126 		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2127 	else
2128 		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2129 
2130 	if (glob[0] == '!') {
2131 		unregister_ftrace_function_probe_func(glob+1, ops);
2132 		ret = 0;
2133 		goto out;
2134 	}
2135 
2136 	ret = -ENOMEM;
2137 	data = kzalloc(sizeof(*data), GFP_KERNEL);
2138 	if (!data)
2139 		goto out;
2140 
2141 	data->enable = enable;
2142 	data->count = -1;
2143 	data->file = file;
2144 
2145 	if (!param)
2146 		goto out_reg;
2147 
2148 	number = strsep(&param, ":");
2149 
2150 	ret = -EINVAL;
2151 	if (!strlen(number))
2152 		goto out_free;
2153 
2154 	/*
2155 	 * We use the callback data field (which is a pointer)
2156 	 * as our counter.
2157 	 */
2158 	ret = kstrtoul(number, 0, &data->count);
2159 	if (ret)
2160 		goto out_free;
2161 
2162  out_reg:
2163 	/* Don't let event modules unload while probe registered */
2164 	ret = try_module_get(file->event_call->mod);
2165 	if (!ret) {
2166 		ret = -EBUSY;
2167 		goto out_free;
2168 	}
2169 
2170 	ret = __ftrace_event_enable_disable(file, 1, 1);
2171 	if (ret < 0)
2172 		goto out_put;
2173 	ret = register_ftrace_function_probe(glob, ops, data);
2174 	/*
2175 	 * The above returns on success the # of functions enabled,
2176 	 * but if it didn't find any functions it returns zero.
2177 	 * Consider no functions a failure too.
2178 	 */
2179 	if (!ret) {
2180 		ret = -ENOENT;
2181 		goto out_disable;
2182 	} else if (ret < 0)
2183 		goto out_disable;
2184 	/* Just return zero, not the number of enabled functions */
2185 	ret = 0;
2186  out:
2187 	mutex_unlock(&event_mutex);
2188 	return ret;
2189 
2190  out_disable:
2191 	__ftrace_event_enable_disable(file, 0, 1);
2192  out_put:
2193 	module_put(file->event_call->mod);
2194  out_free:
2195 	kfree(data);
2196 	goto out;
2197 }
2198 
2199 static struct ftrace_func_command event_enable_cmd = {
2200 	.name			= ENABLE_EVENT_STR,
2201 	.func			= event_enable_func,
2202 };
2203 
2204 static struct ftrace_func_command event_disable_cmd = {
2205 	.name			= DISABLE_EVENT_STR,
2206 	.func			= event_enable_func,
2207 };
2208 
2209 static __init int register_event_cmds(void)
2210 {
2211 	int ret;
2212 
2213 	ret = register_ftrace_command(&event_enable_cmd);
2214 	if (WARN_ON(ret < 0))
2215 		return ret;
2216 	ret = register_ftrace_command(&event_disable_cmd);
2217 	if (WARN_ON(ret < 0))
2218 		unregister_ftrace_command(&event_enable_cmd);
2219 	return ret;
2220 }
2221 #else
2222 static inline int register_event_cmds(void) { return 0; }
2223 #endif /* CONFIG_DYNAMIC_FTRACE */
2224 
2225 /*
2226  * The top level array has already had its ftrace_event_file
2227  * descriptors created in order to allow for early events to
2228  * be recorded. This function is called after the debugfs has been
2229  * initialized, and we now have to create the files associated
2230  * to the events.
2231  */
2232 static __init void
2233 __trace_early_add_event_dirs(struct trace_array *tr)
2234 {
2235 	struct ftrace_event_file *file;
2236 	int ret;
2237 
2238 
2239 	list_for_each_entry(file, &tr->events, list) {
2240 		ret = event_create_dir(tr->event_dir, file,
2241 				       &ftrace_event_id_fops,
2242 				       &ftrace_enable_fops,
2243 				       &ftrace_event_filter_fops,
2244 				       &ftrace_event_format_fops);
2245 		if (ret < 0)
2246 			pr_warning("Could not create directory for event %s\n",
2247 				   file->event_call->name);
2248 	}
2249 }
2250 
2251 /*
2252  * For early boot up, the top trace array requires to have
2253  * a list of events that can be enabled. This must be done before
2254  * the filesystem is set up in order to allow events to be traced
2255  * early.
2256  */
2257 static __init void
2258 __trace_early_add_events(struct trace_array *tr)
2259 {
2260 	struct ftrace_event_call *call;
2261 	int ret;
2262 
2263 	list_for_each_entry(call, &ftrace_events, list) {
2264 		/* Early boot up should not have any modules loaded */
2265 		if (WARN_ON_ONCE(call->mod))
2266 			continue;
2267 
2268 		ret = __trace_early_add_new_event(call, tr);
2269 		if (ret < 0)
2270 			pr_warning("Could not create early event %s\n",
2271 				   call->name);
2272 	}
2273 }
2274 
2275 /* Remove the event directory structure for a trace directory. */
2276 static void
2277 __trace_remove_event_dirs(struct trace_array *tr)
2278 {
2279 	struct ftrace_event_file *file, *next;
2280 
2281 	list_for_each_entry_safe(file, next, &tr->events, list) {
2282 		list_del(&file->list);
2283 		debugfs_remove_recursive(file->dir);
2284 		remove_subsystem(file->system);
2285 		kmem_cache_free(file_cachep, file);
2286 	}
2287 }
2288 
2289 static void
2290 __add_event_to_tracers(struct ftrace_event_call *call,
2291 		       struct ftrace_module_file_ops *file_ops)
2292 {
2293 	struct trace_array *tr;
2294 
2295 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2296 		if (file_ops)
2297 			__trace_add_new_mod_event(call, tr, file_ops);
2298 		else
2299 			__trace_add_new_event(call, tr,
2300 					      &ftrace_event_id_fops,
2301 					      &ftrace_enable_fops,
2302 					      &ftrace_event_filter_fops,
2303 					      &ftrace_event_format_fops);
2304 	}
2305 }
2306 
2307 static struct notifier_block trace_module_nb = {
2308 	.notifier_call = trace_module_notify,
2309 	.priority = 0,
2310 };
2311 
2312 extern struct ftrace_event_call *__start_ftrace_events[];
2313 extern struct ftrace_event_call *__stop_ftrace_events[];
2314 
2315 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2316 
2317 static __init int setup_trace_event(char *str)
2318 {
2319 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2320 	ring_buffer_expanded = true;
2321 	tracing_selftest_disabled = true;
2322 
2323 	return 1;
2324 }
2325 __setup("trace_event=", setup_trace_event);
2326 
2327 /* Expects to have event_mutex held when called */
2328 static int
2329 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2330 {
2331 	struct dentry *d_events;
2332 	struct dentry *entry;
2333 
2334 	entry = debugfs_create_file("set_event", 0644, parent,
2335 				    tr, &ftrace_set_event_fops);
2336 	if (!entry) {
2337 		pr_warning("Could not create debugfs 'set_event' entry\n");
2338 		return -ENOMEM;
2339 	}
2340 
2341 	d_events = debugfs_create_dir("events", parent);
2342 	if (!d_events) {
2343 		pr_warning("Could not create debugfs 'events' directory\n");
2344 		return -ENOMEM;
2345 	}
2346 
2347 	/* ring buffer internal formats */
2348 	trace_create_file("header_page", 0444, d_events,
2349 			  ring_buffer_print_page_header,
2350 			  &ftrace_show_header_fops);
2351 
2352 	trace_create_file("header_event", 0444, d_events,
2353 			  ring_buffer_print_entry_header,
2354 			  &ftrace_show_header_fops);
2355 
2356 	trace_create_file("enable", 0644, d_events,
2357 			  tr, &ftrace_tr_enable_fops);
2358 
2359 	tr->event_dir = d_events;
2360 
2361 	return 0;
2362 }
2363 
2364 /**
2365  * event_trace_add_tracer - add a instance of a trace_array to events
2366  * @parent: The parent dentry to place the files/directories for events in
2367  * @tr: The trace array associated with these events
2368  *
2369  * When a new instance is created, it needs to set up its events
2370  * directory, as well as other files associated with events. It also
2371  * creates the event hierachry in the @parent/events directory.
2372  *
2373  * Returns 0 on success.
2374  */
2375 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2376 {
2377 	int ret;
2378 
2379 	mutex_lock(&event_mutex);
2380 
2381 	ret = create_event_toplevel_files(parent, tr);
2382 	if (ret)
2383 		goto out_unlock;
2384 
2385 	down_write(&trace_event_sem);
2386 	__trace_add_event_dirs(tr);
2387 	up_write(&trace_event_sem);
2388 
2389  out_unlock:
2390 	mutex_unlock(&event_mutex);
2391 
2392 	return ret;
2393 }
2394 
2395 /*
2396  * The top trace array already had its file descriptors created.
2397  * Now the files themselves need to be created.
2398  */
2399 static __init int
2400 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2401 {
2402 	int ret;
2403 
2404 	mutex_lock(&event_mutex);
2405 
2406 	ret = create_event_toplevel_files(parent, tr);
2407 	if (ret)
2408 		goto out_unlock;
2409 
2410 	down_write(&trace_event_sem);
2411 	__trace_early_add_event_dirs(tr);
2412 	up_write(&trace_event_sem);
2413 
2414  out_unlock:
2415 	mutex_unlock(&event_mutex);
2416 
2417 	return ret;
2418 }
2419 
2420 int event_trace_del_tracer(struct trace_array *tr)
2421 {
2422 	mutex_lock(&event_mutex);
2423 
2424 	/* Disable any running events */
2425 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2426 
2427 	down_write(&trace_event_sem);
2428 	__trace_remove_event_dirs(tr);
2429 	debugfs_remove_recursive(tr->event_dir);
2430 	up_write(&trace_event_sem);
2431 
2432 	tr->event_dir = NULL;
2433 
2434 	mutex_unlock(&event_mutex);
2435 
2436 	return 0;
2437 }
2438 
2439 static __init int event_trace_memsetup(void)
2440 {
2441 	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2442 	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2443 	return 0;
2444 }
2445 
2446 static __init int event_trace_enable(void)
2447 {
2448 	struct trace_array *tr = top_trace_array();
2449 	struct ftrace_event_call **iter, *call;
2450 	char *buf = bootup_event_buf;
2451 	char *token;
2452 	int ret;
2453 
2454 	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2455 
2456 		call = *iter;
2457 		ret = event_init(call);
2458 		if (!ret)
2459 			list_add(&call->list, &ftrace_events);
2460 	}
2461 
2462 	/*
2463 	 * We need the top trace array to have a working set of trace
2464 	 * points at early init, before the debug files and directories
2465 	 * are created. Create the file entries now, and attach them
2466 	 * to the actual file dentries later.
2467 	 */
2468 	__trace_early_add_events(tr);
2469 
2470 	while (true) {
2471 		token = strsep(&buf, ",");
2472 
2473 		if (!token)
2474 			break;
2475 		if (!*token)
2476 			continue;
2477 
2478 		ret = ftrace_set_clr_event(tr, token, 1);
2479 		if (ret)
2480 			pr_warn("Failed to enable trace event: %s\n", token);
2481 	}
2482 
2483 	trace_printk_start_comm();
2484 
2485 	register_event_cmds();
2486 
2487 	return 0;
2488 }
2489 
2490 static __init int event_trace_init(void)
2491 {
2492 	struct trace_array *tr;
2493 	struct dentry *d_tracer;
2494 	struct dentry *entry;
2495 	int ret;
2496 
2497 	tr = top_trace_array();
2498 
2499 	d_tracer = tracing_init_dentry();
2500 	if (!d_tracer)
2501 		return 0;
2502 
2503 	entry = debugfs_create_file("available_events", 0444, d_tracer,
2504 				    tr, &ftrace_avail_fops);
2505 	if (!entry)
2506 		pr_warning("Could not create debugfs "
2507 			   "'available_events' entry\n");
2508 
2509 	if (trace_define_common_fields())
2510 		pr_warning("tracing: Failed to allocate common fields");
2511 
2512 	ret = early_event_add_tracer(d_tracer, tr);
2513 	if (ret)
2514 		return ret;
2515 
2516 	ret = register_module_notifier(&trace_module_nb);
2517 	if (ret)
2518 		pr_warning("Failed to register trace events module notifier\n");
2519 
2520 	return 0;
2521 }
2522 early_initcall(event_trace_memsetup);
2523 core_initcall(event_trace_enable);
2524 fs_initcall(event_trace_init);
2525 
2526 #ifdef CONFIG_FTRACE_STARTUP_TEST
2527 
2528 static DEFINE_SPINLOCK(test_spinlock);
2529 static DEFINE_SPINLOCK(test_spinlock_irq);
2530 static DEFINE_MUTEX(test_mutex);
2531 
2532 static __init void test_work(struct work_struct *dummy)
2533 {
2534 	spin_lock(&test_spinlock);
2535 	spin_lock_irq(&test_spinlock_irq);
2536 	udelay(1);
2537 	spin_unlock_irq(&test_spinlock_irq);
2538 	spin_unlock(&test_spinlock);
2539 
2540 	mutex_lock(&test_mutex);
2541 	msleep(1);
2542 	mutex_unlock(&test_mutex);
2543 }
2544 
2545 static __init int event_test_thread(void *unused)
2546 {
2547 	void *test_malloc;
2548 
2549 	test_malloc = kmalloc(1234, GFP_KERNEL);
2550 	if (!test_malloc)
2551 		pr_info("failed to kmalloc\n");
2552 
2553 	schedule_on_each_cpu(test_work);
2554 
2555 	kfree(test_malloc);
2556 
2557 	set_current_state(TASK_INTERRUPTIBLE);
2558 	while (!kthread_should_stop())
2559 		schedule();
2560 
2561 	return 0;
2562 }
2563 
2564 /*
2565  * Do various things that may trigger events.
2566  */
2567 static __init void event_test_stuff(void)
2568 {
2569 	struct task_struct *test_thread;
2570 
2571 	test_thread = kthread_run(event_test_thread, NULL, "test-events");
2572 	msleep(1);
2573 	kthread_stop(test_thread);
2574 }
2575 
2576 /*
2577  * For every trace event defined, we will test each trace point separately,
2578  * and then by groups, and finally all trace points.
2579  */
2580 static __init void event_trace_self_tests(void)
2581 {
2582 	struct ftrace_subsystem_dir *dir;
2583 	struct ftrace_event_file *file;
2584 	struct ftrace_event_call *call;
2585 	struct event_subsystem *system;
2586 	struct trace_array *tr;
2587 	int ret;
2588 
2589 	tr = top_trace_array();
2590 
2591 	pr_info("Running tests on trace events:\n");
2592 
2593 	list_for_each_entry(file, &tr->events, list) {
2594 
2595 		call = file->event_call;
2596 
2597 		/* Only test those that have a probe */
2598 		if (!call->class || !call->class->probe)
2599 			continue;
2600 
2601 /*
2602  * Testing syscall events here is pretty useless, but
2603  * we still do it if configured. But this is time consuming.
2604  * What we really need is a user thread to perform the
2605  * syscalls as we test.
2606  */
2607 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2608 		if (call->class->system &&
2609 		    strcmp(call->class->system, "syscalls") == 0)
2610 			continue;
2611 #endif
2612 
2613 		pr_info("Testing event %s: ", call->name);
2614 
2615 		/*
2616 		 * If an event is already enabled, someone is using
2617 		 * it and the self test should not be on.
2618 		 */
2619 		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2620 			pr_warning("Enabled event during self test!\n");
2621 			WARN_ON_ONCE(1);
2622 			continue;
2623 		}
2624 
2625 		ftrace_event_enable_disable(file, 1);
2626 		event_test_stuff();
2627 		ftrace_event_enable_disable(file, 0);
2628 
2629 		pr_cont("OK\n");
2630 	}
2631 
2632 	/* Now test at the sub system level */
2633 
2634 	pr_info("Running tests on trace event systems:\n");
2635 
2636 	list_for_each_entry(dir, &tr->systems, list) {
2637 
2638 		system = dir->subsystem;
2639 
2640 		/* the ftrace system is special, skip it */
2641 		if (strcmp(system->name, "ftrace") == 0)
2642 			continue;
2643 
2644 		pr_info("Testing event system %s: ", system->name);
2645 
2646 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2647 		if (WARN_ON_ONCE(ret)) {
2648 			pr_warning("error enabling system %s\n",
2649 				   system->name);
2650 			continue;
2651 		}
2652 
2653 		event_test_stuff();
2654 
2655 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2656 		if (WARN_ON_ONCE(ret)) {
2657 			pr_warning("error disabling system %s\n",
2658 				   system->name);
2659 			continue;
2660 		}
2661 
2662 		pr_cont("OK\n");
2663 	}
2664 
2665 	/* Test with all events enabled */
2666 
2667 	pr_info("Running tests on all trace events:\n");
2668 	pr_info("Testing all events: ");
2669 
2670 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2671 	if (WARN_ON_ONCE(ret)) {
2672 		pr_warning("error enabling all events\n");
2673 		return;
2674 	}
2675 
2676 	event_test_stuff();
2677 
2678 	/* reset sysname */
2679 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2680 	if (WARN_ON_ONCE(ret)) {
2681 		pr_warning("error disabling all events\n");
2682 		return;
2683 	}
2684 
2685 	pr_cont("OK\n");
2686 }
2687 
2688 #ifdef CONFIG_FUNCTION_TRACER
2689 
2690 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2691 
2692 static void
2693 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2694 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
2695 {
2696 	struct ring_buffer_event *event;
2697 	struct ring_buffer *buffer;
2698 	struct ftrace_entry *entry;
2699 	unsigned long flags;
2700 	long disabled;
2701 	int cpu;
2702 	int pc;
2703 
2704 	pc = preempt_count();
2705 	preempt_disable_notrace();
2706 	cpu = raw_smp_processor_id();
2707 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2708 
2709 	if (disabled != 1)
2710 		goto out;
2711 
2712 	local_save_flags(flags);
2713 
2714 	event = trace_current_buffer_lock_reserve(&buffer,
2715 						  TRACE_FN, sizeof(*entry),
2716 						  flags, pc);
2717 	if (!event)
2718 		goto out;
2719 	entry	= ring_buffer_event_data(event);
2720 	entry->ip			= ip;
2721 	entry->parent_ip		= parent_ip;
2722 
2723 	trace_buffer_unlock_commit(buffer, event, flags, pc);
2724 
2725  out:
2726 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2727 	preempt_enable_notrace();
2728 }
2729 
2730 static struct ftrace_ops trace_ops __initdata  =
2731 {
2732 	.func = function_test_events_call,
2733 	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
2734 };
2735 
2736 static __init void event_trace_self_test_with_function(void)
2737 {
2738 	int ret;
2739 	ret = register_ftrace_function(&trace_ops);
2740 	if (WARN_ON(ret < 0)) {
2741 		pr_info("Failed to enable function tracer for event tests\n");
2742 		return;
2743 	}
2744 	pr_info("Running tests again, along with the function tracer\n");
2745 	event_trace_self_tests();
2746 	unregister_ftrace_function(&trace_ops);
2747 }
2748 #else
2749 static __init void event_trace_self_test_with_function(void)
2750 {
2751 }
2752 #endif
2753 
2754 static __init int event_trace_self_tests_init(void)
2755 {
2756 	if (!tracing_selftest_disabled) {
2757 		event_trace_self_tests();
2758 		event_trace_self_test_with_function();
2759 	}
2760 
2761 	return 0;
2762 }
2763 
2764 late_initcall(event_trace_self_tests_init);
2765 
2766 #endif
2767