xref: /linux/kernel/trace/trace_events.c (revision bbeddf52adc1b4207674ab88686cbbe58c24f721)
1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of fields of the trace point.
7  *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10 
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 
21 #include <asm/setup.h>
22 
23 #include "trace_output.h"
24 
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27 
28 DEFINE_MUTEX(event_mutex);
29 
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32 
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35 
36 LIST_HEAD(ftrace_events);
37 static LIST_HEAD(ftrace_common_fields);
38 
39 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
40 
41 static struct kmem_cache *field_cachep;
42 static struct kmem_cache *file_cachep;
43 
44 #define SYSTEM_FL_FREE_NAME		(1 << 31)
45 
46 static inline int system_refcount(struct event_subsystem *system)
47 {
48 	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
49 }
50 
51 static int system_refcount_inc(struct event_subsystem *system)
52 {
53 	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
54 }
55 
56 static int system_refcount_dec(struct event_subsystem *system)
57 {
58 	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
59 }
60 
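/*
 * Illustrative: the high bit of ref_count doubles as a flag, so a
 * dynamically named system that is referenced twice has
 * ref_count == (SYSTEM_FL_FREE_NAME | 2), while system_refcount()
 * still reports 2.  The helpers above keep the flag bit intact
 * across increments and decrements.
 */
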
61 /* Double loops, do not use break; only a goto can exit both loops */
62 #define do_for_each_event_file(tr, file)			\
63 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
64 		list_for_each_entry(file, &tr->events, list)
65 
66 #define do_for_each_event_file_safe(tr, file)			\
67 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
68 		struct ftrace_event_file *___n;				\
69 		list_for_each_entry_safe(file, ___n, &tr->events, list)
70 
71 #define while_for_each_event_file()		\
72 	}
73 
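/*
 * Usage sketch for the macros above (illustrative): break only exits
 * the inner loop, so a full early exit needs a goto:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call == target)
 *			goto found;
 *	} while_for_each_event_file();
 * found:
 *	...
 */
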
74 static struct list_head *
75 trace_get_fields(struct ftrace_event_call *event_call)
76 {
77 	if (!event_call->class->get_fields)
78 		return &event_call->class->fields;
79 	return event_call->class->get_fields(event_call);
80 }
81 
82 static struct ftrace_event_field *
83 __find_event_field(struct list_head *head, char *name)
84 {
85 	struct ftrace_event_field *field;
86 
87 	list_for_each_entry(field, head, link) {
88 		if (!strcmp(field->name, name))
89 			return field;
90 	}
91 
92 	return NULL;
93 }
94 
95 struct ftrace_event_field *
96 trace_find_event_field(struct ftrace_event_call *call, char *name)
97 {
98 	struct ftrace_event_field *field;
99 	struct list_head *head;
100 
101 	field = __find_event_field(&ftrace_common_fields, name);
102 	if (field)
103 		return field;
104 
105 	head = trace_get_fields(call);
106 	return __find_event_field(head, name);
107 }
108 
109 static int __trace_define_field(struct list_head *head, const char *type,
110 				const char *name, int offset, int size,
111 				int is_signed, int filter_type)
112 {
113 	struct ftrace_event_field *field;
114 
115 	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
116 	if (!field)
117 		return -ENOMEM;
118 
119 	field->name = name;
120 	field->type = type;
121 
122 	if (filter_type == FILTER_OTHER)
123 		field->filter_type = filter_assign_type(type);
124 	else
125 		field->filter_type = filter_type;
126 
127 	field->offset = offset;
128 	field->size = size;
129 	field->is_signed = is_signed;
130 
131 	list_add(&field->link, head);
132 
133 	return 0;
134 }
135 
136 int trace_define_field(struct ftrace_event_call *call, const char *type,
137 		       const char *name, int offset, int size, int is_signed,
138 		       int filter_type)
139 {
140 	struct list_head *head;
141 
142 	if (WARN_ON(!call->class))
143 		return 0;
144 
145 	head = trace_get_fields(call);
146 	return __trace_define_field(head, type, name, offset, size,
147 				    is_signed, filter_type);
148 }
149 EXPORT_SYMBOL_GPL(trace_define_field);
150 
151 #define __common_field(type, item)					\
152 	ret = __trace_define_field(&ftrace_common_fields, #type,	\
153 				   "common_" #item,			\
154 				   offsetof(typeof(ent), item),		\
155 				   sizeof(ent.item),			\
156 				   is_signed_type(type), FILTER_OTHER);	\
157 	if (ret)							\
158 		return ret;
159 
160 static int trace_define_common_fields(void)
161 {
162 	int ret;
163 	struct trace_entry ent;
164 
165 	__common_field(unsigned short, type);
166 	__common_field(unsigned char, flags);
167 	__common_field(unsigned char, preempt_count);
168 	__common_field(int, pid);
169 
170 	return ret;
171 }
172 
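/*
 * For reference, __common_field(int, pid) above expands (roughly) to:
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int",
 *				   "common_pid",
 *				   offsetof(struct trace_entry, pid),
 *				   sizeof(ent.pid),
 *				   is_signed_type(int), FILTER_OTHER);
 */
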
173 static void trace_destroy_fields(struct ftrace_event_call *call)
174 {
175 	struct ftrace_event_field *field, *next;
176 	struct list_head *head;
177 
178 	head = trace_get_fields(call);
179 	list_for_each_entry_safe(field, next, head, link) {
180 		list_del(&field->link);
181 		kmem_cache_free(field_cachep, field);
182 	}
183 }
184 
185 int trace_event_raw_init(struct ftrace_event_call *call)
186 {
187 	int id;
188 
189 	id = register_ftrace_event(&call->event);
190 	if (!id)
191 		return -ENODEV;
192 
193 	return 0;
194 }
195 EXPORT_SYMBOL_GPL(trace_event_raw_init);
196 
197 int ftrace_event_reg(struct ftrace_event_call *call,
198 		     enum trace_reg type, void *data)
199 {
200 	struct ftrace_event_file *file = data;
201 
202 	switch (type) {
203 	case TRACE_REG_REGISTER:
204 		return tracepoint_probe_register(call->name,
205 						 call->class->probe,
206 						 file);
207 	case TRACE_REG_UNREGISTER:
208 		tracepoint_probe_unregister(call->name,
209 					    call->class->probe,
210 					    file);
211 		return 0;
212 
213 #ifdef CONFIG_PERF_EVENTS
214 	case TRACE_REG_PERF_REGISTER:
215 		return tracepoint_probe_register(call->name,
216 						 call->class->perf_probe,
217 						 call);
218 	case TRACE_REG_PERF_UNREGISTER:
219 		tracepoint_probe_unregister(call->name,
220 					    call->class->perf_probe,
221 					    call);
222 		return 0;
223 	case TRACE_REG_PERF_OPEN:
224 	case TRACE_REG_PERF_CLOSE:
225 	case TRACE_REG_PERF_ADD:
226 	case TRACE_REG_PERF_DEL:
227 		return 0;
228 #endif
229 	}
230 	return 0;
231 }
232 EXPORT_SYMBOL_GPL(ftrace_event_reg);
233 
234 void trace_event_enable_cmd_record(bool enable)
235 {
236 	struct ftrace_event_file *file;
237 	struct trace_array *tr;
238 
239 	mutex_lock(&event_mutex);
240 	do_for_each_event_file(tr, file) {
241 
242 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
243 			continue;
244 
245 		if (enable) {
246 			tracing_start_cmdline_record();
247 			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
248 		} else {
249 			tracing_stop_cmdline_record();
250 			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
251 		}
252 	} while_for_each_event_file();
253 	mutex_unlock(&event_mutex);
254 }
255 
256 static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
257 					 int enable, int soft_disable)
258 {
259 	struct ftrace_event_call *call = file->event_call;
260 	int ret = 0;
261 	int disable;
262 
263 	switch (enable) {
264 	case 0:
265 		/*
266 		 * When soft_disable is set and enable is cleared, the sm_ref
267 		 * reference counter is decremented. If it reaches 0, we want
268 		 * to clear the SOFT_DISABLED flag but leave the event in the
269 		 * state that it was. That is, if the event was enabled and
270 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
271 		 * is set we do not want the event to be enabled before we
272 		 * clear the bit.
273 		 *
274 		 * When soft_disable is not set but the SOFT_MODE flag is,
275 		 * we do nothing. Do not disable the tracepoint, otherwise
276 		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
277 		 */
278 		if (soft_disable) {
279 			if (atomic_dec_return(&file->sm_ref) > 0)
280 				break;
281 			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
282 			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
283 		} else
284 			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
285 
286 		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
287 			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
288 			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
289 				tracing_stop_cmdline_record();
290 				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
291 			}
292 			call->class->reg(call, TRACE_REG_UNREGISTER, file);
293 		}
294 		/* If in SOFT_MODE, just set the SOFT_DISABLED_BIT, else clear it */
295 		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
296 			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
297 		else
298 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
299 		break;
300 	case 1:
301 		/*
302 		 * When soft_disable is set and enable is set, we want to
303 		 * register the tracepoint for the event, but leave the event
304 		 * as is. That means, if the event was already enabled, we do
305 		 * nothing (but set SOFT_MODE). If the event is disabled, we
306 		 * set SOFT_DISABLED before enabling the event tracepoint, so
307 		 * it still seems to be disabled.
308 		 */
309 		if (!soft_disable)
310 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
311 		else {
312 			if (atomic_inc_return(&file->sm_ref) > 1)
313 				break;
314 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
315 		}
316 
317 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
318 
319 			/* Keep the event disabled, when going to SOFT_MODE. */
320 			if (soft_disable)
321 				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
322 
323 			if (trace_flags & TRACE_ITER_RECORD_CMD) {
324 				tracing_start_cmdline_record();
325 				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
326 			}
327 			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
328 			if (ret) {
329 				tracing_stop_cmdline_record();
330 				pr_info("event trace: Could not enable event %s\n",
331 					call->name);
332 				break;
333 			}
334 			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
335 
336 			/* WAS_ENABLED gets set but never cleared. */
337 			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
338 		}
339 		break;
340 	}
341 
342 	return ret;
343 }
344 
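/*
 * Illustrative walk-through: with an event currently off, calling
 * __ftrace_event_enable_disable(file, 1, 1) registers the tracepoint
 * but raises SOFT_DISABLED first, so the "enable" file reads "0*".
 * A later plain enable (enable = 1, soft_disable = 0) only clears
 * SOFT_DISABLED; the file then reads "1*" while the soft-mode
 * reference is still held.
 */
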
345 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
346 				       int enable)
347 {
348 	return __ftrace_event_enable_disable(file, enable, 0);
349 }
350 
351 static void ftrace_clear_events(struct trace_array *tr)
352 {
353 	struct ftrace_event_file *file;
354 
355 	mutex_lock(&event_mutex);
356 	list_for_each_entry(file, &tr->events, list) {
357 		ftrace_event_enable_disable(file, 0);
358 	}
359 	mutex_unlock(&event_mutex);
360 }
361 
362 static void __put_system(struct event_subsystem *system)
363 {
364 	struct event_filter *filter = system->filter;
365 
366 	WARN_ON_ONCE(system_refcount(system) == 0);
367 	if (system_refcount_dec(system))
368 		return;
369 
370 	list_del(&system->list);
371 
372 	if (filter) {
373 		kfree(filter->filter_string);
374 		kfree(filter);
375 	}
376 	if (system->ref_count & SYSTEM_FL_FREE_NAME)
377 		kfree(system->name);
378 	kfree(system);
379 }
380 
381 static void __get_system(struct event_subsystem *system)
382 {
383 	WARN_ON_ONCE(system_refcount(system) == 0);
384 	system_refcount_inc(system);
385 }
386 
387 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
388 {
389 	WARN_ON_ONCE(dir->ref_count == 0);
390 	dir->ref_count++;
391 	__get_system(dir->subsystem);
392 }
393 
394 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
395 {
396 	WARN_ON_ONCE(dir->ref_count == 0);
397 	/* If the subsystem is about to be freed, the dir must be too */
398 	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
399 
400 	__put_system(dir->subsystem);
401 	if (!--dir->ref_count)
402 		kfree(dir);
403 }
404 
405 static void put_system(struct ftrace_subsystem_dir *dir)
406 {
407 	mutex_lock(&event_mutex);
408 	__put_system_dir(dir);
409 	mutex_unlock(&event_mutex);
410 }
411 
412 /*
413  * Open and update trace_array ref count.
414  * Must have the current trace_array passed to it.
415  */
416 static int tracing_open_generic_file(struct inode *inode, struct file *filp)
417 {
418 	struct ftrace_event_file *file = inode->i_private;
419 	struct trace_array *tr = file->tr;
420 	int ret;
421 
422 	if (trace_array_get(tr) < 0)
423 		return -ENODEV;
424 
425 	ret = tracing_open_generic(inode, filp);
426 	if (ret < 0)
427 		trace_array_put(tr);
428 	return ret;
429 }
430 
431 static int tracing_release_generic_file(struct inode *inode, struct file *filp)
432 {
433 	struct ftrace_event_file *file = inode->i_private;
434 	struct trace_array *tr = file->tr;
435 
436 	trace_array_put(tr);
437 
438 	return 0;
439 }
440 
441 /*
442  * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
443  */
444 static int
445 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
446 			      const char *sub, const char *event, int set)
447 {
448 	struct ftrace_event_file *file;
449 	struct ftrace_event_call *call;
450 	int ret = -EINVAL;
451 
452 	list_for_each_entry(file, &tr->events, list) {
453 
454 		call = file->event_call;
455 
456 		if (!call->name || !call->class || !call->class->reg)
457 			continue;
458 
459 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
460 			continue;
461 
462 		if (match &&
463 		    strcmp(match, call->name) != 0 &&
464 		    strcmp(match, call->class->system) != 0)
465 			continue;
466 
467 		if (sub && strcmp(sub, call->class->system) != 0)
468 			continue;
469 
470 		if (event && strcmp(event, call->name) != 0)
471 			continue;
472 
473 		ftrace_event_enable_disable(file, set);
474 
475 		ret = 0;
476 	}
477 
478 	return ret;
479 }
480 
481 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
482 				  const char *sub, const char *event, int set)
483 {
484 	int ret;
485 
486 	mutex_lock(&event_mutex);
487 	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
488 	mutex_unlock(&event_mutex);
489 
490 	return ret;
491 }
492 
493 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
494 {
495 	char *event = NULL, *sub = NULL, *match;
496 
497 	/*
498 	 * The buf format can be <subsystem>:<event-name>
499 	 *  *:<event-name> means any event by that name.
500 	 *  :<event-name> is the same.
501 	 *
502 	 *  <subsystem>:* means all events in that subsystem
503 	 *  <subsystem>: means the same.
504 	 *
505 	 *  <name> (no ':') means all events in a subsystem with
506 	 *  the name <name> or any event that matches <name>
507 	 */
508 
509 	match = strsep(&buf, ":");
510 	if (buf) {
511 		sub = match;
512 		event = buf;
513 		match = NULL;
514 
515 		if (!strlen(sub) || strcmp(sub, "*") == 0)
516 			sub = NULL;
517 		if (!strlen(event) || strcmp(event, "*") == 0)
518 			event = NULL;
519 	}
520 
521 	return __ftrace_set_clr_event(tr, match, sub, event, set);
522 }
523 
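/*
 * Examples of strings accepted by ftrace_set_clr_event() (illustrative):
 *
 *	"sched:sched_switch"	one event within the sched system
 *	"sched:"		all events in the sched system
 *	"*:sched_switch"	sched_switch in any system
 *	"sched_switch"		any system or event with that name
 */
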
524 /**
525  * trace_set_clr_event - enable or disable an event
526  * @system: system name to match (NULL for any system)
527  * @event: event name to match (NULL for all events, within system)
528  * @set: 1 to enable, 0 to disable
529  *
530  * This is a way for other parts of the kernel to enable or disable
531  * event recording.
532  *
533  * Returns 0 on success, -EINVAL if the parameters do not match any
534  * registered events.
535  */
536 int trace_set_clr_event(const char *system, const char *event, int set)
537 {
538 	struct trace_array *tr = top_trace_array();
539 
540 	return __ftrace_set_clr_event(tr, NULL, system, event, set);
541 }
542 EXPORT_SYMBOL_GPL(trace_set_clr_event);
543 
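/*
 * In-kernel usage sketch (illustrative): enable every event in the
 * "sched" subsystem from some boot or debug code:
 *
 *	trace_set_clr_event("sched", NULL, 1);
 */
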
544 /* 128 bytes (127 chars + '\0') should be much more than enough */
545 #define EVENT_BUF_SIZE		127
546 
547 static ssize_t
548 ftrace_event_write(struct file *file, const char __user *ubuf,
549 		   size_t cnt, loff_t *ppos)
550 {
551 	struct trace_parser parser;
552 	struct seq_file *m = file->private_data;
553 	struct trace_array *tr = m->private;
554 	ssize_t read, ret;
555 
556 	if (!cnt)
557 		return 0;
558 
559 	ret = tracing_update_buffers();
560 	if (ret < 0)
561 		return ret;
562 
563 	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
564 		return -ENOMEM;
565 
566 	read = trace_get_user(&parser, ubuf, cnt, ppos);
567 
568 	if (read >= 0 && trace_parser_loaded(&parser)) {
569 		int set = 1;
570 
571 		if (*parser.buffer == '!')
572 			set = 0;
573 
574 		parser.buffer[parser.idx] = 0;
575 
576 		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
577 		if (ret)
578 			goto out_put;
579 	}
580 
581 	ret = read;
582 
583  out_put:
584 	trace_parser_put(&parser);
585 
586 	return ret;
587 }
588 
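/*
 * From user space the parser above gives the usual set_event
 * semantics (illustrative):
 *
 *	echo 'sched:sched_switch' > set_event	  enables the event
 *	echo '!sched:sched_switch' >> set_event	  disables it again
 */
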
589 static void *
590 t_next(struct seq_file *m, void *v, loff_t *pos)
591 {
592 	struct ftrace_event_file *file = v;
593 	struct ftrace_event_call *call;
594 	struct trace_array *tr = m->private;
595 
596 	(*pos)++;
597 
598 	list_for_each_entry_continue(file, &tr->events, list) {
599 		call = file->event_call;
600 		/*
601 		 * The "ftrace" subsystem is for showing formats only;
602 		 * its events cannot be enabled or disabled via the event files.
603 		 */
604 		if (call->class && call->class->reg)
605 			return file;
606 	}
607 
608 	return NULL;
609 }
610 
611 static void *t_start(struct seq_file *m, loff_t *pos)
612 {
613 	struct ftrace_event_file *file;
614 	struct trace_array *tr = m->private;
615 	loff_t l;
616 
617 	mutex_lock(&event_mutex);
618 
619 	file = list_entry(&tr->events, struct ftrace_event_file, list);
620 	for (l = 0; l <= *pos; ) {
621 		file = t_next(m, file, &l);
622 		if (!file)
623 			break;
624 	}
625 	return file;
626 }
627 
628 static void *
629 s_next(struct seq_file *m, void *v, loff_t *pos)
630 {
631 	struct ftrace_event_file *file = v;
632 	struct trace_array *tr = m->private;
633 
634 	(*pos)++;
635 
636 	list_for_each_entry_continue(file, &tr->events, list) {
637 		if (file->flags & FTRACE_EVENT_FL_ENABLED)
638 			return file;
639 	}
640 
641 	return NULL;
642 }
643 
644 static void *s_start(struct seq_file *m, loff_t *pos)
645 {
646 	struct ftrace_event_file *file;
647 	struct trace_array *tr = m->private;
648 	loff_t l;
649 
650 	mutex_lock(&event_mutex);
651 
652 	file = list_entry(&tr->events, struct ftrace_event_file, list);
653 	for (l = 0; l <= *pos; ) {
654 		file = s_next(m, file, &l);
655 		if (!file)
656 			break;
657 	}
658 	return file;
659 }
660 
661 static int t_show(struct seq_file *m, void *v)
662 {
663 	struct ftrace_event_file *file = v;
664 	struct ftrace_event_call *call = file->event_call;
665 
666 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
667 		seq_printf(m, "%s:", call->class->system);
668 	seq_printf(m, "%s\n", call->name);
669 
670 	return 0;
671 }
672 
673 static void t_stop(struct seq_file *m, void *p)
674 {
675 	mutex_unlock(&event_mutex);
676 }
677 
678 static ssize_t
679 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
680 		  loff_t *ppos)
681 {
682 	struct ftrace_event_file *file = filp->private_data;
683 	char buf[4] = "0";
684 
685 	if (file->flags & FTRACE_EVENT_FL_ENABLED &&
686 	    !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
687 		strcpy(buf, "1");
688 
689 	if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
690 	    file->flags & FTRACE_EVENT_FL_SOFT_MODE)
691 		strcat(buf, "*");
692 
693 	strcat(buf, "\n");
694 
695 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
696 }
697 
698 static ssize_t
699 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
700 		   loff_t *ppos)
701 {
702 	struct ftrace_event_file *file = filp->private_data;
703 	unsigned long val;
704 	int ret;
705 
706 	if (!file)
707 		return -EINVAL;
708 
709 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
710 	if (ret)
711 		return ret;
712 
713 	ret = tracing_update_buffers();
714 	if (ret < 0)
715 		return ret;
716 
717 	switch (val) {
718 	case 0:
719 	case 1:
720 		mutex_lock(&event_mutex);
721 		ret = ftrace_event_enable_disable(file, val);
722 		mutex_unlock(&event_mutex);
723 		break;
724 
725 	default:
726 		return -EINVAL;
727 	}
728 
729 	*ppos += cnt;
730 
731 	return ret ? ret : cnt;
732 }
733 
734 static ssize_t
735 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
736 		   loff_t *ppos)
737 {
738 	const char set_to_char[4] = { '?', '0', '1', 'X' };
739 	struct ftrace_subsystem_dir *dir = filp->private_data;
740 	struct event_subsystem *system = dir->subsystem;
741 	struct ftrace_event_call *call;
742 	struct ftrace_event_file *file;
743 	struct trace_array *tr = dir->tr;
744 	char buf[2];
745 	int set = 0;
746 	int ret;
747 
748 	mutex_lock(&event_mutex);
749 	list_for_each_entry(file, &tr->events, list) {
750 		call = file->event_call;
751 		if (!call->name || !call->class || !call->class->reg)
752 			continue;
753 
754 		if (system && strcmp(call->class->system, system->name) != 0)
755 			continue;
756 
757 		/*
758 		 * We need to find out if all the events are set
759 		 * or if all events are cleared, or if we have
760 		 * a mixture.
761 		 */
762 		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
763 
764 		/*
765 		 * If we have a mixture, no need to look further.
766 		 */
767 		if (set == 3)
768 			break;
769 	}
770 	mutex_unlock(&event_mutex);
771 
772 	buf[0] = set_to_char[set];
773 	buf[1] = '\n';
774 
775 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
776 
777 	return ret;
778 }
779 
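/*
 * The system "enable" file therefore reads as one of (see the
 * set_to_char mapping above):
 *
 *	"0"  every matching event is disabled	(set == 1)
 *	"1"  every matching event is enabled	(set == 2)
 *	"X"  a mixture of enabled and disabled	(set == 3)
 *	"?"  no matching events were found	(set == 0)
 */
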
780 static ssize_t
781 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
782 		    loff_t *ppos)
783 {
784 	struct ftrace_subsystem_dir *dir = filp->private_data;
785 	struct event_subsystem *system = dir->subsystem;
786 	const char *name = NULL;
787 	unsigned long val;
788 	ssize_t ret;
789 
790 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
791 	if (ret)
792 		return ret;
793 
794 	ret = tracing_update_buffers();
795 	if (ret < 0)
796 		return ret;
797 
798 	if (val != 0 && val != 1)
799 		return -EINVAL;
800 
801 	/*
802 	 * Opening of "enable" adds a ref count to system,
803 	 * so the name is safe to use.
804 	 */
805 	if (system)
806 		name = system->name;
807 
808 	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
809 	if (ret)
810 		goto out;
811 
812 	ret = cnt;
813 
814 out:
815 	*ppos += cnt;
816 
817 	return ret;
818 }
819 
820 enum {
821 	FORMAT_HEADER		= 1,
822 	FORMAT_FIELD_SEPERATOR	= 2,
823 	FORMAT_PRINTFMT		= 3,
824 };
825 
826 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
827 {
828 	struct ftrace_event_call *call = m->private;
829 	struct list_head *common_head = &ftrace_common_fields;
830 	struct list_head *head = trace_get_fields(call);
831 	struct list_head *node = v;
832 
833 	(*pos)++;
834 
835 	switch ((unsigned long)v) {
836 	case FORMAT_HEADER:
837 		node = common_head;
838 		break;
839 
840 	case FORMAT_FIELD_SEPERATOR:
841 		node = head;
842 		break;
843 
844 	case FORMAT_PRINTFMT:
845 		/* all done */
846 		return NULL;
847 	}
848 
849 	node = node->prev;
850 	if (node == common_head)
851 		return (void *)FORMAT_FIELD_SEPERATOR;
852 	else if (node == head)
853 		return (void *)FORMAT_PRINTFMT;
854 	else
855 		return node;
856 }
857 
858 static int f_show(struct seq_file *m, void *v)
859 {
860 	struct ftrace_event_call *call = m->private;
861 	struct ftrace_event_field *field;
862 	const char *array_descriptor;
863 
864 	switch ((unsigned long)v) {
865 	case FORMAT_HEADER:
866 		seq_printf(m, "name: %s\n", call->name);
867 		seq_printf(m, "ID: %d\n", call->event.type);
868 		seq_printf(m, "format:\n");
869 		return 0;
870 
871 	case FORMAT_FIELD_SEPERATOR:
872 		seq_putc(m, '\n');
873 		return 0;
874 
875 	case FORMAT_PRINTFMT:
876 		seq_printf(m, "\nprint fmt: %s\n",
877 			   call->print_fmt);
878 		return 0;
879 	}
880 
881 	field = list_entry(v, struct ftrace_event_field, link);
882 	/*
883 	 * Smartly shows the array type (except dynamic arrays).
884 	 * Normal:
885 	 *	field:TYPE VAR
886 	 * If TYPE := TYPE[LEN], it is shown:
887 	 *	field:TYPE VAR[LEN]
888 	 */
889 	array_descriptor = strchr(field->type, '[');
890 
891 	if (!strncmp(field->type, "__data_loc", 10))
892 		array_descriptor = NULL;
893 
894 	if (!array_descriptor)
895 		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
896 			   field->type, field->name, field->offset,
897 			   field->size, !!field->is_signed);
898 	else
899 		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
900 			   (int)(array_descriptor - field->type),
901 			   field->type, field->name,
902 			   array_descriptor, field->offset,
903 			   field->size, !!field->is_signed);
904 
905 	return 0;
906 }
907 
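/*
 * Sample "format" output for a fixed-size array field (illustrative
 * offsets):
 *
 *	field:char comm[16];	offset:12;	size:16;	signed:1;
 *
 * A __data_loc (dynamic array) field keeps its full type string
 * instead of being split around the '['.
 */
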
908 static void *f_start(struct seq_file *m, loff_t *pos)
909 {
910 	void *p = (void *)FORMAT_HEADER;
911 	loff_t l = 0;
912 
913 	while (l < *pos && p)
914 		p = f_next(m, p, &l);
915 
916 	return p;
917 }
918 
919 static void f_stop(struct seq_file *m, void *p)
920 {
921 }
922 
923 static const struct seq_operations trace_format_seq_ops = {
924 	.start		= f_start,
925 	.next		= f_next,
926 	.stop		= f_stop,
927 	.show		= f_show,
928 };
929 
930 static int trace_format_open(struct inode *inode, struct file *file)
931 {
932 	struct ftrace_event_call *call = inode->i_private;
933 	struct seq_file *m;
934 	int ret;
935 
936 	ret = seq_open(file, &trace_format_seq_ops);
937 	if (ret < 0)
938 		return ret;
939 
940 	m = file->private_data;
941 	m->private = call;
942 
943 	return 0;
944 }
945 
946 static ssize_t
947 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
948 {
949 	struct ftrace_event_call *call = filp->private_data;
950 	char buf[32];
951 	int len;
952 
953 	if (*ppos)
954 		return 0;
955 
956 	len = sprintf(buf, "%d\n", call->event.type);
957 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
958 }
959 
960 static ssize_t
961 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
962 		  loff_t *ppos)
963 {
964 	struct ftrace_event_call *call = filp->private_data;
965 	struct trace_seq *s;
966 	int r;
967 
968 	if (*ppos)
969 		return 0;
970 
971 	s = kmalloc(sizeof(*s), GFP_KERNEL);
972 	if (!s)
973 		return -ENOMEM;
974 
975 	trace_seq_init(s);
976 
977 	print_event_filter(call, s);
978 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
979 
980 	kfree(s);
981 
982 	return r;
983 }
984 
985 static ssize_t
986 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
987 		   loff_t *ppos)
988 {
989 	struct ftrace_event_call *call = filp->private_data;
990 	char *buf;
991 	int err;
992 
993 	if (cnt >= PAGE_SIZE)
994 		return -EINVAL;
995 
996 	buf = (char *)__get_free_page(GFP_TEMPORARY);
997 	if (!buf)
998 		return -ENOMEM;
999 
1000 	if (copy_from_user(buf, ubuf, cnt)) {
1001 		free_page((unsigned long) buf);
1002 		return -EFAULT;
1003 	}
1004 	buf[cnt] = '\0';
1005 
1006 	err = apply_event_filter(call, buf);
1007 	free_page((unsigned long) buf);
1008 	if (err < 0)
1009 		return err;
1010 
1011 	*ppos += cnt;
1012 
1013 	return cnt;
1014 }
1015 
1016 static LIST_HEAD(event_subsystems);
1017 
1018 static int subsystem_open(struct inode *inode, struct file *filp)
1019 {
1020 	struct event_subsystem *system = NULL;
1021 	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1022 	struct trace_array *tr;
1023 	int ret;
1024 
1025 	/* Make sure the system still exists */
1026 	mutex_lock(&trace_types_lock);
1027 	mutex_lock(&event_mutex);
1028 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1029 		list_for_each_entry(dir, &tr->systems, list) {
1030 			if (dir == inode->i_private) {
1031 				/* Don't open systems with no events */
1032 				if (dir->nr_events) {
1033 					__get_system_dir(dir);
1034 					system = dir->subsystem;
1035 				}
1036 				goto exit_loop;
1037 			}
1038 		}
1039 	}
1040  exit_loop:
1041 	mutex_unlock(&event_mutex);
1042 	mutex_unlock(&trace_types_lock);
1043 
1044 	if (!system)
1045 		return -ENODEV;
1046 
1047 	/* Some versions of gcc think dir can be uninitialized here */
1048 	WARN_ON(!dir);
1049 
1050 	/* Still need to increment the ref count of the system */
1051 	if (trace_array_get(tr) < 0) {
1052 		put_system(dir);
1053 		return -ENODEV;
1054 	}
1055 
1056 	ret = tracing_open_generic(inode, filp);
1057 	if (ret < 0) {
1058 		trace_array_put(tr);
1059 		put_system(dir);
1060 	}
1061 
1062 	return ret;
1063 }
1064 
1065 static int system_tr_open(struct inode *inode, struct file *filp)
1066 {
1067 	struct ftrace_subsystem_dir *dir;
1068 	struct trace_array *tr = inode->i_private;
1069 	int ret;
1070 
1071 	if (trace_array_get(tr) < 0)
1072 		return -ENODEV;
1073 
1074 	/* Make a temporary dir that has no system but points to tr */
1075 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1076 	if (!dir) {
1077 		trace_array_put(tr);
1078 		return -ENOMEM;
1079 	}
1080 
1081 	dir->tr = tr;
1082 
1083 	ret = tracing_open_generic(inode, filp);
1084 	if (ret < 0) {
1085 		trace_array_put(tr);
1086 		kfree(dir);
1087 	}
1088 
1089 	filp->private_data = dir;
1090 
1091 	return ret;
1092 }
1093 
1094 static int subsystem_release(struct inode *inode, struct file *file)
1095 {
1096 	struct ftrace_subsystem_dir *dir = file->private_data;
1097 
1098 	trace_array_put(dir->tr);
1099 
1100 	/*
1101 	 * If dir->subsystem is NULL, then this is a temporary
1102 	 * descriptor that was made for a trace_array to enable
1103 	 * all subsystems.
1104 	 */
1105 	if (dir->subsystem)
1106 		put_system(dir);
1107 	else
1108 		kfree(dir);
1109 
1110 	return 0;
1111 }
1112 
1113 static ssize_t
1114 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1115 		      loff_t *ppos)
1116 {
1117 	struct ftrace_subsystem_dir *dir = filp->private_data;
1118 	struct event_subsystem *system = dir->subsystem;
1119 	struct trace_seq *s;
1120 	int r;
1121 
1122 	if (*ppos)
1123 		return 0;
1124 
1125 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1126 	if (!s)
1127 		return -ENOMEM;
1128 
1129 	trace_seq_init(s);
1130 
1131 	print_subsystem_event_filter(system, s);
1132 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1133 
1134 	kfree(s);
1135 
1136 	return r;
1137 }
1138 
1139 static ssize_t
1140 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1141 		       loff_t *ppos)
1142 {
1143 	struct ftrace_subsystem_dir *dir = filp->private_data;
1144 	char *buf;
1145 	int err;
1146 
1147 	if (cnt >= PAGE_SIZE)
1148 		return -EINVAL;
1149 
1150 	buf = (char *)__get_free_page(GFP_TEMPORARY);
1151 	if (!buf)
1152 		return -ENOMEM;
1153 
1154 	if (copy_from_user(buf, ubuf, cnt)) {
1155 		free_page((unsigned long) buf);
1156 		return -EFAULT;
1157 	}
1158 	buf[cnt] = '\0';
1159 
1160 	err = apply_subsystem_event_filter(dir, buf);
1161 	free_page((unsigned long) buf);
1162 	if (err < 0)
1163 		return err;
1164 
1165 	*ppos += cnt;
1166 
1167 	return cnt;
1168 }
1169 
1170 static ssize_t
1171 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1172 {
1173 	int (*func)(struct trace_seq *s) = filp->private_data;
1174 	struct trace_seq *s;
1175 	int r;
1176 
1177 	if (*ppos)
1178 		return 0;
1179 
1180 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1181 	if (!s)
1182 		return -ENOMEM;
1183 
1184 	trace_seq_init(s);
1185 
1186 	func(s);
1187 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1188 
1189 	kfree(s);
1190 
1191 	return r;
1192 }
1193 
1194 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1195 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1196 static int ftrace_event_release(struct inode *inode, struct file *file);
1197 
1198 static const struct seq_operations show_event_seq_ops = {
1199 	.start = t_start,
1200 	.next = t_next,
1201 	.show = t_show,
1202 	.stop = t_stop,
1203 };
1204 
1205 static const struct seq_operations show_set_event_seq_ops = {
1206 	.start = s_start,
1207 	.next = s_next,
1208 	.show = t_show,
1209 	.stop = t_stop,
1210 };
1211 
1212 static const struct file_operations ftrace_avail_fops = {
1213 	.open = ftrace_event_avail_open,
1214 	.read = seq_read,
1215 	.llseek = seq_lseek,
1216 	.release = seq_release,
1217 };
1218 
1219 static const struct file_operations ftrace_set_event_fops = {
1220 	.open = ftrace_event_set_open,
1221 	.read = seq_read,
1222 	.write = ftrace_event_write,
1223 	.llseek = seq_lseek,
1224 	.release = ftrace_event_release,
1225 };
1226 
1227 static const struct file_operations ftrace_enable_fops = {
1228 	.open = tracing_open_generic_file,
1229 	.read = event_enable_read,
1230 	.write = event_enable_write,
1231 	.release = tracing_release_generic_file,
1232 	.llseek = default_llseek,
1233 };
1234 
1235 static const struct file_operations ftrace_event_format_fops = {
1236 	.open = trace_format_open,
1237 	.read = seq_read,
1238 	.llseek = seq_lseek,
1239 	.release = seq_release,
1240 };
1241 
1242 static const struct file_operations ftrace_event_id_fops = {
1243 	.open = tracing_open_generic,
1244 	.read = event_id_read,
1245 	.llseek = default_llseek,
1246 };
1247 
1248 static const struct file_operations ftrace_event_filter_fops = {
1249 	.open = tracing_open_generic,
1250 	.read = event_filter_read,
1251 	.write = event_filter_write,
1252 	.llseek = default_llseek,
1253 };
1254 
1255 static const struct file_operations ftrace_subsystem_filter_fops = {
1256 	.open = subsystem_open,
1257 	.read = subsystem_filter_read,
1258 	.write = subsystem_filter_write,
1259 	.llseek = default_llseek,
1260 	.release = subsystem_release,
1261 };
1262 
1263 static const struct file_operations ftrace_system_enable_fops = {
1264 	.open = subsystem_open,
1265 	.read = system_enable_read,
1266 	.write = system_enable_write,
1267 	.llseek = default_llseek,
1268 	.release = subsystem_release,
1269 };
1270 
1271 static const struct file_operations ftrace_tr_enable_fops = {
1272 	.open = system_tr_open,
1273 	.read = system_enable_read,
1274 	.write = system_enable_write,
1275 	.llseek = default_llseek,
1276 	.release = subsystem_release,
1277 };
1278 
1279 static const struct file_operations ftrace_show_header_fops = {
1280 	.open = tracing_open_generic,
1281 	.read = show_header,
1282 	.llseek = default_llseek,
1283 };
1284 
1285 static int
1286 ftrace_event_open(struct inode *inode, struct file *file,
1287 		  const struct seq_operations *seq_ops)
1288 {
1289 	struct seq_file *m;
1290 	int ret;
1291 
1292 	ret = seq_open(file, seq_ops);
1293 	if (ret < 0)
1294 		return ret;
1295 	m = file->private_data;
1296 	/* copy tr over to seq ops */
1297 	m->private = inode->i_private;
1298 
1299 	return ret;
1300 }
1301 
1302 static int ftrace_event_release(struct inode *inode, struct file *file)
1303 {
1304 	struct trace_array *tr = inode->i_private;
1305 
1306 	trace_array_put(tr);
1307 
1308 	return seq_release(inode, file);
1309 }
1310 
1311 static int
1312 ftrace_event_avail_open(struct inode *inode, struct file *file)
1313 {
1314 	const struct seq_operations *seq_ops = &show_event_seq_ops;
1315 
1316 	return ftrace_event_open(inode, file, seq_ops);
1317 }
1318 
1319 static int
1320 ftrace_event_set_open(struct inode *inode, struct file *file)
1321 {
1322 	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1323 	struct trace_array *tr = inode->i_private;
1324 	int ret;
1325 
1326 	if (trace_array_get(tr) < 0)
1327 		return -ENODEV;
1328 
1329 	if ((file->f_mode & FMODE_WRITE) &&
1330 	    (file->f_flags & O_TRUNC))
1331 		ftrace_clear_events(tr);
1332 
1333 	ret = ftrace_event_open(inode, file, seq_ops);
1334 	if (ret < 0)
1335 		trace_array_put(tr);
1336 	return ret;
1337 }
1338 
1339 static struct event_subsystem *
1340 create_new_subsystem(const char *name)
1341 {
1342 	struct event_subsystem *system;
1343 
1344 	/* need to create new entry */
1345 	system = kmalloc(sizeof(*system), GFP_KERNEL);
1346 	if (!system)
1347 		return NULL;
1348 
1349 	system->ref_count = 1;
1350 
1351 	/* Only allocate if dynamic (kprobes and modules) */
1352 	if (!core_kernel_data((unsigned long)name)) {
1353 		system->ref_count |= SYSTEM_FL_FREE_NAME;
1354 		system->name = kstrdup(name, GFP_KERNEL);
1355 		if (!system->name)
1356 			goto out_free;
1357 	} else
1358 		system->name = name;
1359 
1360 	system->filter = NULL;
1361 
1362 	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1363 	if (!system->filter)
1364 		goto out_free;
1365 
1366 	list_add(&system->list, &event_subsystems);
1367 
1368 	return system;
1369 
1370  out_free:
1371 	if (system->ref_count & SYSTEM_FL_FREE_NAME)
1372 		kfree(system->name);
1373 	kfree(system);
1374 	return NULL;
1375 }
1376 
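/*
 * Note on create_new_subsystem(): a name string living in core
 * kernel data (built-in events) is referenced directly, while a
 * name from a module or kprobe-created event may outlive its
 * creator, so it is kstrdup()'d and tagged with SYSTEM_FL_FREE_NAME
 * for kfree() in __put_system().
 */
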
1377 static struct dentry *
1378 event_subsystem_dir(struct trace_array *tr, const char *name,
1379 		    struct ftrace_event_file *file, struct dentry *parent)
1380 {
1381 	struct ftrace_subsystem_dir *dir;
1382 	struct event_subsystem *system;
1383 	struct dentry *entry;
1384 
1385 	/* First see if we did not already create this dir */
1386 	list_for_each_entry(dir, &tr->systems, list) {
1387 		system = dir->subsystem;
1388 		if (strcmp(system->name, name) == 0) {
1389 			dir->nr_events++;
1390 			file->system = dir;
1391 			return dir->entry;
1392 		}
1393 	}
1394 
1395 	/* Now see if the system itself exists. */
1396 	list_for_each_entry(system, &event_subsystems, list) {
1397 		if (strcmp(system->name, name) == 0)
1398 			break;
1399 	}
1400 	/* Reset system variable when not found */
1401 	if (&system->list == &event_subsystems)
1402 		system = NULL;
1403 
1404 	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1405 	if (!dir)
1406 		goto out_fail;
1407 
1408 	if (!system) {
1409 		system = create_new_subsystem(name);
1410 		if (!system)
1411 			goto out_free;
1412 	} else
1413 		__get_system(system);
1414 
1415 	dir->entry = debugfs_create_dir(name, parent);
1416 	if (!dir->entry) {
1417 		pr_warning("Failed to create system directory %s\n", name);
1418 		__put_system(system);
1419 		goto out_free;
1420 	}
1421 
1422 	dir->tr = tr;
1423 	dir->ref_count = 1;
1424 	dir->nr_events = 1;
1425 	dir->subsystem = system;
1426 	file->system = dir;
1427 
1428 	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1429 				    &ftrace_subsystem_filter_fops);
1430 	if (!entry) {
1431 		kfree(system->filter);
1432 		system->filter = NULL;
1433 		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
1434 	}
1435 
1436 	trace_create_file("enable", 0644, dir->entry, dir,
1437 			  &ftrace_system_enable_fops);
1438 
1439 	list_add(&dir->list, &tr->systems);
1440 
1441 	return dir->entry;
1442 
1443  out_free:
1444 	kfree(dir);
1445  out_fail:
1446 	/* Only print this message if we failed on a memory allocation */
1447 	if (!dir || !system)
1448 		pr_warning("No memory to create event subsystem %s\n",
1449 			   name);
1450 	return NULL;
1451 }
1452 
1453 static int
1454 event_create_dir(struct dentry *parent,
1455 		 struct ftrace_event_file *file,
1456 		 const struct file_operations *id,
1457 		 const struct file_operations *enable,
1458 		 const struct file_operations *filter,
1459 		 const struct file_operations *format)
1460 {
1461 	struct ftrace_event_call *call = file->event_call;
1462 	struct trace_array *tr = file->tr;
1463 	struct list_head *head;
1464 	struct dentry *d_events;
1465 	int ret;
1466 
1467 	/*
1468 	 * If the trace point header did not define TRACE_SYSTEM
1469 	 * then the system would be called "TRACE_SYSTEM".
1470 	 */
1471 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1472 		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1473 		if (!d_events)
1474 			return -ENOMEM;
1475 	} else
1476 		d_events = parent;
1477 
1478 	file->dir = debugfs_create_dir(call->name, d_events);
1479 	if (!file->dir) {
1480 		pr_warning("Could not create debugfs '%s' directory\n",
1481 			   call->name);
1482 		return -1;
1483 	}
1484 
1485 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1486 		trace_create_file("enable", 0644, file->dir, file,
1487 				  enable);
1488 
1489 #ifdef CONFIG_PERF_EVENTS
1490 	if (call->event.type && call->class->reg)
1491 		trace_create_file("id", 0444, file->dir, call,
1492 				  id);
1493 #endif
1494 
1495 	/*
1496 	 * Other events may have the same class. Only update
1497 	 * the fields if they are not already defined.
1498 	 */
1499 	head = trace_get_fields(call);
1500 	if (list_empty(head)) {
1501 		ret = call->class->define_fields(call);
1502 		if (ret < 0) {
1503 			pr_warning("Could not initialize trace point events/%s\n",
1504 				   call->name);
1505 			return -1;
1506 		}
1507 	}
1508 	trace_create_file("filter", 0644, file->dir, call,
1509 			  filter);
1510 
1511 	trace_create_file("format", 0444, file->dir, call,
1512 			  format);
1513 
1514 	return 0;
1515 }
1516 
1517 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
1518 {
1519 	if (!dir)
1520 		return;
1521 
1522 	if (!--dir->nr_events) {
1523 		debugfs_remove_recursive(dir->entry);
1524 		list_del(&dir->list);
1525 		__put_system_dir(dir);
1526 	}
1527 }
1528 
1529 static void remove_event_from_tracers(struct ftrace_event_call *call)
1530 {
1531 	struct ftrace_event_file *file;
1532 	struct trace_array *tr;
1533 
1534 	do_for_each_event_file_safe(tr, file) {
1535 
1536 		if (file->event_call != call)
1537 			continue;
1538 
1539 		list_del(&file->list);
1540 		debugfs_remove_recursive(file->dir);
1541 		remove_subsystem(file->system);
1542 		kmem_cache_free(file_cachep, file);
1543 
1544 		/*
1545 		 * The do_for_each_event_file_safe() is
1546 		 * a double loop. After finding the call for this
1547 		 * trace_array, we use break to jump to the next
1548 		 * trace_array.
1549 		 */
1550 		break;
1551 	} while_for_each_event_file();
1552 }
1553 
1554 static void event_remove(struct ftrace_event_call *call)
1555 {
1556 	struct trace_array *tr;
1557 	struct ftrace_event_file *file;
1558 
1559 	do_for_each_event_file(tr, file) {
1560 		if (file->event_call != call)
1561 			continue;
1562 		ftrace_event_enable_disable(file, 0);
1563 		/*
1564 		 * The do_for_each_event_file() is
1565 		 * a double loop. After finding the call for this
1566 		 * trace_array, we use break to jump to the next
1567 		 * trace_array.
1568 		 */
1569 		break;
1570 	} while_for_each_event_file();
1571 
1572 	if (call->event.funcs)
1573 		__unregister_ftrace_event(&call->event);
1574 	remove_event_from_tracers(call);
1575 	list_del(&call->list);
1576 }
1577 
1578 static int event_init(struct ftrace_event_call *call)
1579 {
1580 	int ret = 0;
1581 
1582 	if (WARN_ON(!call->name))
1583 		return -EINVAL;
1584 
1585 	if (call->class->raw_init) {
1586 		ret = call->class->raw_init(call);
1587 		if (ret < 0 && ret != -ENOSYS)
1588 			pr_warn("Could not initialize trace events/%s\n",
1589 				call->name);
1590 	}
1591 
1592 	return ret;
1593 }
1594 
1595 static int
1596 __register_event(struct ftrace_event_call *call, struct module *mod)
1597 {
1598 	int ret;
1599 
1600 	ret = event_init(call);
1601 	if (ret < 0)
1602 		return ret;
1603 
1604 	list_add(&call->list, &ftrace_events);
1605 	call->mod = mod;
1606 
1607 	return 0;
1608 }
1609 
1610 static struct ftrace_event_file *
1611 trace_create_new_event(struct ftrace_event_call *call,
1612 		       struct trace_array *tr)
1613 {
1614 	struct ftrace_event_file *file;
1615 
1616 	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1617 	if (!file)
1618 		return NULL;
1619 
1620 	file->event_call = call;
1621 	file->tr = tr;
1622 	atomic_set(&file->sm_ref, 0);
1623 	list_add(&file->list, &tr->events);
1624 
1625 	return file;
1626 }
1627 
1628 /* Add an event to a trace directory */
1629 static int
1630 __trace_add_new_event(struct ftrace_event_call *call,
1631 		      struct trace_array *tr,
1632 		      const struct file_operations *id,
1633 		      const struct file_operations *enable,
1634 		      const struct file_operations *filter,
1635 		      const struct file_operations *format)
1636 {
1637 	struct ftrace_event_file *file;
1638 
1639 	file = trace_create_new_event(call, tr);
1640 	if (!file)
1641 		return -ENOMEM;
1642 
1643 	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1644 }
1645 
1646 /*
1647  * Just create a descriptor for early init. A descriptor is required
1648  * for enabling events at boot. We want to enable events before
1649  * the filesystem is initialized.
1650  */
1651 static __init int
1652 __trace_early_add_new_event(struct ftrace_event_call *call,
1653 			    struct trace_array *tr)
1654 {
1655 	struct ftrace_event_file *file;
1656 
1657 	file = trace_create_new_event(call, tr);
1658 	if (!file)
1659 		return -ENOMEM;
1660 
1661 	return 0;
1662 }
1663 
1664 struct ftrace_module_file_ops;
1665 static void __add_event_to_tracers(struct ftrace_event_call *call,
1666 				   struct ftrace_module_file_ops *file_ops);
1667 
1668 /* Add an additional event_call dynamically */
1669 int trace_add_event_call(struct ftrace_event_call *call)
1670 {
1671 	int ret;
1672 	mutex_lock(&trace_types_lock);
1673 	mutex_lock(&event_mutex);
1674 
1675 	ret = __register_event(call, NULL);
1676 	if (ret >= 0)
1677 		__add_event_to_tracers(call, NULL);
1678 
1679 	mutex_unlock(&event_mutex);
1680 	mutex_unlock(&trace_types_lock);
1681 	return ret;
1682 }
1683 
1684 /*
1685  * Must be called under locking of trace_types_lock, event_mutex and
1686  * trace_event_sem.
1687  */
1688 static void __trace_remove_event_call(struct ftrace_event_call *call)
1689 {
1690 	event_remove(call);
1691 	trace_destroy_fields(call);
1692 	destroy_preds(call);
1693 }
1694 
1695 /* Remove an event_call */
1696 void trace_remove_event_call(struct ftrace_event_call *call)
1697 {
1698 	mutex_lock(&trace_types_lock);
1699 	mutex_lock(&event_mutex);
1700 	down_write(&trace_event_sem);
1701 	__trace_remove_event_call(call);
1702 	up_write(&trace_event_sem);
1703 	mutex_unlock(&event_mutex);
1704 	mutex_unlock(&trace_types_lock);
1705 }
1706 
1707 #define for_each_event(event, start, end)			\
1708 	for (event = start;					\
1709 	     (unsigned long)event < (unsigned long)end;		\
1710 	     event++)
1711 
1712 #ifdef CONFIG_MODULES
1713 
1714 static LIST_HEAD(ftrace_module_file_list);
1715 
1716 /*
1717  * Modules must own their file_operations so that module
1718  * reference counting works correctly.
1719  */
1720 struct ftrace_module_file_ops {
1721 	struct list_head		list;
1722 	struct module			*mod;
1723 	struct file_operations		id;
1724 	struct file_operations		enable;
1725 	struct file_operations		format;
1726 	struct file_operations		filter;
1727 };
1728 
1729 static struct ftrace_module_file_ops *
1730 find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
1731 {
1732 	/*
1733 	 * As event_calls are added in groups by module,
1734 	 * when we find one file_ops, we don't need to search for
1735 	 * each call in that module, as the rest should be the
1736 	 * same. Only search for a new one if the last one did
1737 	 * not match.
1738 	 */
1739 	if (file_ops && mod == file_ops->mod)
1740 		return file_ops;
1741 
1742 	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1743 		if (file_ops->mod == mod)
1744 			return file_ops;
1745 	}
1746 	return NULL;
1747 }
1748 
1749 static struct ftrace_module_file_ops *
1750 trace_create_file_ops(struct module *mod)
1751 {
1752 	struct ftrace_module_file_ops *file_ops;
1753 
1754 	/*
1755 	 * This is a bit of a PITA. To allow for correct reference
1756 	 * counting, modules must "own" their file_operations.
1757 	 * To do this, we allocate the file operations that will be
1758 	 * used in the event directory.
1759 	 */
1760 
1761 	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1762 	if (!file_ops)
1763 		return NULL;
1764 
1765 	file_ops->mod = mod;
1766 
1767 	file_ops->id = ftrace_event_id_fops;
1768 	file_ops->id.owner = mod;
1769 
1770 	file_ops->enable = ftrace_enable_fops;
1771 	file_ops->enable.owner = mod;
1772 
1773 	file_ops->filter = ftrace_event_filter_fops;
1774 	file_ops->filter.owner = mod;
1775 
1776 	file_ops->format = ftrace_event_format_fops;
1777 	file_ops->format.owner = mod;
1778 
1779 	list_add(&file_ops->list, &ftrace_module_file_list);
1780 
1781 	return file_ops;
1782 }
1783 
1784 static void trace_module_add_events(struct module *mod)
1785 {
1786 	struct ftrace_module_file_ops *file_ops = NULL;
1787 	struct ftrace_event_call **call, **start, **end;
1788 
1789 	start = mod->trace_events;
1790 	end = mod->trace_events + mod->num_trace_events;
1791 
1792 	if (start == end)
1793 		return;
1794 
1795 	file_ops = trace_create_file_ops(mod);
1796 	if (!file_ops)
1797 		return;
1798 
1799 	for_each_event(call, start, end) {
1800 		__register_event(*call, mod);
1801 		__add_event_to_tracers(*call, file_ops);
1802 	}
1803 }
1804 
1805 static void trace_module_remove_events(struct module *mod)
1806 {
1807 	struct ftrace_module_file_ops *file_ops;
1808 	struct ftrace_event_call *call, *p;
1809 	bool clear_trace = false;
1810 
1811 	down_write(&trace_event_sem);
1812 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
1813 		if (call->mod == mod) {
1814 			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1815 				clear_trace = true;
1816 			__trace_remove_event_call(call);
1817 		}
1818 	}
1819 
1820 	/* Now free the file_operations */
1821 	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1822 		if (file_ops->mod == mod)
1823 			break;
1824 	}
1825 	if (&file_ops->list != &ftrace_module_file_list) {
1826 		list_del(&file_ops->list);
1827 		kfree(file_ops);
1828 	}
1829 	up_write(&trace_event_sem);
1830 
1831 	/*
1832 	 * It is safest to reset the ring buffer if the module being unloaded
1833 	 * registered any events that were used. The only worry is if
1834 	 * a new module gets loaded, and takes on the same id as the events
1835 	 * of this module. When printing out the buffer, traced events left
1836 	 * over from this module may be decoded using the new module's event
1837 	 * formats, and unexpected results may occur.
1838 	 */
1839 	if (clear_trace)
1840 		tracing_reset_all_online_cpus();
1841 }
1842 
1843 static int trace_module_notify(struct notifier_block *self,
1844 			       unsigned long val, void *data)
1845 {
1846 	struct module *mod = data;
1847 
1848 	mutex_lock(&trace_types_lock);
1849 	mutex_lock(&event_mutex);
1850 	switch (val) {
1851 	case MODULE_STATE_COMING:
1852 		trace_module_add_events(mod);
1853 		break;
1854 	case MODULE_STATE_GOING:
1855 		trace_module_remove_events(mod);
1856 		break;
1857 	}
1858 	mutex_unlock(&event_mutex);
1859 	mutex_unlock(&trace_types_lock);
1860 
1861 	return 0;
1862 }
1863 
1864 static int
1865 __trace_add_new_mod_event(struct ftrace_event_call *call,
1866 			  struct trace_array *tr,
1867 			  struct ftrace_module_file_ops *file_ops)
1868 {
1869 	return __trace_add_new_event(call, tr,
1870 				     &file_ops->id, &file_ops->enable,
1871 				     &file_ops->filter, &file_ops->format);
1872 }
1873 
1874 #else
1875 static inline struct ftrace_module_file_ops *
1876 find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
1877 {
1878 	return NULL;
1879 }
1880 static inline int trace_module_notify(struct notifier_block *self,
1881 				      unsigned long val, void *data)
1882 {
1883 	return 0;
1884 }
1885 static inline int
1886 __trace_add_new_mod_event(struct ftrace_event_call *call,
1887 			  struct trace_array *tr,
1888 			  struct ftrace_module_file_ops *file_ops)
1889 {
1890 	return -ENODEV;
1891 }
1892 #endif /* CONFIG_MODULES */
1893 
1894 /* Create a new event directory structure for a trace directory. */
1895 static void
1896 __trace_add_event_dirs(struct trace_array *tr)
1897 {
1898 	struct ftrace_module_file_ops *file_ops = NULL;
1899 	struct ftrace_event_call *call;
1900 	int ret;
1901 
1902 	list_for_each_entry(call, &ftrace_events, list) {
1903 		if (call->mod) {
1904 			/*
1905 			 * Directories for events by modules need to
1906 			 * keep module ref counts when opened (as we don't
1907 			 * want the module to disappear when reading one
1908 			 * of these files). The file_ops keep account of
1909 			 * the module ref count.
1910 			 */
1911 			file_ops = find_ftrace_file_ops(file_ops, call->mod);
1912 			if (!file_ops)
1913 				continue; /* Warn? */
1914 			ret = __trace_add_new_mod_event(call, tr, file_ops);
1915 			if (ret < 0)
1916 				pr_warning("Could not create directory for event %s\n",
1917 					   call->name);
1918 			continue;
1919 		}
1920 		ret = __trace_add_new_event(call, tr,
1921 					    &ftrace_event_id_fops,
1922 					    &ftrace_enable_fops,
1923 					    &ftrace_event_filter_fops,
1924 					    &ftrace_event_format_fops);
1925 		if (ret < 0)
1926 			pr_warning("Could not create directory for event %s\n",
1927 				   call->name);
1928 	}
1929 }
1930 
1931 #ifdef CONFIG_DYNAMIC_FTRACE
1932 
1933 /* Avoid typos */
1934 #define ENABLE_EVENT_STR	"enable_event"
1935 #define DISABLE_EVENT_STR	"disable_event"
1936 
1937 struct event_probe_data {
1938 	struct ftrace_event_file	*file;
1939 	unsigned long			count;
1940 	int				ref;
1941 	bool				enable;
1942 };
1943 
1944 static struct ftrace_event_file *
1945 find_event_file(struct trace_array *tr, const char *system,  const char *event)
1946 {
1947 	struct ftrace_event_file *file;
1948 	struct ftrace_event_call *call;
1949 
1950 	list_for_each_entry(file, &tr->events, list) {
1951 
1952 		call = file->event_call;
1953 
1954 		if (!call->name || !call->class || !call->class->reg)
1955 			continue;
1956 
1957 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1958 			continue;
1959 
1960 		if (strcmp(event, call->name) == 0 &&
1961 		    strcmp(system, call->class->system) == 0)
1962 			return file;
1963 	}
1964 	return NULL;
1965 }
1966 
1967 static void
1968 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1969 {
1970 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1971 	struct event_probe_data *data = *pdata;
1972 
1973 	if (!data)
1974 		return;
1975 
1976 	if (data->enable)
1977 		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1978 	else
1979 		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1980 }
1981 
1982 static void
1983 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1984 {
1985 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1986 	struct event_probe_data *data = *pdata;
1987 
1988 	if (!data)
1989 		return;
1990 
1991 	if (!data->count)
1992 		return;
1993 
1994 	/* Skip if the event is in a state we want to switch to */
1995 	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
1996 		return;
1997 
1998 	if (data->count != -1)
1999 		(data->count)--;
2000 
2001 	event_enable_probe(ip, parent_ip, _data);
2002 }
2003 
2004 static int
2005 event_enable_print(struct seq_file *m, unsigned long ip,
2006 		      struct ftrace_probe_ops *ops, void *_data)
2007 {
2008 	struct event_probe_data *data = _data;
2009 
2010 	seq_printf(m, "%ps:", (void *)ip);
2011 
2012 	seq_printf(m, "%s:%s:%s",
2013 		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2014 		   data->file->event_call->class->system,
2015 		   data->file->event_call->name);
2016 
2017 	if (data->count == -1)
2018 		seq_printf(m, ":unlimited\n");
2019 	else
2020 		seq_printf(m, ":count=%ld\n", data->count);
2021 
2022 	return 0;
2023 }
2024 
2025 static int
2026 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2027 		  void **_data)
2028 {
2029 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
2030 	struct event_probe_data *data = *pdata;
2031 
2032 	data->ref++;
2033 	return 0;
2034 }
2035 
2036 static void
2037 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2038 		  void **_data)
2039 {
2040 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
2041 	struct event_probe_data *data = *pdata;
2042 
2043 	if (WARN_ON_ONCE(data->ref <= 0))
2044 		return;
2045 
2046 	data->ref--;
2047 	if (!data->ref) {
2048 		/* Remove the SOFT_MODE flag */
2049 		__ftrace_event_enable_disable(data->file, 0, 1);
2050 		module_put(data->file->event_call->mod);
2051 		kfree(data);
2052 	}
2053 	*pdata = NULL;
2054 }
2055 
2056 static struct ftrace_probe_ops event_enable_probe_ops = {
2057 	.func			= event_enable_probe,
2058 	.print			= event_enable_print,
2059 	.init			= event_enable_init,
2060 	.free			= event_enable_free,
2061 };
2062 
2063 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2064 	.func			= event_enable_count_probe,
2065 	.print			= event_enable_print,
2066 	.init			= event_enable_init,
2067 	.free			= event_enable_free,
2068 };
2069 
2070 static struct ftrace_probe_ops event_disable_probe_ops = {
2071 	.func			= event_enable_probe,
2072 	.print			= event_enable_print,
2073 	.init			= event_enable_init,
2074 	.free			= event_enable_free,
2075 };
2076 
2077 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2078 	.func			= event_enable_count_probe,
2079 	.print			= event_enable_print,
2080 	.init			= event_enable_init,
2081 	.free			= event_enable_free,
2082 };
2083 
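/*
 * Parses the enable_event/disable_event commands written to
 * set_ftrace_filter. The expected form is roughly:
 *
 *   <function>:enable_event:<system>:<event>[:count]
 *
 * e.g. (a sketch, using events that exist on most configs):
 *
 *   # echo 'try_to_wake_up:enable_event:sched:sched_switch:2' > set_ftrace_filter
 *
 * A leading '!' on the function glob removes a previously registered
 * probe instead.
 */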
2084 static int
2085 event_enable_func(struct ftrace_hash *hash,
2086 		  char *glob, char *cmd, char *param, int enabled)
2087 {
2088 	struct trace_array *tr = top_trace_array();
2089 	struct ftrace_event_file *file;
2090 	struct ftrace_probe_ops *ops;
2091 	struct event_probe_data *data;
2092 	const char *system;
2093 	const char *event;
2094 	char *number;
2095 	bool enable;
2096 	int ret;
2097 
2098 	/* hash funcs only work with set_ftrace_filter */
2099 	if (!enabled || !param)
2100 		return -EINVAL;
2101 
2102 	system = strsep(&param, ":");
2103 	if (!param)
2104 		return -EINVAL;
2105 
2106 	event = strsep(&param, ":");
2107 
2108 	mutex_lock(&event_mutex);
2109 
2110 	ret = -EINVAL;
2111 	file = find_event_file(tr, system, event);
2112 	if (!file)
2113 		goto out;
2114 
2115 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2116 
2117 	if (enable)
2118 		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2119 	else
2120 		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2121 
2122 	if (glob[0] == '!') {
2123 		unregister_ftrace_function_probe_func(glob+1, ops);
2124 		ret = 0;
2125 		goto out;
2126 	}
2127 
2128 	ret = -ENOMEM;
2129 	data = kzalloc(sizeof(*data), GFP_KERNEL);
2130 	if (!data)
2131 		goto out;
2132 
2133 	data->enable = enable;
2134 	data->count = -1;
2135 	data->file = file;
2136 
2137 	if (!param)
2138 		goto out_reg;
2139 
2140 	number = strsep(&param, ":");
2141 
2142 	ret = -EINVAL;
2143 	if (!strlen(number))
2144 		goto out_free;
2145 
2146 	/*
2147 	 * The remainder of the parameter is the number of times the
2148 	 * probe fires before it stops switching the event's state.
2149 	 */
2150 	ret = kstrtoul(number, 0, &data->count);
2151 	if (ret)
2152 		goto out_free;
2153 
2154  out_reg:
2155 	/* Don't let event modules unload while probe registered */
2156 	ret = try_module_get(file->event_call->mod);
2157 	if (!ret) {
2158 		ret = -EBUSY;
2159 		goto out_free;
2160 	}
2161 
2162 	ret = __ftrace_event_enable_disable(file, 1, 1);
2163 	if (ret < 0)
2164 		goto out_put;
2165 	ret = register_ftrace_function_probe(glob, ops, data);
2166 	/*
2167 	 * On success the above returns the number of functions enabled,
2168 	 * but if it did not find any functions it returns zero.
2169 	 * Consider matching no functions a failure too.
2170 	 */
2171 	if (!ret) {
2172 		ret = -ENOENT;
2173 		goto out_disable;
2174 	} else if (ret < 0)
2175 		goto out_disable;
2176 	/* Just return zero, not the number of enabled functions */
2177 	ret = 0;
2178  out:
2179 	mutex_unlock(&event_mutex);
2180 	return ret;
2181 
2182  out_disable:
2183 	__ftrace_event_enable_disable(file, 0, 1);
2184  out_put:
2185 	module_put(file->event_call->mod);
2186  out_free:
2187 	kfree(data);
2188 	goto out;
2189 }
2190 
2191 static struct ftrace_func_command event_enable_cmd = {
2192 	.name			= ENABLE_EVENT_STR,
2193 	.func			= event_enable_func,
2194 };
2195 
2196 static struct ftrace_func_command event_disable_cmd = {
2197 	.name			= DISABLE_EVENT_STR,
2198 	.func			= event_enable_func,
2199 };
2200 
2201 static __init int register_event_cmds(void)
2202 {
2203 	int ret;
2204 
2205 	ret = register_ftrace_command(&event_enable_cmd);
2206 	if (WARN_ON(ret < 0))
2207 		return ret;
2208 	ret = register_ftrace_command(&event_disable_cmd);
2209 	if (WARN_ON(ret < 0))
2210 		unregister_ftrace_command(&event_enable_cmd);
2211 	return ret;
2212 }
2213 #else
2214 static inline int register_event_cmds(void) { return 0; }
2215 #endif /* CONFIG_DYNAMIC_FTRACE */
2216 
2217 /*
2218  * The top level array has already had its ftrace_event_file
2219  * descriptors created in order to allow for early events to
2220  * be recorded. This function is called after the debugfs has been
2221  * initialized, and we now have to create the files associated
2222  * with the events.
2223  */
2224 static __init void
2225 __trace_early_add_event_dirs(struct trace_array *tr)
2226 {
2227 	struct ftrace_event_file *file;
2228 	int ret;
2229 
2230 
2231 	list_for_each_entry(file, &tr->events, list) {
2232 		ret = event_create_dir(tr->event_dir, file,
2233 				       &ftrace_event_id_fops,
2234 				       &ftrace_enable_fops,
2235 				       &ftrace_event_filter_fops,
2236 				       &ftrace_event_format_fops);
2237 		if (ret < 0)
2238 			pr_warning("Could not create directory for event %s\n",
2239 				   file->event_call->name);
2240 	}
2241 }
2242 
2243 /*
2244  * For early boot up, the top trace array needs to have
2245  * a list of events that can be enabled. This must be done before
2246  * the filesystem is set up in order to allow events to be traced
2247  * early.
2248  */
2249 static __init void
2250 __trace_early_add_events(struct trace_array *tr)
2251 {
2252 	struct ftrace_event_call *call;
2253 	int ret;
2254 
2255 	list_for_each_entry(call, &ftrace_events, list) {
2256 		/* Early boot up should not have any modules loaded */
2257 		if (WARN_ON_ONCE(call->mod))
2258 			continue;
2259 
2260 		ret = __trace_early_add_new_event(call, tr);
2261 		if (ret < 0)
2262 			pr_warning("Could not create early event %s\n",
2263 				   call->name);
2264 	}
2265 }
2266 
2267 /* Remove the event directory structure for a trace directory. */
2268 static void
2269 __trace_remove_event_dirs(struct trace_array *tr)
2270 {
2271 	struct ftrace_event_file *file, *next;
2272 
2273 	list_for_each_entry_safe(file, next, &tr->events, list) {
2274 		list_del(&file->list);
2275 		debugfs_remove_recursive(file->dir);
2276 		remove_subsystem(file->system);
2277 		kmem_cache_free(file_cachep, file);
2278 	}
2279 }
2280 
2281 static void
2282 __add_event_to_tracers(struct ftrace_event_call *call,
2283 		       struct ftrace_module_file_ops *file_ops)
2284 {
2285 	struct trace_array *tr;
2286 
2287 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2288 		if (file_ops)
2289 			__trace_add_new_mod_event(call, tr, file_ops);
2290 		else
2291 			__trace_add_new_event(call, tr,
2292 					      &ftrace_event_id_fops,
2293 					      &ftrace_enable_fops,
2294 					      &ftrace_event_filter_fops,
2295 					      &ftrace_event_format_fops);
2296 	}
2297 }
2298 
2299 static struct notifier_block trace_module_nb = {
2300 	.notifier_call = trace_module_notify,
2301 	.priority = 0,
2302 };
2303 
2304 extern struct ftrace_event_call *__start_ftrace_events[];
2305 extern struct ftrace_event_call *__stop_ftrace_events[];
2306 
2307 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2308 
2309 static __init int setup_trace_event(char *str)
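/*
 * Handles the trace_event= boot parameter: a comma separated list of
 * events to enable as early as possible, in set_event syntax, e.g.
 *
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * The list is stashed here and acted on in event_trace_enable().
 */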
2310 {
2311 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2312 	ring_buffer_expanded = true;
2313 	tracing_selftest_disabled = true;
2314 
2315 	return 1;
2316 }
2317 __setup("trace_event=", setup_trace_event);
2318 
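/*
 * Creates the top level event files of an instance directory.
 * The resulting layout (under @parent) looks like:
 *
 *   set_event		- enable/disable events by name
 *   events/		- filled in with per system/event directories
 *   events/header_page	- ring buffer page header format
 *   events/header_event - ring buffer event header format
 *   events/enable	- enable/disable all events at once
 */
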
2319 /* Expects event_mutex to be held when called */
2320 static int
2321 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2322 {
2323 	struct dentry *d_events;
2324 	struct dentry *entry;
2325 
2326 	entry = debugfs_create_file("set_event", 0644, parent,
2327 				    tr, &ftrace_set_event_fops);
2328 	if (!entry) {
2329 		pr_warning("Could not create debugfs 'set_event' entry\n");
2330 		return -ENOMEM;
2331 	}
2332 
2333 	d_events = debugfs_create_dir("events", parent);
2334 	if (!d_events) {
2335 		pr_warning("Could not create debugfs 'events' directory\n");
2336 		return -ENOMEM;
2337 	}
2338 
2339 	/* ring buffer internal formats */
2340 	trace_create_file("header_page", 0444, d_events,
2341 			  ring_buffer_print_page_header,
2342 			  &ftrace_show_header_fops);
2343 
2344 	trace_create_file("header_event", 0444, d_events,
2345 			  ring_buffer_print_entry_header,
2346 			  &ftrace_show_header_fops);
2347 
2348 	trace_create_file("enable", 0644, d_events,
2349 			  tr, &ftrace_tr_enable_fops);
2350 
2351 	tr->event_dir = d_events;
2352 
2353 	return 0;
2354 }
2355 
2356 /**
2357  * event_trace_add_tracer - add an instance of a trace_array to events
2358  * @parent: The parent dentry to place the files/directories for events in
2359  * @tr: The trace array associated with these events
2360  *
2361  * When a new instance is created, it needs to set up its events
2362  * directory, as well as other files associated with events. It also
2363  * creates the event hierarchy in the @parent/events directory.
2364  *
2365  * Returns 0 on success.
2366  */
2367 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2368 {
2369 	int ret;
2370 
2371 	mutex_lock(&event_mutex);
2372 
2373 	ret = create_event_toplevel_files(parent, tr);
2374 	if (ret)
2375 		goto out_unlock;
2376 
2377 	down_write(&trace_event_sem);
2378 	__trace_add_event_dirs(tr);
2379 	up_write(&trace_event_sem);
2380 
2381  out_unlock:
2382 	mutex_unlock(&event_mutex);
2383 
2384 	return ret;
2385 }
2386 
2387 /*
2388  * The top trace array already had its ftrace_event_file descriptors
2389  * created. Now the debugfs files themselves need to be created.
2390  */
2391 static __init int
2392 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2393 {
2394 	int ret;
2395 
2396 	mutex_lock(&event_mutex);
2397 
2398 	ret = create_event_toplevel_files(parent, tr);
2399 	if (ret)
2400 		goto out_unlock;
2401 
2402 	down_write(&trace_event_sem);
2403 	__trace_early_add_event_dirs(tr);
2404 	up_write(&trace_event_sem);
2405 
2406  out_unlock:
2407 	mutex_unlock(&event_mutex);
2408 
2409 	return ret;
2410 }
2411 
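/*
 * Tear down the events directory of an instance that is being
 * removed: disable anything still running, remove the per event
 * files, then the top level events directory itself.
 */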
2412 int event_trace_del_tracer(struct trace_array *tr)
2413 {
2414 	mutex_lock(&event_mutex);
2415 
2416 	/* Disable any running events */
2417 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2418 
2419 	down_write(&trace_event_sem);
2420 	__trace_remove_event_dirs(tr);
2421 	debugfs_remove_recursive(tr->event_dir);
2422 	up_write(&trace_event_sem);
2423 
2424 	tr->event_dir = NULL;
2425 
2426 	mutex_unlock(&event_mutex);
2427 
2428 	return 0;
2429 }
2430 
2431 static __init int event_trace_memsetup(void)
2432 {
2433 	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2434 	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2435 	return 0;
2436 }
2437 
2438 static __init int event_trace_enable(void)
2439 {
2440 	struct trace_array *tr = top_trace_array();
2441 	struct ftrace_event_call **iter, *call;
2442 	char *buf = bootup_event_buf;
2443 	char *token;
2444 	int ret;
2445 
2446 	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2447 
2448 		call = *iter;
2449 		ret = event_init(call);
2450 		if (!ret)
2451 			list_add(&call->list, &ftrace_events);
2452 	}
2453 
2454 	/*
2455 	 * We need the top trace array to have a working set of trace
2456 	 * points at early init, before the debug files and directories
2457 	 * are created. Create the file entries now, and attach them
2458 	 * to the actual file dentries later.
2459 	 */
2460 	__trace_early_add_events(tr);
2461 
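	/*
	 * bootup_event_buf holds the comma separated trace_event=
	 * list (see setup_trace_event() above); enable each entry
	 * now that the events have been registered.
	 */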
2462 	while (true) {
2463 		token = strsep(&buf, ",");
2464 
2465 		if (!token)
2466 			break;
2467 		if (!*token)
2468 			continue;
2469 
2470 		ret = ftrace_set_clr_event(tr, token, 1);
2471 		if (ret)
2472 			pr_warn("Failed to enable trace event: %s\n", token);
2473 	}
2474 
2475 	trace_printk_start_comm();
2476 
2477 	register_event_cmds();
2478 
2479 	return 0;
2480 }
2481 
2482 static __init int event_trace_init(void)
2483 {
2484 	struct trace_array *tr;
2485 	struct dentry *d_tracer;
2486 	struct dentry *entry;
2487 	int ret;
2488 
2489 	tr = top_trace_array();
2490 
2491 	d_tracer = tracing_init_dentry();
2492 	if (!d_tracer)
2493 		return 0;
2494 
2495 	entry = debugfs_create_file("available_events", 0444, d_tracer,
2496 				    tr, &ftrace_avail_fops);
2497 	if (!entry)
2498 		pr_warning("Could not create debugfs "
2499 			   "'available_events' entry\n");
2500 
2501 	if (trace_define_common_fields())
2502 		pr_warning("tracing: Failed to allocate common fields\n");
2503 
2504 	ret = early_event_add_tracer(d_tracer, tr);
2505 	if (ret)
2506 		return ret;
2507 
2508 	ret = register_module_notifier(&trace_module_nb);
2509 	if (ret)
2510 		pr_warning("Failed to register trace events module notifier\n");
2511 
2512 	return 0;
2513 }
2514 early_initcall(event_trace_memsetup);
2515 core_initcall(event_trace_enable);
2516 fs_initcall(event_trace_init);
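
/*
 * Initcall ordering matters here: event_trace_memsetup() sets up the
 * caches first, event_trace_enable() then registers the compiled-in
 * events (and applies the trace_event= list), and event_trace_init()
 * finally creates the debugfs files for them.
 */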
2517 
2518 #ifdef CONFIG_FTRACE_STARTUP_TEST
2519 
2520 static DEFINE_SPINLOCK(test_spinlock);
2521 static DEFINE_SPINLOCK(test_spinlock_irq);
2522 static DEFINE_MUTEX(test_mutex);
2523 
2524 static __init void test_work(struct work_struct *dummy)
2525 {
2526 	spin_lock(&test_spinlock);
2527 	spin_lock_irq(&test_spinlock_irq);
2528 	udelay(1);
2529 	spin_unlock_irq(&test_spinlock_irq);
2530 	spin_unlock(&test_spinlock);
2531 
2532 	mutex_lock(&test_mutex);
2533 	msleep(1);
2534 	mutex_unlock(&test_mutex);
2535 }
2536 
2537 static __init int event_test_thread(void *unused)
2538 {
2539 	void *test_malloc;
2540 
2541 	test_malloc = kmalloc(1234, GFP_KERNEL);
2542 	if (!test_malloc)
2543 		pr_info("failed to kmalloc\n");
2544 
2545 	schedule_on_each_cpu(test_work);
2546 
2547 	kfree(test_malloc);
2548 
2549 	set_current_state(TASK_INTERRUPTIBLE);
2550 	while (!kthread_should_stop())
2551 		schedule();
2552 
2553 	return 0;
2554 }
2555 
2556 /*
2557  * Do various things that may trigger events.
2558  */
2559 static __init void event_test_stuff(void)
2560 {
2561 	struct task_struct *test_thread;
2562 
2563 	test_thread = kthread_run(event_test_thread, NULL, "test-events");
2564 	msleep(1);
2565 	kthread_stop(test_thread);
2566 }
2567 
2568 /*
2569  * For every trace event defined, we will test each trace point separately,
2570  * and then by groups, and finally all trace points.
2571  */
2572 static __init void event_trace_self_tests(void)
2573 {
2574 	struct ftrace_subsystem_dir *dir;
2575 	struct ftrace_event_file *file;
2576 	struct ftrace_event_call *call;
2577 	struct event_subsystem *system;
2578 	struct trace_array *tr;
2579 	int ret;
2580 
2581 	tr = top_trace_array();
2582 
2583 	pr_info("Running tests on trace events:\n");
2584 
2585 	list_for_each_entry(file, &tr->events, list) {
2586 
2587 		call = file->event_call;
2588 
2589 		/* Only test those that have a probe */
2590 		if (!call->class || !call->class->probe)
2591 			continue;
2592 
2593 /*
2594  * Testing syscall events here is of limited value, but we still
2595  * do it if configured. It is time consuming, though. What we
2596  * really need is a user thread to perform the
2597  * syscalls as we test.
2598  */
2599 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2600 		if (call->class->system &&
2601 		    strcmp(call->class->system, "syscalls") == 0)
2602 			continue;
2603 #endif
2604 
2605 		pr_info("Testing event %s: ", call->name);
2606 
2607 		/*
2608 		 * If an event is already enabled, someone is using
2609 		 * it and the self test should not be on.
2610 		 */
2611 		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2612 			pr_warning("Enabled event during self test!\n");
2613 			WARN_ON_ONCE(1);
2614 			continue;
2615 		}
2616 
2617 		ftrace_event_enable_disable(file, 1);
2618 		event_test_stuff();
2619 		ftrace_event_enable_disable(file, 0);
2620 
2621 		pr_cont("OK\n");
2622 	}
2623 
2624 	/* Now test at the sub system level */
2625 
2626 	pr_info("Running tests on trace event systems:\n");
2627 
2628 	list_for_each_entry(dir, &tr->systems, list) {
2629 
2630 		system = dir->subsystem;
2631 
2632 		/* the ftrace system is special, skip it */
2633 		if (strcmp(system->name, "ftrace") == 0)
2634 			continue;
2635 
2636 		pr_info("Testing event system %s: ", system->name);
2637 
2638 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2639 		if (WARN_ON_ONCE(ret)) {
2640 			pr_warning("error enabling system %s\n",
2641 				   system->name);
2642 			continue;
2643 		}
2644 
2645 		event_test_stuff();
2646 
2647 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2648 		if (WARN_ON_ONCE(ret)) {
2649 			pr_warning("error disabling system %s\n",
2650 				   system->name);
2651 			continue;
2652 		}
2653 
2654 		pr_cont("OK\n");
2655 	}
2656 
2657 	/* Test with all events enabled */
2658 
2659 	pr_info("Running tests on all trace events:\n");
2660 	pr_info("Testing all events: ");
2661 
2662 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2663 	if (WARN_ON_ONCE(ret)) {
2664 		pr_warning("error enabling all events\n");
2665 		return;
2666 	}
2667 
2668 	event_test_stuff();
2669 
2670 	/* reset sysname */
2671 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2672 	if (WARN_ON_ONCE(ret)) {
2673 		pr_warning("error disabling all events\n");
2674 		return;
2675 	}
2676 
2677 	pr_cont("OK\n");
2678 }
2679 
2680 #ifdef CONFIG_FUNCTION_TRACER
2681 
2682 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2683 
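/*
 * Callback hooked to every function while the self tests are rerun
 * (see event_trace_self_test_with_function() below). It hand crafts
 * a TRACE_FN entry in the current ring buffer; the per CPU
 * ftrace_test_event_disable counter keeps it from recursing into
 * itself should anything it calls be traced as well.
 */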
2684 static void
2685 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2686 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
2687 {
2688 	struct ring_buffer_event *event;
2689 	struct ring_buffer *buffer;
2690 	struct ftrace_entry *entry;
2691 	unsigned long flags;
2692 	long disabled;
2693 	int cpu;
2694 	int pc;
2695 
2696 	pc = preempt_count();
2697 	preempt_disable_notrace();
2698 	cpu = raw_smp_processor_id();
2699 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2700 
2701 	if (disabled != 1)
2702 		goto out;
2703 
2704 	local_save_flags(flags);
2705 
2706 	event = trace_current_buffer_lock_reserve(&buffer,
2707 						  TRACE_FN, sizeof(*entry),
2708 						  flags, pc);
2709 	if (!event)
2710 		goto out;
2711 	entry	= ring_buffer_event_data(event);
2712 	entry->ip			= ip;
2713 	entry->parent_ip		= parent_ip;
2714 
2715 	trace_buffer_unlock_commit(buffer, event, flags, pc);
2716 
2717  out:
2718 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2719 	preempt_enable_notrace();
2720 }
2721 
2722 static struct ftrace_ops trace_ops __initdata  =
2723 {
2724 	.func = function_test_events_call,
2725 	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
2726 };
2727 
2728 static __init void event_trace_self_test_with_function(void)
2729 {
2730 	int ret;
2731 	ret = register_ftrace_function(&trace_ops);
2732 	if (WARN_ON(ret < 0)) {
2733 		pr_info("Failed to enable function tracer for event tests\n");
2734 		return;
2735 	}
2736 	pr_info("Running tests again, along with the function tracer\n");
2737 	event_trace_self_tests();
2738 	unregister_ftrace_function(&trace_ops);
2739 }
2740 #else
2741 static __init void event_trace_self_test_with_function(void)
2742 {
2743 }
2744 #endif
2745 
2746 static __init int event_trace_self_tests_init(void)
2747 {
2748 	if (!tracing_selftest_disabled) {
2749 		event_trace_self_tests();
2750 		event_trace_self_test_with_function();
2751 	}
2752 
2753 	return 0;
2754 }
2755 
2756 late_initcall(event_trace_self_tests_init);
2757 
2758 #endif
2759