xref: /linux/kernel/trace/trace_events.c (revision 41dc27e3b9bd41b900f5aea06f86669e54a2cdd6)
1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of fields of the tracepoint.
7  *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10 
11 #define pr_fmt(fmt) fmt
12 
13 #include <linux/workqueue.h>
14 #include <linux/spinlock.h>
15 #include <linux/kthread.h>
16 #include <linux/debugfs.h>
17 #include <linux/uaccess.h>
18 #include <linux/module.h>
19 #include <linux/ctype.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 
23 #include <asm/setup.h>
24 
25 #include "trace_output.h"
26 
27 #undef TRACE_SYSTEM
28 #define TRACE_SYSTEM "TRACE_SYSTEM"
29 
30 DEFINE_MUTEX(event_mutex);
31 
32 LIST_HEAD(ftrace_events);
33 static LIST_HEAD(ftrace_common_fields);
34 
35 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
36 
37 static struct kmem_cache *field_cachep;
38 static struct kmem_cache *file_cachep;
39 
40 #define SYSTEM_FL_FREE_NAME		(1 << 31)
41 
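/*
 * The top bit of system->ref_count doubles as SYSTEM_FL_FREE_NAME: it
 * records that ->name was kstrdup()'d (see create_new_subsystem()) and
 * must be kfree()'d on the last put. The helpers below mask that bit
 * out so the flag never perturbs the actual reference count.
 */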
42 static inline int system_refcount(struct event_subsystem *system)
43 {
44 	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
45 }
46 
47 static int system_refcount_inc(struct event_subsystem *system)
48 {
49 	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
50 }
51 
52 static int system_refcount_dec(struct event_subsystem *system)
53 {
54 	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
55 }
56 
57 /* Double loops: do not use break to exit both, only gotos work */
58 #define do_for_each_event_file(tr, file)			\
59 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
60 		list_for_each_entry(file, &tr->events, list)
61 
62 #define do_for_each_event_file_safe(tr, file)			\
63 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
64 		struct ftrace_event_file *___n;				\
65 		list_for_each_entry_safe(file, ___n, &tr->events, list)
66 
67 #define while_for_each_event_file()		\
68 	}
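/*
 * Rough usage sketch (mirrors trace_event_enable_cmd_record() below).
 * A "break" in the body only exits the inner per-tr loop, which is how
 * callers skip to the next trace_array; exiting both loops needs a goto:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (done)
 *			goto out;
 *	} while_for_each_event_file();
 * out:
 */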
69 
70 static struct list_head *
71 trace_get_fields(struct ftrace_event_call *event_call)
72 {
73 	if (!event_call->class->get_fields)
74 		return &event_call->class->fields;
75 	return event_call->class->get_fields(event_call);
76 }
77 
78 static struct ftrace_event_field *
79 __find_event_field(struct list_head *head, char *name)
80 {
81 	struct ftrace_event_field *field;
82 
83 	list_for_each_entry(field, head, link) {
84 		if (!strcmp(field->name, name))
85 			return field;
86 	}
87 
88 	return NULL;
89 }
90 
91 struct ftrace_event_field *
92 trace_find_event_field(struct ftrace_event_call *call, char *name)
93 {
94 	struct ftrace_event_field *field;
95 	struct list_head *head;
96 
97 	field = __find_event_field(&ftrace_common_fields, name);
98 	if (field)
99 		return field;
100 
101 	head = trace_get_fields(call);
102 	return __find_event_field(head, name);
103 }
104 
105 static int __trace_define_field(struct list_head *head, const char *type,
106 				const char *name, int offset, int size,
107 				int is_signed, int filter_type)
108 {
109 	struct ftrace_event_field *field;
110 
111 	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
112 	if (!field)
113 		return -ENOMEM;
114 
115 	field->name = name;
116 	field->type = type;
117 
118 	if (filter_type == FILTER_OTHER)
119 		field->filter_type = filter_assign_type(type);
120 	else
121 		field->filter_type = filter_type;
122 
123 	field->offset = offset;
124 	field->size = size;
125 	field->is_signed = is_signed;
126 
127 	list_add(&field->link, head);
128 
129 	return 0;
130 }
131 
132 int trace_define_field(struct ftrace_event_call *call, const char *type,
133 		       const char *name, int offset, int size, int is_signed,
134 		       int filter_type)
135 {
136 	struct list_head *head;
137 
138 	if (WARN_ON(!call->class))
139 		return 0;
140 
141 	head = trace_get_fields(call);
142 	return __trace_define_field(head, type, name, offset, size,
143 				    is_signed, filter_type);
144 }
145 EXPORT_SYMBOL_GPL(trace_define_field);
146 
147 #define __common_field(type, item)					\
148 	ret = __trace_define_field(&ftrace_common_fields, #type,	\
149 				   "common_" #item,			\
150 				   offsetof(typeof(ent), item),		\
151 				   sizeof(ent.item),			\
152 				   is_signed_type(type), FILTER_OTHER);	\
153 	if (ret)							\
154 		return ret;
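/*
 * Note: __common_field() expands in place and relies on the caller
 * providing locals named "ret" and "ent" (a struct trace_entry), as
 * trace_define_common_fields() below does; it also returns early from
 * the caller on failure.
 */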
155 
156 static int trace_define_common_fields(void)
157 {
158 	int ret;
159 	struct trace_entry ent;
160 
161 	__common_field(unsigned short, type);
162 	__common_field(unsigned char, flags);
163 	__common_field(unsigned char, preempt_count);
164 	__common_field(int, pid);
165 
166 	return ret;
167 }
168 
169 static void trace_destroy_fields(struct ftrace_event_call *call)
170 {
171 	struct ftrace_event_field *field, *next;
172 	struct list_head *head;
173 
174 	head = trace_get_fields(call);
175 	list_for_each_entry_safe(field, next, head, link) {
176 		list_del(&field->link);
177 		kmem_cache_free(field_cachep, field);
178 	}
179 }
180 
181 int trace_event_raw_init(struct ftrace_event_call *call)
182 {
183 	int id;
184 
185 	id = register_ftrace_event(&call->event);
186 	if (!id)
187 		return -ENODEV;
188 
189 	return 0;
190 }
191 EXPORT_SYMBOL_GPL(trace_event_raw_init);
192 
193 void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
194 				  struct ftrace_event_file *ftrace_file,
195 				  unsigned long len)
196 {
197 	struct ftrace_event_call *event_call = ftrace_file->event_call;
198 
199 	local_save_flags(fbuffer->flags);
200 	fbuffer->pc = preempt_count();
201 	fbuffer->ftrace_file = ftrace_file;
202 
203 	fbuffer->event =
204 		trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
205 						event_call->event.type, len,
206 						fbuffer->flags, fbuffer->pc);
207 	if (!fbuffer->event)
208 		return NULL;
209 
210 	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
211 	return fbuffer->entry;
212 }
213 EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
214 
215 void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
216 {
217 	event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
218 				    fbuffer->event, fbuffer->entry,
219 				    fbuffer->flags, fbuffer->pc);
220 }
221 EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
222 
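/*
 * Default ->reg() callback for tracepoint-based events. Note that the
 * probe's private data differs by target: the ftrace_event_file for
 * normal trace registration, but the event call itself for the perf
 * paths.
 */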
223 int ftrace_event_reg(struct ftrace_event_call *call,
224 		     enum trace_reg type, void *data)
225 {
226 	struct ftrace_event_file *file = data;
227 
228 	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
229 	switch (type) {
230 	case TRACE_REG_REGISTER:
231 		return tracepoint_probe_register(call->tp,
232 						 call->class->probe,
233 						 file);
234 	case TRACE_REG_UNREGISTER:
235 		tracepoint_probe_unregister(call->tp,
236 					    call->class->probe,
237 					    file);
238 		return 0;
239 
240 #ifdef CONFIG_PERF_EVENTS
241 	case TRACE_REG_PERF_REGISTER:
242 		return tracepoint_probe_register(call->tp,
243 						 call->class->perf_probe,
244 						 call);
245 	case TRACE_REG_PERF_UNREGISTER:
246 		tracepoint_probe_unregister(call->tp,
247 					    call->class->perf_probe,
248 					    call);
249 		return 0;
250 	case TRACE_REG_PERF_OPEN:
251 	case TRACE_REG_PERF_CLOSE:
252 	case TRACE_REG_PERF_ADD:
253 	case TRACE_REG_PERF_DEL:
254 		return 0;
255 #endif
256 	}
257 	return 0;
258 }
259 EXPORT_SYMBOL_GPL(ftrace_event_reg);
260 
261 void trace_event_enable_cmd_record(bool enable)
262 {
263 	struct ftrace_event_file *file;
264 	struct trace_array *tr;
265 
266 	mutex_lock(&event_mutex);
267 	do_for_each_event_file(tr, file) {
268 
269 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
270 			continue;
271 
272 		if (enable) {
273 			tracing_start_cmdline_record();
274 			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
275 		} else {
276 			tracing_stop_cmdline_record();
277 			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
278 		}
279 	} while_for_each_event_file();
280 	mutex_unlock(&event_mutex);
281 }
282 
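/*
 * A rough summary of the (enable, soft_disable) combinations handled
 * below (the case comments are authoritative):
 *
 *	enable=1, soft_disable=0: plain enable ("echo 1 > enable")
 *	enable=0, soft_disable=0: plain disable, unless SOFT_MODE holds
 *		the tracepoint registered
 *	enable=1, soft_disable=1: a trigger/probe takes an sm_ref; the
 *		tracepoint is registered but stays SOFT_DISABLED
 *	enable=0, soft_disable=1: a trigger/probe drops its sm_ref
 */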
283 static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
284 					 int enable, int soft_disable)
285 {
286 	struct ftrace_event_call *call = file->event_call;
287 	int ret = 0;
288 	int disable;
289 
290 	switch (enable) {
291 	case 0:
292 		/*
293 		 * When soft_disable is set and enable is cleared, the sm_ref
294 		 * reference counter is decremented. If it reaches 0, we want
295 		 * to clear the SOFT_DISABLED flag but leave the event in the
296 		 * state that it was. That is, if the event was enabled and
297 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
298 		 * is set we do not want the event to be enabled before we
299 		 * clear the bit.
300 		 *
301 		 * When soft_disable is not set but the SOFT_MODE flag is,
302 		 * we do nothing. Do not disable the tracepoint, otherwise
303 		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
304 		 */
305 		if (soft_disable) {
306 			if (atomic_dec_return(&file->sm_ref) > 0)
307 				break;
308 			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
309 			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
310 		} else
311 			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
312 
313 		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
314 			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
315 			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
316 				tracing_stop_cmdline_record();
317 				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
318 			}
319 			call->class->reg(call, TRACE_REG_UNREGISTER, file);
320 		}
321 		/* If in SOFT_MODE, just set the SOFT_DISABLED bit, else clear it */
322 		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
323 			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
324 		else
325 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
326 		break;
327 	case 1:
328 		/*
329 		 * When soft_disable is set and enable is set, we want to
330 		 * register the tracepoint for the event, but leave the event
331 		 * as is. That means, if the event was already enabled, we do
332 		 * nothing (but set SOFT_MODE). If the event is disabled, we
333 		 * set SOFT_DISABLED before enabling the event tracepoint, so
334 		 * it still seems to be disabled.
335 		 */
336 		if (!soft_disable)
337 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
338 		else {
339 			if (atomic_inc_return(&file->sm_ref) > 1)
340 				break;
341 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
342 		}
343 
344 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
345 
346 			/* Keep the event disabled when going to SOFT_MODE. */
347 			if (soft_disable)
348 				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
349 
350 			if (trace_flags & TRACE_ITER_RECORD_CMD) {
351 				tracing_start_cmdline_record();
352 				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
353 			}
354 			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
355 			if (ret) {
356 				tracing_stop_cmdline_record();
357 				pr_info("event trace: Could not enable event %s\n",
358 					ftrace_event_name(call));
359 				break;
360 			}
361 			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
362 
363 			/* WAS_ENABLED gets set but never cleared. */
364 			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
365 		}
366 		break;
367 	}
368 
369 	return ret;
370 }
371 
372 int trace_event_enable_disable(struct ftrace_event_file *file,
373 			       int enable, int soft_disable)
374 {
375 	return __ftrace_event_enable_disable(file, enable, soft_disable);
376 }
377 
378 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
379 				       int enable)
380 {
381 	return __ftrace_event_enable_disable(file, enable, 0);
382 }
383 
384 static void ftrace_clear_events(struct trace_array *tr)
385 {
386 	struct ftrace_event_file *file;
387 
388 	mutex_lock(&event_mutex);
389 	list_for_each_entry(file, &tr->events, list) {
390 		ftrace_event_enable_disable(file, 0);
391 	}
392 	mutex_unlock(&event_mutex);
393 }
394 
395 static void __put_system(struct event_subsystem *system)
396 {
397 	struct event_filter *filter = system->filter;
398 
399 	WARN_ON_ONCE(system_refcount(system) == 0);
400 	if (system_refcount_dec(system))
401 		return;
402 
403 	list_del(&system->list);
404 
405 	if (filter) {
406 		kfree(filter->filter_string);
407 		kfree(filter);
408 	}
409 	if (system->ref_count & SYSTEM_FL_FREE_NAME)
410 		kfree(system->name);
411 	kfree(system);
412 }
413 
414 static void __get_system(struct event_subsystem *system)
415 {
416 	WARN_ON_ONCE(system_refcount(system) == 0);
417 	system_refcount_inc(system);
418 }
419 
420 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
421 {
422 	WARN_ON_ONCE(dir->ref_count == 0);
423 	dir->ref_count++;
424 	__get_system(dir->subsystem);
425 }
426 
427 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
428 {
429 	WARN_ON_ONCE(dir->ref_count == 0);
430 	/* If the subsystem is about to be freed, the dir must be too */
431 	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
432 
433 	__put_system(dir->subsystem);
434 	if (!--dir->ref_count)
435 		kfree(dir);
436 }
437 
438 static void put_system(struct ftrace_subsystem_dir *dir)
439 {
440 	mutex_lock(&event_mutex);
441 	__put_system_dir(dir);
442 	mutex_unlock(&event_mutex);
443 }
444 
445 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
446 {
447 	if (!dir)
448 		return;
449 
450 	if (!--dir->nr_events) {
451 		debugfs_remove_recursive(dir->entry);
452 		list_del(&dir->list);
453 		__put_system_dir(dir);
454 	}
455 }
456 
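/*
 * Clearing i_private below makes event_file_data() return NULL for any
 * still-open file handles once the event is gone; readers and writers
 * (e.g. event_enable_read()) check for that under event_mutex and fail
 * with -ENODEV instead of dereferencing a freed file.
 */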
457 static void remove_event_file_dir(struct ftrace_event_file *file)
458 {
459 	struct dentry *dir = file->dir;
460 	struct dentry *child;
461 
462 	if (dir) {
463 		spin_lock(&dir->d_lock);	/* probably unneeded */
464 		list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
465 			if (child->d_inode)	/* probably unneeded */
466 				child->d_inode->i_private = NULL;
467 		}
468 		spin_unlock(&dir->d_lock);
469 
470 		debugfs_remove_recursive(dir);
471 	}
472 
473 	list_del(&file->list);
474 	remove_subsystem(file->system);
475 	kmem_cache_free(file_cachep, file);
476 }
477 
478 /*
479  * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
480  */
481 static int
482 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
483 			      const char *sub, const char *event, int set)
484 {
485 	struct ftrace_event_file *file;
486 	struct ftrace_event_call *call;
487 	const char *name;
488 	int ret = -EINVAL;
489 
490 	list_for_each_entry(file, &tr->events, list) {
491 
492 		call = file->event_call;
493 		name = ftrace_event_name(call);
494 
495 		if (!name || !call->class || !call->class->reg)
496 			continue;
497 
498 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
499 			continue;
500 
501 		if (match &&
502 		    strcmp(match, name) != 0 &&
503 		    strcmp(match, call->class->system) != 0)
504 			continue;
505 
506 		if (sub && strcmp(sub, call->class->system) != 0)
507 			continue;
508 
509 		if (event && strcmp(event, name) != 0)
510 			continue;
511 
512 		ftrace_event_enable_disable(file, set);
513 
514 		ret = 0;
515 	}
516 
517 	return ret;
518 }
519 
520 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
521 				  const char *sub, const char *event, int set)
522 {
523 	int ret;
524 
525 	mutex_lock(&event_mutex);
526 	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
527 	mutex_unlock(&event_mutex);
528 
529 	return ret;
530 }
531 
532 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
533 {
534 	char *event = NULL, *sub = NULL, *match;
535 
536 	/*
537 	 * The buf format can be <subsystem>:<event-name>
538 	 *  *:<event-name> means any event by that name.
539 	 *  :<event-name> is the same.
540 	 *
541 	 *  <subsystem>:* means all events in that subsystem
542 	 *  <subsystem>: means the same.
543 	 *
544 	 *  <name> (no ':') means all events in a subsystem with
545 	 *  the name <name> or any event that matches <name>
546 	 */
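	/*
	 * A few worked examples of the parse below (a sketch):
	 *	"sched:sched_switch" -> sub="sched", event="sched_switch"
	 *	"sched:"             -> sub="sched", event=NULL
	 *	":sched_switch"      -> sub=NULL,    event="sched_switch"
	 *	"sched_switch"       -> match="sched_switch"
	 */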
547 
548 	match = strsep(&buf, ":");
549 	if (buf) {
550 		sub = match;
551 		event = buf;
552 		match = NULL;
553 
554 		if (!strlen(sub) || strcmp(sub, "*") == 0)
555 			sub = NULL;
556 		if (!strlen(event) || strcmp(event, "*") == 0)
557 			event = NULL;
558 	}
559 
560 	return __ftrace_set_clr_event(tr, match, sub, event, set);
561 }
562 
563 /**
564  * trace_set_clr_event - enable or disable an event
565  * @system: system name to match (NULL for any system)
566  * @event: event name to match (NULL for all events, within system)
567  * @set: 1 to enable, 0 to disable
568  *
569  * This is a way for other parts of the kernel to enable or disable
570  * event recording.
571  *
572  * Returns 0 on success, -EINVAL if the parameters do not match any
573  * registered events.
574  */
575 int trace_set_clr_event(const char *system, const char *event, int set)
576 {
577 	struct trace_array *tr = top_trace_array();
578 
579 	if (!tr)
580 		return -ENODEV;
581 
582 	return __ftrace_set_clr_event(tr, NULL, system, event, set);
583 }
584 EXPORT_SYMBOL_GPL(trace_set_clr_event);
585 
586 /* 128 should be much more than enough */
587 #define EVENT_BUF_SIZE		127
588 
589 static ssize_t
590 ftrace_event_write(struct file *file, const char __user *ubuf,
591 		   size_t cnt, loff_t *ppos)
592 {
593 	struct trace_parser parser;
594 	struct seq_file *m = file->private_data;
595 	struct trace_array *tr = m->private;
596 	ssize_t read, ret;
597 
598 	if (!cnt)
599 		return 0;
600 
601 	ret = tracing_update_buffers();
602 	if (ret < 0)
603 		return ret;
604 
605 	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
606 		return -ENOMEM;
607 
608 	read = trace_get_user(&parser, ubuf, cnt, ppos);
609 
610 	if (read >= 0 && trace_parser_loaded(&parser)) {
611 		int set = 1;
612 
613 		if (*parser.buffer == '!')
614 			set = 0;
615 
616 		parser.buffer[parser.idx] = 0;
617 
618 		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
619 		if (ret)
620 			goto out_put;
621 	}
622 
623 	ret = read;
624 
625  out_put:
626 	trace_parser_put(&parser);
627 
628 	return ret;
629 }
630 
631 static void *
632 t_next(struct seq_file *m, void *v, loff_t *pos)
633 {
634 	struct ftrace_event_file *file = v;
635 	struct ftrace_event_call *call;
636 	struct trace_array *tr = m->private;
637 
638 	(*pos)++;
639 
640 	list_for_each_entry_continue(file, &tr->events, list) {
641 		call = file->event_call;
642 		/*
643 		 * The ftrace subsystem is for showing formats only.
644 		 * They cannot be enabled or disabled via the event files.
645 		 */
646 		if (call->class && call->class->reg)
647 			return file;
648 	}
649 
650 	return NULL;
651 }
652 
653 static void *t_start(struct seq_file *m, loff_t *pos)
654 {
655 	struct ftrace_event_file *file;
656 	struct trace_array *tr = m->private;
657 	loff_t l;
658 
659 	mutex_lock(&event_mutex);
660 
661 	file = list_entry(&tr->events, struct ftrace_event_file, list);
662 	for (l = 0; l <= *pos; ) {
663 		file = t_next(m, file, &l);
664 		if (!file)
665 			break;
666 	}
667 	return file;
668 }
669 
670 static void *
671 s_next(struct seq_file *m, void *v, loff_t *pos)
672 {
673 	struct ftrace_event_file *file = v;
674 	struct trace_array *tr = m->private;
675 
676 	(*pos)++;
677 
678 	list_for_each_entry_continue(file, &tr->events, list) {
679 		if (file->flags & FTRACE_EVENT_FL_ENABLED)
680 			return file;
681 	}
682 
683 	return NULL;
684 }
685 
686 static void *s_start(struct seq_file *m, loff_t *pos)
687 {
688 	struct ftrace_event_file *file;
689 	struct trace_array *tr = m->private;
690 	loff_t l;
691 
692 	mutex_lock(&event_mutex);
693 
694 	file = list_entry(&tr->events, struct ftrace_event_file, list);
695 	for (l = 0; l <= *pos; ) {
696 		file = s_next(m, file, &l);
697 		if (!file)
698 			break;
699 	}
700 	return file;
701 }
702 
703 static int t_show(struct seq_file *m, void *v)
704 {
705 	struct ftrace_event_file *file = v;
706 	struct ftrace_event_call *call = file->event_call;
707 
708 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
709 		seq_printf(m, "%s:", call->class->system);
710 	seq_printf(m, "%s\n", ftrace_event_name(call));
711 
712 	return 0;
713 }
714 
715 static void t_stop(struct seq_file *m, void *p)
716 {
717 	mutex_unlock(&event_mutex);
718 }
719 
720 static ssize_t
721 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
722 		  loff_t *ppos)
723 {
724 	struct ftrace_event_file *file;
725 	unsigned long flags;
726 	char buf[4] = "0";
727 
728 	mutex_lock(&event_mutex);
729 	file = event_file_data(filp);
730 	if (likely(file))
731 		flags = file->flags;
732 	mutex_unlock(&event_mutex);
733 
734 	if (!file)
735 		return -ENODEV;
736 
737 	if (flags & FTRACE_EVENT_FL_ENABLED &&
738 	    !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
739 		strcpy(buf, "1");
740 
741 	if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
742 	    flags & FTRACE_EVENT_FL_SOFT_MODE)
743 		strcat(buf, "*");
744 
745 	strcat(buf, "\n");
746 
747 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
748 }
749 
750 static ssize_t
751 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
752 		   loff_t *ppos)
753 {
754 	struct ftrace_event_file *file;
755 	unsigned long val;
756 	int ret;
757 
758 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
759 	if (ret)
760 		return ret;
761 
762 	ret = tracing_update_buffers();
763 	if (ret < 0)
764 		return ret;
765 
766 	switch (val) {
767 	case 0:
768 	case 1:
769 		ret = -ENODEV;
770 		mutex_lock(&event_mutex);
771 		file = event_file_data(filp);
772 		if (likely(file))
773 			ret = ftrace_event_enable_disable(file, val);
774 		mutex_unlock(&event_mutex);
775 		break;
776 
777 	default:
778 		return -EINVAL;
779 	}
780 
781 	*ppos += cnt;
782 
783 	return ret ? ret : cnt;
784 }
785 
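/*
 * The subsystem "enable" file reports an aggregate state: the loop below
 * ORs bit 0 into "set" for each disabled event seen and bit 1 for each
 * enabled one, so set_to_char[] maps 0/1/2/3 to '?' (no events),
 * '0' (all disabled), '1' (all enabled) and 'X' (mixed).
 */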
786 static ssize_t
787 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
788 		   loff_t *ppos)
789 {
790 	const char set_to_char[4] = { '?', '0', '1', 'X' };
791 	struct ftrace_subsystem_dir *dir = filp->private_data;
792 	struct event_subsystem *system = dir->subsystem;
793 	struct ftrace_event_call *call;
794 	struct ftrace_event_file *file;
795 	struct trace_array *tr = dir->tr;
796 	char buf[2];
797 	int set = 0;
798 	int ret;
799 
800 	mutex_lock(&event_mutex);
801 	list_for_each_entry(file, &tr->events, list) {
802 		call = file->event_call;
803 		if (!ftrace_event_name(call) || !call->class || !call->class->reg)
804 			continue;
805 
806 		if (system && strcmp(call->class->system, system->name) != 0)
807 			continue;
808 
809 		/*
810 		 * We need to find out if all the events are set
811 		 * or if all events are cleared, or if we have
812 		 * a mixture.
813 		 */
814 		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
815 
816 		/*
817 		 * If we have a mixture, no need to look further.
818 		 */
819 		if (set == 3)
820 			break;
821 	}
822 	mutex_unlock(&event_mutex);
823 
824 	buf[0] = set_to_char[set];
825 	buf[1] = '\n';
826 
827 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
828 
829 	return ret;
830 }
831 
832 static ssize_t
833 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
834 		    loff_t *ppos)
835 {
836 	struct ftrace_subsystem_dir *dir = filp->private_data;
837 	struct event_subsystem *system = dir->subsystem;
838 	const char *name = NULL;
839 	unsigned long val;
840 	ssize_t ret;
841 
842 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
843 	if (ret)
844 		return ret;
845 
846 	ret = tracing_update_buffers();
847 	if (ret < 0)
848 		return ret;
849 
850 	if (val != 0 && val != 1)
851 		return -EINVAL;
852 
853 	/*
854 	 * Opening of "enable" adds a ref count to system,
855 	 * so the name is safe to use.
856 	 */
857 	if (system)
858 		name = system->name;
859 
860 	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
861 	if (ret)
862 		goto out;
863 
864 	ret = cnt;
865 
866 out:
867 	*ppos += cnt;
868 
869 	return ret;
870 }
871 
872 enum {
873 	FORMAT_HEADER		= 1,
874 	FORMAT_FIELD_SEPERATOR	= 2,
875 	FORMAT_PRINTFMT		= 3,
876 };
877 
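/*
 * The "format" file iterates FORMAT_HEADER, the common fields, a
 * separator, the event's own fields, then the print fmt. Fields were
 * prepended with list_add() in __trace_define_field(), so f_next()
 * walks ->prev from each list head to emit them in definition order.
 */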
878 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
879 {
880 	struct ftrace_event_call *call = event_file_data(m->private);
881 	struct list_head *common_head = &ftrace_common_fields;
882 	struct list_head *head = trace_get_fields(call);
883 	struct list_head *node = v;
884 
885 	(*pos)++;
886 
887 	switch ((unsigned long)v) {
888 	case FORMAT_HEADER:
889 		node = common_head;
890 		break;
891 
892 	case FORMAT_FIELD_SEPERATOR:
893 		node = head;
894 		break;
895 
896 	case FORMAT_PRINTFMT:
897 		/* all done */
898 		return NULL;
899 	}
900 
901 	node = node->prev;
902 	if (node == common_head)
903 		return (void *)FORMAT_FIELD_SEPERATOR;
904 	else if (node == head)
905 		return (void *)FORMAT_PRINTFMT;
906 	else
907 		return node;
908 }
909 
910 static int f_show(struct seq_file *m, void *v)
911 {
912 	struct ftrace_event_call *call = event_file_data(m->private);
913 	struct ftrace_event_field *field;
914 	const char *array_descriptor;
915 
916 	switch ((unsigned long)v) {
917 	case FORMAT_HEADER:
918 		seq_printf(m, "name: %s\n", ftrace_event_name(call));
919 		seq_printf(m, "ID: %d\n", call->event.type);
920 		seq_printf(m, "format:\n");
921 		return 0;
922 
923 	case FORMAT_FIELD_SEPERATOR:
924 		seq_putc(m, '\n');
925 		return 0;
926 
927 	case FORMAT_PRINTFMT:
928 		seq_printf(m, "\nprint fmt: %s\n",
929 			   call->print_fmt);
930 		return 0;
931 	}
932 
933 	field = list_entry(v, struct ftrace_event_field, link);
934 	/*
935 	 * Smartly shows the array type (except dynamic arrays).
936 	 * Normal:
937 	 *	field:TYPE VAR
938 	 * If TYPE := TYPE[LEN], it is shown:
939 	 *	field:TYPE VAR[LEN]
940 	 */
941 	array_descriptor = strchr(field->type, '[');
942 
943 	if (!strncmp(field->type, "__data_loc", 10))
944 		array_descriptor = NULL;
945 
946 	if (!array_descriptor)
947 		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
948 			   field->type, field->name, field->offset,
949 			   field->size, !!field->is_signed);
950 	else
951 		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
952 			   (int)(array_descriptor - field->type),
953 			   field->type, field->name,
954 			   array_descriptor, field->offset,
955 			   field->size, !!field->is_signed);
956 
957 	return 0;
958 }
959 
960 static void *f_start(struct seq_file *m, loff_t *pos)
961 {
962 	void *p = (void *)FORMAT_HEADER;
963 	loff_t l = 0;
964 
965 	/* ->stop() is called even if ->start() fails */
966 	mutex_lock(&event_mutex);
967 	if (!event_file_data(m->private))
968 		return ERR_PTR(-ENODEV);
969 
970 	while (l < *pos && p)
971 		p = f_next(m, p, &l);
972 
973 	return p;
974 }
975 
976 static void f_stop(struct seq_file *m, void *p)
977 {
978 	mutex_unlock(&event_mutex);
979 }
980 
981 static const struct seq_operations trace_format_seq_ops = {
982 	.start		= f_start,
983 	.next		= f_next,
984 	.stop		= f_stop,
985 	.show		= f_show,
986 };
987 
988 static int trace_format_open(struct inode *inode, struct file *file)
989 {
990 	struct seq_file *m;
991 	int ret;
992 
993 	ret = seq_open(file, &trace_format_seq_ops);
994 	if (ret < 0)
995 		return ret;
996 
997 	m = file->private_data;
998 	m->private = file;
999 
1000 	return 0;
1001 }
1002 
1003 static ssize_t
1004 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1005 {
1006 	int id = (long)event_file_data(filp);
1007 	char buf[32];
1008 	int len;
1009 
1010 	if (*ppos)
1011 		return 0;
1012 
1013 	if (unlikely(!id))
1014 		return -ENODEV;
1015 
1016 	len = sprintf(buf, "%d\n", id);
1017 
1018 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
1019 }
1020 
1021 static ssize_t
1022 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1023 		  loff_t *ppos)
1024 {
1025 	struct ftrace_event_file *file;
1026 	struct trace_seq *s;
1027 	int r = -ENODEV;
1028 
1029 	if (*ppos)
1030 		return 0;
1031 
1032 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1033 
1034 	if (!s)
1035 		return -ENOMEM;
1036 
1037 	trace_seq_init(s);
1038 
1039 	mutex_lock(&event_mutex);
1040 	file = event_file_data(filp);
1041 	if (file)
1042 		print_event_filter(file, s);
1043 	mutex_unlock(&event_mutex);
1044 
1045 	if (file)
1046 		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1047 
1048 	kfree(s);
1049 
1050 	return r;
1051 }
1052 
1053 static ssize_t
1054 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1055 		   loff_t *ppos)
1056 {
1057 	struct ftrace_event_file *file;
1058 	char *buf;
1059 	int err = -ENODEV;
1060 
1061 	if (cnt >= PAGE_SIZE)
1062 		return -EINVAL;
1063 
1064 	buf = (char *)__get_free_page(GFP_TEMPORARY);
1065 	if (!buf)
1066 		return -ENOMEM;
1067 
1068 	if (copy_from_user(buf, ubuf, cnt)) {
1069 		free_page((unsigned long) buf);
1070 		return -EFAULT;
1071 	}
1072 	buf[cnt] = '\0';
1073 
1074 	mutex_lock(&event_mutex);
1075 	file = event_file_data(filp);
1076 	if (file)
1077 		err = apply_event_filter(file, buf);
1078 	mutex_unlock(&event_mutex);
1079 
1080 	free_page((unsigned long) buf);
1081 	if (err < 0)
1082 		return err;
1083 
1084 	*ppos += cnt;
1085 
1086 	return cnt;
1087 }
1088 
1089 static LIST_HEAD(event_subsystems);
1090 
1091 static int subsystem_open(struct inode *inode, struct file *filp)
1092 {
1093 	struct event_subsystem *system = NULL;
1094 	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1095 	struct trace_array *tr;
1096 	int ret;
1097 
1098 	if (tracing_is_disabled())
1099 		return -ENODEV;
1100 
1101 	/* Make sure the system still exists */
1102 	mutex_lock(&trace_types_lock);
1103 	mutex_lock(&event_mutex);
1104 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1105 		list_for_each_entry(dir, &tr->systems, list) {
1106 			if (dir == inode->i_private) {
1107 				/* Don't open systems with no events */
1108 				if (dir->nr_events) {
1109 					__get_system_dir(dir);
1110 					system = dir->subsystem;
1111 				}
1112 				goto exit_loop;
1113 			}
1114 		}
1115 	}
1116  exit_loop:
1117 	mutex_unlock(&event_mutex);
1118 	mutex_unlock(&trace_types_lock);
1119 
1120 	if (!system)
1121 		return -ENODEV;
1122 
1123 	/* Some versions of gcc think dir can be uninitialized here */
1124 	WARN_ON(!dir);
1125 
1126 	/* Still need to increment the ref count of the system */
1127 	if (trace_array_get(tr) < 0) {
1128 		put_system(dir);
1129 		return -ENODEV;
1130 	}
1131 
1132 	ret = tracing_open_generic(inode, filp);
1133 	if (ret < 0) {
1134 		trace_array_put(tr);
1135 		put_system(dir);
1136 	}
1137 
1138 	return ret;
1139 }
1140 
1141 static int system_tr_open(struct inode *inode, struct file *filp)
1142 {
1143 	struct ftrace_subsystem_dir *dir;
1144 	struct trace_array *tr = inode->i_private;
1145 	int ret;
1146 
1147 	if (tracing_is_disabled())
1148 		return -ENODEV;
1149 
1150 	if (trace_array_get(tr) < 0)
1151 		return -ENODEV;
1152 
1153 	/* Make a temporary dir that has no system but points to tr */
1154 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1155 	if (!dir) {
1156 		trace_array_put(tr);
1157 		return -ENOMEM;
1158 	}
1159 
1160 	dir->tr = tr;
1161 
1162 	ret = tracing_open_generic(inode, filp);
1163 	if (ret < 0) {
1164 		trace_array_put(tr);
1165 		kfree(dir);
1166 		return ret;
1167 	}
1168 
1169 	filp->private_data = dir;
1170 
1171 	return 0;
1172 }
1173 
1174 static int subsystem_release(struct inode *inode, struct file *file)
1175 {
1176 	struct ftrace_subsystem_dir *dir = file->private_data;
1177 
1178 	trace_array_put(dir->tr);
1179 
1180 	/*
1181 	 * If dir->subsystem is NULL, then this is a temporary
1182 	 * descriptor that was made for a trace_array to enable
1183 	 * all subsystems.
1184 	 */
1185 	if (dir->subsystem)
1186 		put_system(dir);
1187 	else
1188 		kfree(dir);
1189 
1190 	return 0;
1191 }
1192 
1193 static ssize_t
1194 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1195 		      loff_t *ppos)
1196 {
1197 	struct ftrace_subsystem_dir *dir = filp->private_data;
1198 	struct event_subsystem *system = dir->subsystem;
1199 	struct trace_seq *s;
1200 	int r;
1201 
1202 	if (*ppos)
1203 		return 0;
1204 
1205 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1206 	if (!s)
1207 		return -ENOMEM;
1208 
1209 	trace_seq_init(s);
1210 
1211 	print_subsystem_event_filter(system, s);
1212 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1213 
1214 	kfree(s);
1215 
1216 	return r;
1217 }
1218 
1219 static ssize_t
1220 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1221 		       loff_t *ppos)
1222 {
1223 	struct ftrace_subsystem_dir *dir = filp->private_data;
1224 	char *buf;
1225 	int err;
1226 
1227 	if (cnt >= PAGE_SIZE)
1228 		return -EINVAL;
1229 
1230 	buf = (char *)__get_free_page(GFP_TEMPORARY);
1231 	if (!buf)
1232 		return -ENOMEM;
1233 
1234 	if (copy_from_user(buf, ubuf, cnt)) {
1235 		free_page((unsigned long) buf);
1236 		return -EFAULT;
1237 	}
1238 	buf[cnt] = '\0';
1239 
1240 	err = apply_subsystem_event_filter(dir, buf);
1241 	free_page((unsigned long) buf);
1242 	if (err < 0)
1243 		return err;
1244 
1245 	*ppos += cnt;
1246 
1247 	return cnt;
1248 }
1249 
1250 static ssize_t
1251 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1252 {
1253 	int (*func)(struct trace_seq *s) = filp->private_data;
1254 	struct trace_seq *s;
1255 	int r;
1256 
1257 	if (*ppos)
1258 		return 0;
1259 
1260 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1261 	if (!s)
1262 		return -ENOMEM;
1263 
1264 	trace_seq_init(s);
1265 
1266 	func(s);
1267 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1268 
1269 	kfree(s);
1270 
1271 	return r;
1272 }
1273 
1274 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1275 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1276 static int ftrace_event_release(struct inode *inode, struct file *file);
1277 
1278 static const struct seq_operations show_event_seq_ops = {
1279 	.start = t_start,
1280 	.next = t_next,
1281 	.show = t_show,
1282 	.stop = t_stop,
1283 };
1284 
1285 static const struct seq_operations show_set_event_seq_ops = {
1286 	.start = s_start,
1287 	.next = s_next,
1288 	.show = t_show,
1289 	.stop = t_stop,
1290 };
1291 
1292 static const struct file_operations ftrace_avail_fops = {
1293 	.open = ftrace_event_avail_open,
1294 	.read = seq_read,
1295 	.llseek = seq_lseek,
1296 	.release = seq_release,
1297 };
1298 
1299 static const struct file_operations ftrace_set_event_fops = {
1300 	.open = ftrace_event_set_open,
1301 	.read = seq_read,
1302 	.write = ftrace_event_write,
1303 	.llseek = seq_lseek,
1304 	.release = ftrace_event_release,
1305 };
1306 
1307 static const struct file_operations ftrace_enable_fops = {
1308 	.open = tracing_open_generic,
1309 	.read = event_enable_read,
1310 	.write = event_enable_write,
1311 	.llseek = default_llseek,
1312 };
1313 
1314 static const struct file_operations ftrace_event_format_fops = {
1315 	.open = trace_format_open,
1316 	.read = seq_read,
1317 	.llseek = seq_lseek,
1318 	.release = seq_release,
1319 };
1320 
1321 static const struct file_operations ftrace_event_id_fops = {
1322 	.read = event_id_read,
1323 	.llseek = default_llseek,
1324 };
1325 
1326 static const struct file_operations ftrace_event_filter_fops = {
1327 	.open = tracing_open_generic,
1328 	.read = event_filter_read,
1329 	.write = event_filter_write,
1330 	.llseek = default_llseek,
1331 };
1332 
1333 static const struct file_operations ftrace_subsystem_filter_fops = {
1334 	.open = subsystem_open,
1335 	.read = subsystem_filter_read,
1336 	.write = subsystem_filter_write,
1337 	.llseek = default_llseek,
1338 	.release = subsystem_release,
1339 };
1340 
1341 static const struct file_operations ftrace_system_enable_fops = {
1342 	.open = subsystem_open,
1343 	.read = system_enable_read,
1344 	.write = system_enable_write,
1345 	.llseek = default_llseek,
1346 	.release = subsystem_release,
1347 };
1348 
1349 static const struct file_operations ftrace_tr_enable_fops = {
1350 	.open = system_tr_open,
1351 	.read = system_enable_read,
1352 	.write = system_enable_write,
1353 	.llseek = default_llseek,
1354 	.release = subsystem_release,
1355 };
1356 
1357 static const struct file_operations ftrace_show_header_fops = {
1358 	.open = tracing_open_generic,
1359 	.read = show_header,
1360 	.llseek = default_llseek,
1361 };
1362 
1363 static int
1364 ftrace_event_open(struct inode *inode, struct file *file,
1365 		  const struct seq_operations *seq_ops)
1366 {
1367 	struct seq_file *m;
1368 	int ret;
1369 
1370 	ret = seq_open(file, seq_ops);
1371 	if (ret < 0)
1372 		return ret;
1373 	m = file->private_data;
1374 	/* copy tr over to seq ops */
1375 	m->private = inode->i_private;
1376 
1377 	return ret;
1378 }
1379 
1380 static int ftrace_event_release(struct inode *inode, struct file *file)
1381 {
1382 	struct trace_array *tr = inode->i_private;
1383 
1384 	trace_array_put(tr);
1385 
1386 	return seq_release(inode, file);
1387 }
1388 
1389 static int
1390 ftrace_event_avail_open(struct inode *inode, struct file *file)
1391 {
1392 	const struct seq_operations *seq_ops = &show_event_seq_ops;
1393 
1394 	return ftrace_event_open(inode, file, seq_ops);
1395 }
1396 
1397 static int
1398 ftrace_event_set_open(struct inode *inode, struct file *file)
1399 {
1400 	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1401 	struct trace_array *tr = inode->i_private;
1402 	int ret;
1403 
1404 	if (trace_array_get(tr) < 0)
1405 		return -ENODEV;
1406 
1407 	if ((file->f_mode & FMODE_WRITE) &&
1408 	    (file->f_flags & O_TRUNC))
1409 		ftrace_clear_events(tr);
1410 
1411 	ret = ftrace_event_open(inode, file, seq_ops);
1412 	if (ret < 0)
1413 		trace_array_put(tr);
1414 	return ret;
1415 }
1416 
1417 static struct event_subsystem *
1418 create_new_subsystem(const char *name)
1419 {
1420 	struct event_subsystem *system;
1421 
1422 	/* need to create new entry */
1423 	system = kmalloc(sizeof(*system), GFP_KERNEL);
1424 	if (!system)
1425 		return NULL;
1426 
1427 	system->ref_count = 1;
1428 
1429 	/* Only allocate if dynamic (kprobes and modules) */
1430 	if (!core_kernel_data((unsigned long)name)) {
1431 		system->ref_count |= SYSTEM_FL_FREE_NAME;
1432 		system->name = kstrdup(name, GFP_KERNEL);
1433 		if (!system->name)
1434 			goto out_free;
1435 	} else
1436 		system->name = name;
1437 
1438 	system->filter = NULL;
1439 
1440 	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1441 	if (!system->filter)
1442 		goto out_free;
1443 
1444 	list_add(&system->list, &event_subsystems);
1445 
1446 	return system;
1447 
1448  out_free:
1449 	if (system->ref_count & SYSTEM_FL_FREE_NAME)
1450 		kfree(system->name);
1451 	kfree(system);
1452 	return NULL;
1453 }
1454 
1455 static struct dentry *
1456 event_subsystem_dir(struct trace_array *tr, const char *name,
1457 		    struct ftrace_event_file *file, struct dentry *parent)
1458 {
1459 	struct ftrace_subsystem_dir *dir;
1460 	struct event_subsystem *system;
1461 	struct dentry *entry;
1462 
1463 	/* First see if we already created this dir */
1464 	list_for_each_entry(dir, &tr->systems, list) {
1465 		system = dir->subsystem;
1466 		if (strcmp(system->name, name) == 0) {
1467 			dir->nr_events++;
1468 			file->system = dir;
1469 			return dir->entry;
1470 		}
1471 	}
1472 
1473 	/* Now see if the system itself exists. */
1474 	list_for_each_entry(system, &event_subsystems, list) {
1475 		if (strcmp(system->name, name) == 0)
1476 			break;
1477 	}
1478 	/* Reset system variable when not found */
1479 	if (&system->list == &event_subsystems)
1480 		system = NULL;
1481 
1482 	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1483 	if (!dir)
1484 		goto out_fail;
1485 
1486 	if (!system) {
1487 		system = create_new_subsystem(name);
1488 		if (!system)
1489 			goto out_free;
1490 	} else
1491 		__get_system(system);
1492 
1493 	dir->entry = debugfs_create_dir(name, parent);
1494 	if (!dir->entry) {
1495 		pr_warn("Failed to create system directory %s\n", name);
1496 		__put_system(system);
1497 		goto out_free;
1498 	}
1499 
1500 	dir->tr = tr;
1501 	dir->ref_count = 1;
1502 	dir->nr_events = 1;
1503 	dir->subsystem = system;
1504 	file->system = dir;
1505 
1506 	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1507 				    &ftrace_subsystem_filter_fops);
1508 	if (!entry) {
1509 		kfree(system->filter);
1510 		system->filter = NULL;
1511 		pr_warn("Could not create debugfs '%s/filter' entry\n", name);
1512 	}
1513 
1514 	trace_create_file("enable", 0644, dir->entry, dir,
1515 			  &ftrace_system_enable_fops);
1516 
1517 	list_add(&dir->list, &tr->systems);
1518 
1519 	return dir->entry;
1520 
1521  out_free:
1522 	kfree(dir);
1523  out_fail:
1524 	/* Only print this message if we failed on memory allocation */
1525 	if (!dir || !system)
1526 		pr_warn("No memory to create event subsystem %s\n", name);
1527 	return NULL;
1528 }
1529 
1530 static int
1531 event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1532 {
1533 	struct ftrace_event_call *call = file->event_call;
1534 	struct trace_array *tr = file->tr;
1535 	struct list_head *head;
1536 	struct dentry *d_events;
1537 	const char *name;
1538 	int ret;
1539 
1540 	/*
1541 	 * If the trace point header did not define TRACE_SYSTEM
1542 	 * then the system would be called "TRACE_SYSTEM".
1543 	 */
1544 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1545 		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1546 		if (!d_events)
1547 			return -ENOMEM;
1548 	} else
1549 		d_events = parent;
1550 
1551 	name = ftrace_event_name(call);
1552 	file->dir = debugfs_create_dir(name, d_events);
1553 	if (!file->dir) {
1554 		pr_warn("Could not create debugfs '%s' directory\n", name);
1555 		return -1;
1556 	}
1557 
1558 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1559 		trace_create_file("enable", 0644, file->dir, file,
1560 				  &ftrace_enable_fops);
1561 
1562 #ifdef CONFIG_PERF_EVENTS
1563 	if (call->event.type && call->class->reg)
1564 		trace_create_file("id", 0444, file->dir,
1565 				  (void *)(long)call->event.type,
1566 				  &ftrace_event_id_fops);
1567 #endif
1568 
1569 	/*
1570 	 * Other events may have the same class. Only update
1571 	 * the fields if they are not already defined.
1572 	 */
1573 	head = trace_get_fields(call);
1574 	if (list_empty(head)) {
1575 		ret = call->class->define_fields(call);
1576 		if (ret < 0) {
1577 			pr_warn("Could not initialize trace point events/%s\n",
1578 				name);
1579 			return -1;
1580 		}
1581 	}
1582 	trace_create_file("filter", 0644, file->dir, file,
1583 			  &ftrace_event_filter_fops);
1584 
1585 	trace_create_file("trigger", 0644, file->dir, file,
1586 			  &event_trigger_fops);
1587 
1588 	trace_create_file("format", 0444, file->dir, call,
1589 			  &ftrace_event_format_fops);
1590 
1591 	return 0;
1592 }
1593 
1594 static void remove_event_from_tracers(struct ftrace_event_call *call)
1595 {
1596 	struct ftrace_event_file *file;
1597 	struct trace_array *tr;
1598 
1599 	do_for_each_event_file_safe(tr, file) {
1600 		if (file->event_call != call)
1601 			continue;
1602 
1603 		remove_event_file_dir(file);
1604 		/*
1605 		 * The do_for_each_event_file_safe() is
1606 		 * a double loop. After finding the call for this
1607 		 * trace_array, we use break to jump to the next
1608 		 * trace_array.
1609 		 */
1610 		break;
1611 	} while_for_each_event_file();
1612 }
1613 
1614 static void event_remove(struct ftrace_event_call *call)
1615 {
1616 	struct trace_array *tr;
1617 	struct ftrace_event_file *file;
1618 
1619 	do_for_each_event_file(tr, file) {
1620 		if (file->event_call != call)
1621 			continue;
1622 		ftrace_event_enable_disable(file, 0);
1623 		destroy_preds(file);
1624 		/*
1625 		 * The do_for_each_event_file() is
1626 		 * a double loop. After finding the call for this
1627 		 * trace_array, we use break to jump to the next
1628 		 * trace_array.
1629 		 */
1630 		break;
1631 	} while_for_each_event_file();
1632 
1633 	if (call->event.funcs)
1634 		__unregister_ftrace_event(&call->event);
1635 	remove_event_from_tracers(call);
1636 	list_del(&call->list);
1637 }
1638 
1639 static int event_init(struct ftrace_event_call *call)
1640 {
1641 	int ret = 0;
1642 	const char *name;
1643 
1644 	name = ftrace_event_name(call);
1645 	if (WARN_ON(!name))
1646 		return -EINVAL;
1647 
1648 	if (call->class->raw_init) {
1649 		ret = call->class->raw_init(call);
1650 		if (ret < 0 && ret != -ENOSYS)
1651 			pr_warn("Could not initialize trace events/%s\n", name);
1652 	}
1653 
1654 	return ret;
1655 }
1656 
1657 static int
1658 __register_event(struct ftrace_event_call *call, struct module *mod)
1659 {
1660 	int ret;
1661 
1662 	ret = event_init(call);
1663 	if (ret < 0)
1664 		return ret;
1665 
1666 	list_add(&call->list, &ftrace_events);
1667 	call->mod = mod;
1668 
1669 	return 0;
1670 }
1671 
1672 static struct ftrace_event_file *
1673 trace_create_new_event(struct ftrace_event_call *call,
1674 		       struct trace_array *tr)
1675 {
1676 	struct ftrace_event_file *file;
1677 
1678 	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1679 	if (!file)
1680 		return NULL;
1681 
1682 	file->event_call = call;
1683 	file->tr = tr;
1684 	atomic_set(&file->sm_ref, 0);
1685 	atomic_set(&file->tm_ref, 0);
1686 	INIT_LIST_HEAD(&file->triggers);
1687 	list_add(&file->list, &tr->events);
1688 
1689 	return file;
1690 }
1691 
1692 /* Add an event to a trace directory */
1693 static int
1694 __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
1695 {
1696 	struct ftrace_event_file *file;
1697 
1698 	file = trace_create_new_event(call, tr);
1699 	if (!file)
1700 		return -ENOMEM;
1701 
1702 	return event_create_dir(tr->event_dir, file);
1703 }
1704 
1705 /*
1706  * Just create a descriptor for early init. A descriptor is required
1707  * for enabling events at boot. We want to enable events before
1708  * the filesystem is initialized.
1709  */
1710 static __init int
1711 __trace_early_add_new_event(struct ftrace_event_call *call,
1712 			    struct trace_array *tr)
1713 {
1714 	struct ftrace_event_file *file;
1715 
1716 	file = trace_create_new_event(call, tr);
1717 	if (!file)
1718 		return -ENOMEM;
1719 
1720 	return 0;
1721 }
1722 
1723 struct ftrace_module_file_ops;
1724 static void __add_event_to_tracers(struct ftrace_event_call *call);
1725 
1726 /* Add an additional event_call dynamically */
1727 int trace_add_event_call(struct ftrace_event_call *call)
1728 {
1729 	int ret;
1730 	mutex_lock(&trace_types_lock);
1731 	mutex_lock(&event_mutex);
1732 
1733 	ret = __register_event(call, NULL);
1734 	if (ret >= 0)
1735 		__add_event_to_tracers(call);
1736 
1737 	mutex_unlock(&event_mutex);
1738 	mutex_unlock(&trace_types_lock);
1739 	return ret;
1740 }
1741 
1742 /*
1743  * Must be called under locking of trace_types_lock, event_mutex and
1744  * trace_event_sem.
1745  */
1746 static void __trace_remove_event_call(struct ftrace_event_call *call)
1747 {
1748 	event_remove(call);
1749 	trace_destroy_fields(call);
1750 	destroy_call_preds(call);
1751 }
1752 
1753 static int probe_remove_event_call(struct ftrace_event_call *call)
1754 {
1755 	struct trace_array *tr;
1756 	struct ftrace_event_file *file;
1757 
1758 #ifdef CONFIG_PERF_EVENTS
1759 	if (call->perf_refcount)
1760 		return -EBUSY;
1761 #endif
1762 	do_for_each_event_file(tr, file) {
1763 		if (file->event_call != call)
1764 			continue;
1765 		/*
1766 		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
1767 		 * that we are going to do; FTRACE_EVENT_FL_SOFT_MODE can suppress
1768 		 * TRACE_REG_UNREGISTER.
1769 		 */
1770 		if (file->flags & FTRACE_EVENT_FL_ENABLED)
1771 			return -EBUSY;
1772 		/*
1773 		 * The do_for_each_event_file() is
1774 		 * a double loop. After finding the call for this
1775 		 * trace_array, we use break to jump to the next
1776 		 * trace_array.
1777 		 */
1778 		break;
1779 	} while_for_each_event_file();
1780 
1781 	__trace_remove_event_call(call);
1782 
1783 	return 0;
1784 }
1785 
1786 /* Remove an event_call */
1787 int trace_remove_event_call(struct ftrace_event_call *call)
1788 {
1789 	int ret;
1790 
1791 	mutex_lock(&trace_types_lock);
1792 	mutex_lock(&event_mutex);
1793 	down_write(&trace_event_sem);
1794 	ret = probe_remove_event_call(call);
1795 	up_write(&trace_event_sem);
1796 	mutex_unlock(&event_mutex);
1797 	mutex_unlock(&trace_types_lock);
1798 
1799 	return ret;
1800 }
1801 
1802 #define for_each_event(event, start, end)			\
1803 	for (event = start;					\
1804 	     (unsigned long)event < (unsigned long)end;		\
1805 	     event++)
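/*
 * Iterates an array of event call pointers, i.e. the range
 * mod->trace_events .. mod->trace_events + mod->num_trace_events used
 * by trace_module_add_events() below.
 */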
1806 
1807 #ifdef CONFIG_MODULES
1808 
1809 static void trace_module_add_events(struct module *mod)
1810 {
1811 	struct ftrace_event_call **call, **start, **end;
1812 
1813 	if (!mod->num_trace_events)
1814 		return;
1815 
1816 	/* Don't add infrastructure for mods without tracepoints */
1817 	if (trace_module_has_bad_taint(mod)) {
1818 		pr_err("%s: module has bad taint, not creating trace events\n",
1819 		       mod->name);
1820 		return;
1821 	}
1822 
1823 	start = mod->trace_events;
1824 	end = mod->trace_events + mod->num_trace_events;
1825 
1826 	for_each_event(call, start, end) {
1827 		__register_event(*call, mod);
1828 		__add_event_to_tracers(*call);
1829 	}
1830 }
1831 
1832 static void trace_module_remove_events(struct module *mod)
1833 {
1834 	struct ftrace_event_call *call, *p;
1835 	bool clear_trace = false;
1836 
1837 	down_write(&trace_event_sem);
1838 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
1839 		if (call->mod == mod) {
1840 			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1841 				clear_trace = true;
1842 			__trace_remove_event_call(call);
1843 		}
1844 	}
1845 	up_write(&trace_event_sem);
1846 
1847 	/*
1848 	 * It is safest to reset the ring buffer if the module being unloaded
1849 	 * registered any events that were used. The only worry is if
1850 	 * a new module gets loaded, and takes on the same id as the events
1851 	 * of this module. When printing out the buffer, traced events left
1852 	 * over from this module may be passed to the new module events and
1853 	 * unexpected results may occur.
1854 	 */
1855 	if (clear_trace)
1856 		tracing_reset_all_online_cpus();
1857 }
1858 
1859 static int trace_module_notify(struct notifier_block *self,
1860 			       unsigned long val, void *data)
1861 {
1862 	struct module *mod = data;
1863 
1864 	mutex_lock(&trace_types_lock);
1865 	mutex_lock(&event_mutex);
1866 	switch (val) {
1867 	case MODULE_STATE_COMING:
1868 		trace_module_add_events(mod);
1869 		break;
1870 	case MODULE_STATE_GOING:
1871 		trace_module_remove_events(mod);
1872 		break;
1873 	}
1874 	mutex_unlock(&event_mutex);
1875 	mutex_unlock(&trace_types_lock);
1876 
1877 	return 0;
1878 }
1879 
1880 static struct notifier_block trace_module_nb = {
1881 	.notifier_call = trace_module_notify,
1882 	.priority = 0,
1883 };
1884 #endif /* CONFIG_MODULES */
1885 
1886 /* Create a new event directory structure for a trace directory. */
1887 static void
1888 __trace_add_event_dirs(struct trace_array *tr)
1889 {
1890 	struct ftrace_event_call *call;
1891 	int ret;
1892 
1893 	list_for_each_entry(call, &ftrace_events, list) {
1894 		ret = __trace_add_new_event(call, tr);
1895 		if (ret < 0)
1896 			pr_warn("Could not create directory for event %s\n",
1897 				ftrace_event_name(call));
1898 	}
1899 }
1900 
1901 struct ftrace_event_file *
1902 find_event_file(struct trace_array *tr, const char *system,  const char *event)
1903 {
1904 	struct ftrace_event_file *file;
1905 	struct ftrace_event_call *call;
1906 	const char *name;
1907 
1908 	list_for_each_entry(file, &tr->events, list) {
1909 
1910 		call = file->event_call;
1911 		name = ftrace_event_name(call);
1912 
1913 		if (!name || !call->class || !call->class->reg)
1914 			continue;
1915 
1916 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1917 			continue;
1918 
1919 		if (strcmp(event, name) == 0 &&
1920 		    strcmp(system, call->class->system) == 0)
1921 			return file;
1922 	}
1923 	return NULL;
1924 }
1925 
1926 #ifdef CONFIG_DYNAMIC_FTRACE
1927 
1928 /* Avoid typos */
1929 #define ENABLE_EVENT_STR	"enable_event"
1930 #define DISABLE_EVENT_STR	"disable_event"
1931 
1932 struct event_probe_data {
1933 	struct ftrace_event_file	*file;
1934 	unsigned long			count;
1935 	int				ref;
1936 	bool				enable;
1937 };
1938 
1939 static void
1940 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1941 {
1942 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1943 	struct event_probe_data *data = *pdata;
1944 
1945 	if (!data)
1946 		return;
1947 
1948 	if (data->enable)
1949 		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1950 	else
1951 		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1952 }
1953 
1954 static void
1955 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1956 {
1957 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1958 	struct event_probe_data *data = *pdata;
1959 
1960 	if (!data)
1961 		return;
1962 
1963 	if (!data->count)
1964 		return;
1965 
1966 	/* Skip if the event is in a state we want to switch to */
1967 	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
1968 		return;
1969 
1970 	if (data->count != -1)
1971 		(data->count)--;
1972 
1973 	event_enable_probe(ip, parent_ip, _data);
1974 }
1975 
1976 static int
1977 event_enable_print(struct seq_file *m, unsigned long ip,
1978 		      struct ftrace_probe_ops *ops, void *_data)
1979 {
1980 	struct event_probe_data *data = _data;
1981 
1982 	seq_printf(m, "%ps:", (void *)ip);
1983 
1984 	seq_printf(m, "%s:%s:%s",
1985 		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1986 		   data->file->event_call->class->system,
1987 		   ftrace_event_name(data->file->event_call));
1988 
1989 	if (data->count == -1)
1990 		seq_printf(m, ":unlimited\n");
1991 	else
1992 		seq_printf(m, ":count=%ld\n", data->count);
1993 
1994 	return 0;
1995 }
1996 
1997 static int
1998 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
1999 		  void **_data)
2000 {
2001 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
2002 	struct event_probe_data *data = *pdata;
2003 
2004 	data->ref++;
2005 	return 0;
2006 }
2007 
2008 static void
2009 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2010 		  void **_data)
2011 {
2012 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
2013 	struct event_probe_data *data = *pdata;
2014 
2015 	if (WARN_ON_ONCE(data->ref <= 0))
2016 		return;
2017 
2018 	data->ref--;
2019 	if (!data->ref) {
2020 		/* Remove the SOFT_MODE flag */
2021 		__ftrace_event_enable_disable(data->file, 0, 1);
2022 		module_put(data->file->event_call->mod);
2023 		kfree(data);
2024 	}
2025 	*pdata = NULL;
2026 }
2027 
2028 static struct ftrace_probe_ops event_enable_probe_ops = {
2029 	.func			= event_enable_probe,
2030 	.print			= event_enable_print,
2031 	.init			= event_enable_init,
2032 	.free			= event_enable_free,
2033 };
2034 
2035 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2036 	.func			= event_enable_count_probe,
2037 	.print			= event_enable_print,
2038 	.init			= event_enable_init,
2039 	.free			= event_enable_free,
2040 };
2041 
2042 static struct ftrace_probe_ops event_disable_probe_ops = {
2043 	.func			= event_enable_probe,
2044 	.print			= event_enable_print,
2045 	.init			= event_enable_init,
2046 	.free			= event_enable_free,
2047 };
2048 
2049 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2050 	.func			= event_enable_count_probe,
2051 	.print			= event_enable_print,
2052 	.init			= event_enable_init,
2053 	.free			= event_enable_free,
2054 };
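/*
 * The enable and disable flavors share the same callbacks; whether a
 * probe enables or disables its event is carried in data->enable, set
 * from the command name in event_enable_func() below.
 */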
2055 
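/*
 * Handler for both commands above. A hedged usage sketch, following the
 * syntax in Documentation/trace/ftrace.txt:
 *
 *	echo 'schedule:enable_event:sched:sched_switch:2' > \
 *			set_ftrace_filter
 *
 * soft-enables sched:sched_switch for the first two hits of schedule().
 */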
2056 static int
2057 event_enable_func(struct ftrace_hash *hash,
2058 		  char *glob, char *cmd, char *param, int enabled)
2059 {
2060 	struct trace_array *tr = top_trace_array();
2061 	struct ftrace_event_file *file;
2062 	struct ftrace_probe_ops *ops;
2063 	struct event_probe_data *data;
2064 	const char *system;
2065 	const char *event;
2066 	char *number;
2067 	bool enable;
2068 	int ret;
2069 
2070 	if (!tr)
2071 		return -ENODEV;
2072 
2073 	/* hash funcs only work with set_ftrace_filter */
2074 	if (!enabled || !param)
2075 		return -EINVAL;
2076 
2077 	system = strsep(&param, ":");
2078 	if (!param)
2079 		return -EINVAL;
2080 
2081 	event = strsep(&param, ":");
2082 
2083 	mutex_lock(&event_mutex);
2084 
2085 	ret = -EINVAL;
2086 	file = find_event_file(tr, system, event);
2087 	if (!file)
2088 		goto out;
2089 
2090 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2091 
2092 	if (enable)
2093 		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2094 	else
2095 		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2096 
2097 	if (glob[0] == '!') {
2098 		unregister_ftrace_function_probe_func(glob+1, ops);
2099 		ret = 0;
2100 		goto out;
2101 	}
2102 
2103 	ret = -ENOMEM;
2104 	data = kzalloc(sizeof(*data), GFP_KERNEL);
2105 	if (!data)
2106 		goto out;
2107 
2108 	data->enable = enable;
2109 	data->count = -1;
2110 	data->file = file;
2111 
2112 	if (!param)
2113 		goto out_reg;
2114 
2115 	number = strsep(&param, ":");
2116 
2117 	ret = -EINVAL;
2118 	if (!strlen(number))
2119 		goto out_free;
2120 
2121 	/*
2122 	 * We use the callback data field (which is a pointer)
2123 	 * as our counter.
2124 	 */
2125 	ret = kstrtoul(number, 0, &data->count);
2126 	if (ret)
2127 		goto out_free;
2128 
2129  out_reg:
2130 	/* Don't let event modules unload while a probe is registered */
2131 	ret = try_module_get(file->event_call->mod);
2132 	if (!ret) {
2133 		ret = -EBUSY;
2134 		goto out_free;
2135 	}
2136 
2137 	ret = __ftrace_event_enable_disable(file, 1, 1);
2138 	if (ret < 0)
2139 		goto out_put;
2140 	ret = register_ftrace_function_probe(glob, ops, data);
2141 	/*
2142 	 * On success, the above returns the number of functions enabled,
2143 	 * but if it didn't find any functions it returns zero.
2144 	 * Consider finding no functions a failure too.
2145 	 */
2146 	if (!ret) {
2147 		ret = -ENOENT;
2148 		goto out_disable;
2149 	} else if (ret < 0)
2150 		goto out_disable;
2151 	/* Just return zero, not the number of enabled functions */
2152 	ret = 0;
2153  out:
2154 	mutex_unlock(&event_mutex);
2155 	return ret;
2156 
2157  out_disable:
2158 	__ftrace_event_enable_disable(file, 0, 1);
2159  out_put:
2160 	module_put(file->event_call->mod);
2161  out_free:
2162 	kfree(data);
2163 	goto out;
2164 }
2165 
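/*
 * A sketch of typical usage from user space, via the function filter
 * file (paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	# enable sched:sched_switch each time schedule() is hit
 *	echo 'schedule:enable_event:sched:sched_switch' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *
 *	# the same, but only for the first 5 hits
 *	echo 'schedule:enable_event:sched:sched_switch:5' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *
 *	# remove the probe again
 *	echo '!schedule:enable_event:sched:sched_switch' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 */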
2166 static struct ftrace_func_command event_enable_cmd = {
2167 	.name			= ENABLE_EVENT_STR,
2168 	.func			= event_enable_func,
2169 };
2170 
2171 static struct ftrace_func_command event_disable_cmd = {
2172 	.name			= DISABLE_EVENT_STR,
2173 	.func			= event_enable_func,
2174 };
2175 
2176 static __init int register_event_cmds(void)
2177 {
2178 	int ret;
2179 
2180 	ret = register_ftrace_command(&event_enable_cmd);
2181 	if (WARN_ON(ret < 0))
2182 		return ret;
2183 	ret = register_ftrace_command(&event_disable_cmd);
2184 	if (WARN_ON(ret < 0))
2185 		unregister_ftrace_command(&event_enable_cmd);
2186 	return ret;
2187 }
2188 #else
2189 static inline int register_event_cmds(void) { return 0; }
2190 #endif /* CONFIG_DYNAMIC_FTRACE */
2191 
2192 /*
2193  * The top level array has already had its ftrace_event_file
2194  * descriptors created in order to allow for early events to
2195  * be recorded. This function is called after the debugfs has been
2196  * initialized, and we now have to create the files associated
2197  * with the events.
2198  */
2199 static __init void
2200 __trace_early_add_event_dirs(struct trace_array *tr)
2201 {
2202 	struct ftrace_event_file *file;
2203 	int ret;
2204 
2206 	list_for_each_entry(file, &tr->events, list) {
2207 		ret = event_create_dir(tr->event_dir, file);
2208 		if (ret < 0)
2209 			pr_warn("Could not create directory for event %s\n",
2210 				ftrace_event_name(file->event_call));
2211 	}
2212 }
2213 
2214 /*
2215  * For early boot up, the top trace array needs to have
2216  * a list of events that can be enabled. This must be done before
2217  * the filesystem is set up in order to allow events to be traced
2218  * early.
2219  */
2220 static __init void
2221 __trace_early_add_events(struct trace_array *tr)
2222 {
2223 	struct ftrace_event_call *call;
2224 	int ret;
2225 
2226 	list_for_each_entry(call, &ftrace_events, list) {
2227 		/* Early boot up should not have any modules loaded */
2228 		if (WARN_ON_ONCE(call->mod))
2229 			continue;
2230 
2231 		ret = __trace_early_add_new_event(call, tr);
2232 		if (ret < 0)
2233 			pr_warn("Could not create early event %s\n",
2234 				ftrace_event_name(call));
2235 	}
2236 }
2237 
2238 /* Remove the event directory structure for a trace directory. */
2239 static void
2240 __trace_remove_event_dirs(struct trace_array *tr)
2241 {
2242 	struct ftrace_event_file *file, *next;
2243 
2244 	list_for_each_entry_safe(file, next, &tr->events, list)
2245 		remove_event_file_dir(file);
2246 }
2247 
2248 static void __add_event_to_tracers(struct ftrace_event_call *call)
2249 {
2250 	struct trace_array *tr;
2251 
2252 	list_for_each_entry(tr, &ftrace_trace_arrays, list)
2253 		__trace_add_new_event(call, tr);
2254 }
2255 
2256 extern struct ftrace_event_call *__start_ftrace_events[];
2257 extern struct ftrace_event_call *__stop_ftrace_events[];
2258 
2259 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2260 
2261 static __init int setup_trace_event(char *str)
2262 {
2263 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2264 	ring_buffer_expanded = true;
2265 	tracing_selftest_disabled = true;
2266 
2267 	return 1;
2268 }
2269 __setup("trace_event=", setup_trace_event);
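
/*
 * For example, booting with:
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * stashes the list above, and event_trace_enable() later walks it
 * and enables each event long before debugfs exists (the event
 * names here are illustrative).
 */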
2270 
2271 /* Expects to have event_mutex held when called */
2272 static int
2273 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2274 {
2275 	struct dentry *d_events;
2276 	struct dentry *entry;
2277 
2278 	entry = debugfs_create_file("set_event", 0644, parent,
2279 				    tr, &ftrace_set_event_fops);
2280 	if (!entry) {
2281 		pr_warn("Could not create debugfs 'set_event' entry\n");
2282 		return -ENOMEM;
2283 	}
2284 
2285 	d_events = debugfs_create_dir("events", parent);
2286 	if (!d_events) {
2287 		pr_warn("Could not create debugfs 'events' directory\n");
2288 		return -ENOMEM;
2289 	}
2290 
2291 	/* ring buffer internal formats */
2292 	trace_create_file("header_page", 0444, d_events,
2293 			  ring_buffer_print_page_header,
2294 			  &ftrace_show_header_fops);
2295 
2296 	trace_create_file("header_event", 0444, d_events,
2297 			  ring_buffer_print_entry_header,
2298 			  &ftrace_show_header_fops);
2299 
2300 	trace_create_file("enable", 0644, d_events,
2301 			  tr, &ftrace_tr_enable_fops);
2302 
2303 	tr->event_dir = d_events;
2304 
2305 	return 0;
2306 }
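
/*
 * The resulting layout under the instance directory is roughly:
 *
 *	set_event		(rw) set/clear events by name
 *	events/enable		(rw) toggle every event at once
 *	events/header_page	(ro) ring buffer page header format
 *	events/header_event	(ro) ring buffer event header format
 *
 * The per system and per event subdirectories under events/ are
 * filled in later by __trace_add_event_dirs() or
 * __trace_early_add_event_dirs().
 */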
2307 
2308 /**
2309  * event_trace_add_tracer - add an instance of a trace_array to events
2310  * @parent: The parent dentry to place the files/directories for events in
2311  * @tr: The trace array associated with these events
2312  *
2313  * When a new instance is created, it needs to set up its events
2314  * directory, as well as other files associated with events. It also
2315  * creates the event hierarchy in the @parent/events directory.
2316  *
2317  * Returns 0 on success.
2318  */
2319 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2320 {
2321 	int ret;
2322 
2323 	mutex_lock(&event_mutex);
2324 
2325 	ret = create_event_toplevel_files(parent, tr);
2326 	if (ret)
2327 		goto out_unlock;
2328 
2329 	down_write(&trace_event_sem);
2330 	__trace_add_event_dirs(tr);
2331 	up_write(&trace_event_sem);
2332 
2333  out_unlock:
2334 	mutex_unlock(&event_mutex);
2335 
2336 	return ret;
2337 }
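
/*
 * In practice event_trace_add_tracer() runs when user space creates
 * a new trace instance, e.g.:
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 */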
2338 
2339 /*
2340  * The top trace array has already had its file descriptors created.
2341  * Now the files themselves need to be created.
2342  */
2343 static __init int
2344 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2345 {
2346 	int ret;
2347 
2348 	mutex_lock(&event_mutex);
2349 
2350 	ret = create_event_toplevel_files(parent, tr);
2351 	if (ret)
2352 		goto out_unlock;
2353 
2354 	down_write(&trace_event_sem);
2355 	__trace_early_add_event_dirs(tr);
2356 	up_write(&trace_event_sem);
2357 
2358  out_unlock:
2359 	mutex_unlock(&event_mutex);
2360 
2361 	return ret;
2362 }
2363 
2364 int event_trace_del_tracer(struct trace_array *tr)
2365 {
2366 	mutex_lock(&event_mutex);
2367 
2368 	/* Disable any event triggers and associated soft-disabled events */
2369 	clear_event_triggers(tr);
2370 
2371 	/* Disable any running events */
2372 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2373 
2374 	/* Accesses to events are within rcu_read_lock_sched() */
2375 	synchronize_sched();
2376 
2377 	down_write(&trace_event_sem);
2378 	__trace_remove_event_dirs(tr);
2379 	debugfs_remove_recursive(tr->event_dir);
2380 	up_write(&trace_event_sem);
2381 
2382 	tr->event_dir = NULL;
2383 
2384 	mutex_unlock(&event_mutex);
2385 
2386 	return 0;
2387 }
2388 
2389 static __init int event_trace_memsetup(void)
2390 {
2391 	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2392 	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2393 	return 0;
2394 }
2395 
2396 static __init int event_trace_enable(void)
2397 {
2398 	struct trace_array *tr = top_trace_array();
2399 	struct ftrace_event_call **iter, *call;
2400 	char *buf = bootup_event_buf;
2401 	char *token;
2402 	int ret;
2403 
2404 	if (!tr)
2405 		return -ENODEV;
2406 
2407 	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2408 
2409 		call = *iter;
2410 		ret = event_init(call);
2411 		if (!ret)
2412 			list_add(&call->list, &ftrace_events);
2413 	}
2414 
2415 	/*
2416 	 * We need the top trace array to have a working set of trace
2417 	 * points at early init, before the debug files and directories
2418 	 * are created. Create the file entries now, and attach them
2419 	 * to the actual file dentries later.
2420 	 */
2421 	__trace_early_add_events(tr);
2422 
2423 	while (true) {
2424 		token = strsep(&buf, ",");
2425 
2426 		if (!token)
2427 			break;
2428 		if (!*token)
2429 			continue;
2430 
2431 		ret = ftrace_set_clr_event(tr, token, 1);
2432 		if (ret)
2433 			pr_warn("Failed to enable trace event: %s\n", token);
2434 	}
2435 
2436 	trace_printk_start_comm();
2437 
2438 	register_event_cmds();
2439 
2440 	register_trigger_cmds();
2441 
2442 	return 0;
2443 }
2444 
2445 static __init int event_trace_init(void)
2446 {
2447 	struct trace_array *tr;
2448 	struct dentry *d_tracer;
2449 	struct dentry *entry;
2450 	int ret;
2451 
2452 	tr = top_trace_array();
2453 	if (!tr)
2454 		return -ENODEV;
2455 
2456 	d_tracer = tracing_init_dentry();
2457 	if (!d_tracer)
2458 		return 0;
2459 
2460 	entry = debugfs_create_file("available_events", 0444, d_tracer,
2461 				    tr, &ftrace_avail_fops);
2462 	if (!entry)
2463 		pr_warn("Could not create debugfs 'available_events' entry\n");
2464 
2465 	if (trace_define_common_fields())
2466 		pr_warn("tracing: Failed to allocate common fields\n");
2467 
2468 	ret = early_event_add_tracer(d_tracer, tr);
2469 	if (ret)
2470 		return ret;
2471 
2472 #ifdef CONFIG_MODULES
2473 	ret = register_module_notifier(&trace_module_nb);
2474 	if (ret)
2475 		pr_warn("Failed to register trace events module notifier\n");
2476 #endif
2477 	return 0;
2478 }
2479 early_initcall(event_trace_memsetup);
2480 core_initcall(event_trace_enable);
2481 fs_initcall(event_trace_init);
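
/*
 * The initcall ordering above matters: the slab caches must exist
 * before any event is registered (early_initcall), events must be
 * registered before the trace_event= boot parameter can enable them
 * (core_initcall), and the debugfs files can only be created once
 * the filesystem itself is up (fs_initcall).
 */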
2482 
2483 #ifdef CONFIG_FTRACE_STARTUP_TEST
2484 
2485 static DEFINE_SPINLOCK(test_spinlock);
2486 static DEFINE_SPINLOCK(test_spinlock_irq);
2487 static DEFINE_MUTEX(test_mutex);
2488 
2489 static __init void test_work(struct work_struct *dummy)
2490 {
2491 	spin_lock(&test_spinlock);
2492 	spin_lock_irq(&test_spinlock_irq);
2493 	udelay(1);
2494 	spin_unlock_irq(&test_spinlock_irq);
2495 	spin_unlock(&test_spinlock);
2496 
2497 	mutex_lock(&test_mutex);
2498 	msleep(1);
2499 	mutex_unlock(&test_mutex);
2500 }
2501 
2502 static __init int event_test_thread(void *unused)
2503 {
2504 	void *test_malloc;
2505 
2506 	test_malloc = kmalloc(1234, GFP_KERNEL);
2507 	if (!test_malloc)
2508 		pr_info("failed to kmalloc\n");
2509 
2510 	schedule_on_each_cpu(test_work);
2511 
2512 	kfree(test_malloc);
2513 
2514 	set_current_state(TASK_INTERRUPTIBLE);
2515 	while (!kthread_should_stop())
2516 		schedule();
2517 
2518 	return 0;
2519 }
2520 
2521 /*
2522  * Do various things that may trigger events.
2523  */
2524 static __init void event_test_stuff(void)
2525 {
2526 	struct task_struct *test_thread;
2527 
2528 	test_thread = kthread_run(event_test_thread, NULL, "test-events");
2529 	msleep(1);
2530 	kthread_stop(test_thread);
2531 }
2532 
2533 /*
2534  * For every trace event defined, we will test each trace point separately,
2535  * and then by groups, and finally all trace points.
2536  */
2537 static __init void event_trace_self_tests(void)
2538 {
2539 	struct ftrace_subsystem_dir *dir;
2540 	struct ftrace_event_file *file;
2541 	struct ftrace_event_call *call;
2542 	struct event_subsystem *system;
2543 	struct trace_array *tr;
2544 	int ret;
2545 
2546 	tr = top_trace_array();
2547 	if (!tr)
2548 		return;
2549 
2550 	pr_info("Running tests on trace events:\n");
2551 
2552 	list_for_each_entry(file, &tr->events, list) {
2553 
2554 		call = file->event_call;
2555 
2556 		/* Only test those that have a probe */
2557 		if (!call->class || !call->class->probe)
2558 			continue;
2559 
2560 /*
2561  * Testing syscall events here is pretty useless, but
2562  * we still do it if configured, even though it is time consuming.
2563  * What we really need is a user thread to perform the
2564  * syscalls as we test.
2565  */
2566 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2567 		if (call->class->system &&
2568 		    strcmp(call->class->system, "syscalls") == 0)
2569 			continue;
2570 #endif
2571 
2572 		pr_info("Testing event %s: ", ftrace_event_name(call));
2573 
2574 		/*
2575 		 * If an event is already enabled, someone is using
2576 		 * it and the self test should not be on.
2577 		 */
2578 		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2579 			pr_warn("Enabled event during self test!\n");
2580 			WARN_ON_ONCE(1);
2581 			continue;
2582 		}
2583 
2584 		ftrace_event_enable_disable(file, 1);
2585 		event_test_stuff();
2586 		ftrace_event_enable_disable(file, 0);
2587 
2588 		pr_cont("OK\n");
2589 	}
2590 
2591 	/* Now test at the sub system level */
2592 
2593 	pr_info("Running tests on trace event systems:\n");
2594 
2595 	list_for_each_entry(dir, &tr->systems, list) {
2596 
2597 		system = dir->subsystem;
2598 
2599 		/* the ftrace system is special, skip it */
2600 		if (strcmp(system->name, "ftrace") == 0)
2601 			continue;
2602 
2603 		pr_info("Testing event system %s: ", system->name);
2604 
2605 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2606 		if (WARN_ON_ONCE(ret)) {
2607 			pr_warn("error enabling system %s\n",
2608 				system->name);
2609 			continue;
2610 		}
2611 
2612 		event_test_stuff();
2613 
2614 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2615 		if (WARN_ON_ONCE(ret)) {
2616 			pr_warn("error disabling system %s\n",
2617 				system->name);
2618 			continue;
2619 		}
2620 
2621 		pr_cont("OK\n");
2622 	}
2623 
2624 	/* Test with all events enabled */
2625 
2626 	pr_info("Running tests on all trace events:\n");
2627 	pr_info("Testing all events: ");
2628 
2629 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2630 	if (WARN_ON_ONCE(ret)) {
2631 		pr_warn("error enabling all events\n");
2632 		return;
2633 	}
2634 
2635 	event_test_stuff();
2636 
2637 	/* reset sysname */
2638 	/* Reset all events */
2639 	if (WARN_ON_ONCE(ret)) {
2640 		pr_warn("error disabling all events\n");
2641 		return;
2642 	}
2643 
2644 	pr_cont("OK\n");
2645 }
2646 
2647 #ifdef CONFIG_FUNCTION_TRACER
2648 
2649 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2650 
2651 static void
2652 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2653 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
2654 {
2655 	struct ring_buffer_event *event;
2656 	struct ring_buffer *buffer;
2657 	struct ftrace_entry *entry;
2658 	unsigned long flags;
2659 	long disabled;
2660 	int cpu;
2661 	int pc;
2662 
2663 	pc = preempt_count();
2664 	preempt_disable_notrace();
2665 	cpu = raw_smp_processor_id();
2666 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2667 
2668 	if (disabled != 1)
2669 		goto out;
2670 
2671 	local_save_flags(flags);
2672 
2673 	event = trace_current_buffer_lock_reserve(&buffer,
2674 						  TRACE_FN, sizeof(*entry),
2675 						  flags, pc);
2676 	if (!event)
2677 		goto out;
2678 	entry	= ring_buffer_event_data(event);
2679 	entry->ip			= ip;
2680 	entry->parent_ip		= parent_ip;
2681 
2682 	trace_buffer_unlock_commit(buffer, event, flags, pc);
2683 
2684  out:
2685 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2686 	preempt_enable_notrace();
2687 }
2688 
2689 static struct ftrace_ops trace_ops __initdata  =
2690 {
2691 	.func = function_test_events_call,
2692 	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
2693 };
2694 
2695 static __init void event_trace_self_test_with_function(void)
2696 {
2697 	int ret;
2698 	ret = register_ftrace_function(&trace_ops);
2699 	if (WARN_ON(ret < 0)) {
2700 		pr_info("Failed to enable function tracer for event tests\n");
2701 		return;
2702 	}
2703 	pr_info("Running tests again, along with the function tracer\n");
2704 	event_trace_self_tests();
2705 	unregister_ftrace_function(&trace_ops);
2706 }
2707 #else
2708 static __init void event_trace_self_test_with_function(void)
2709 {
2710 }
2711 #endif
2712 
2713 static __init int event_trace_self_tests_init(void)
2714 {
2715 	if (!tracing_selftest_disabled) {
2716 		event_trace_self_tests();
2717 		event_trace_self_test_with_function();
2718 	}
2719 
2720 	return 0;
2721 }
2722 
2723 late_initcall(event_trace_self_tests_init);
2724 
2725 #endif
2726