xref: /linux/kernel/trace/trace_events.c (revision 2c97b5ae83dca56718774e7b4bf9640f05d11867)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * event tracer
4  *
5  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
6  *
7  *  - Added format output of fields of the trace point.
8  *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
9  *
10  */
11 
12 #define pr_fmt(fmt) fmt
13 
14 #include <linux/workqueue.h>
15 #include <linux/security.h>
16 #include <linux/spinlock.h>
17 #include <linux/kthread.h>
18 #include <linux/tracefs.h>
19 #include <linux/uaccess.h>
20 #include <linux/module.h>
21 #include <linux/ctype.h>
22 #include <linux/sort.h>
23 #include <linux/slab.h>
24 #include <linux/delay.h>
25 
26 #include <trace/events/sched.h>
27 
28 #include <asm/setup.h>
29 
30 #include "trace_output.h"
31 
32 #undef TRACE_SYSTEM
33 #define TRACE_SYSTEM "TRACE_SYSTEM"
34 
35 DEFINE_MUTEX(event_mutex);
36 
37 LIST_HEAD(ftrace_events);
38 static LIST_HEAD(ftrace_generic_fields);
39 static LIST_HEAD(ftrace_common_fields);
40 
41 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
42 
43 static struct kmem_cache *field_cachep;
44 static struct kmem_cache *file_cachep;
45 
46 static inline int system_refcount(struct event_subsystem *system)
47 {
48 	return system->ref_count;
49 }
50 
51 static int system_refcount_inc(struct event_subsystem *system)
52 {
53 	return system->ref_count++;
54 }
55 
56 static int system_refcount_dec(struct event_subsystem *system)
57 {
58 	return --system->ref_count;
59 }
60 
61 /* Double loops, do not use break, only gotos work */
62 #define do_for_each_event_file(tr, file)			\
63 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
64 		list_for_each_entry(file, &tr->events, list)
65 
66 #define do_for_each_event_file_safe(tr, file)			\
67 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
68 		struct trace_event_file *___n;				\
69 		list_for_each_entry_safe(file, ___n, &tr->events, list)
70 
71 #define while_for_each_event_file()		\
72 	}
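/*
 * Usage sketch: these macros expand to a nested loop, so a plain
 * 'break' only leaves the inner walk over tr->events and continues
 * with the next trace_array (see remove_event_from_tracers() below):
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call != call)
 *			continue;
 *		...
 *		break;	(moves on to the next trace_array)
 *	} while_for_each_event_file();
 */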
73 
74 static struct ftrace_event_field *
75 __find_event_field(struct list_head *head, char *name)
76 {
77 	struct ftrace_event_field *field;
78 
79 	list_for_each_entry(field, head, link) {
80 		if (!strcmp(field->name, name))
81 			return field;
82 	}
83 
84 	return NULL;
85 }
86 
87 struct ftrace_event_field *
88 trace_find_event_field(struct trace_event_call *call, char *name)
89 {
90 	struct ftrace_event_field *field;
91 	struct list_head *head;
92 
93 	head = trace_get_fields(call);
94 	field = __find_event_field(head, name);
95 	if (field)
96 		return field;
97 
98 	field = __find_event_field(&ftrace_generic_fields, name);
99 	if (field)
100 		return field;
101 
102 	return __find_event_field(&ftrace_common_fields, name);
103 }
104 
105 static int __trace_define_field(struct list_head *head, const char *type,
106 				const char *name, int offset, int size,
107 				int is_signed, int filter_type)
108 {
109 	struct ftrace_event_field *field;
110 
111 	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
112 	if (!field)
113 		return -ENOMEM;
114 
115 	field->name = name;
116 	field->type = type;
117 
118 	if (filter_type == FILTER_OTHER)
119 		field->filter_type = filter_assign_type(type);
120 	else
121 		field->filter_type = filter_type;
122 
123 	field->offset = offset;
124 	field->size = size;
125 	field->is_signed = is_signed;
126 
127 	list_add(&field->link, head);
128 
129 	return 0;
130 }
131 
132 int trace_define_field(struct trace_event_call *call, const char *type,
133 		       const char *name, int offset, int size, int is_signed,
134 		       int filter_type)
135 {
136 	struct list_head *head;
137 
138 	if (WARN_ON(!call->class))
139 		return 0;
140 
141 	head = trace_get_fields(call);
142 	return __trace_define_field(head, type, name, offset, size,
143 				    is_signed, filter_type);
144 }
145 EXPORT_SYMBOL_GPL(trace_define_field);
146 
147 #define __generic_field(type, item, filter_type)			\
148 	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
149 				   #item, 0, 0, is_signed_type(type),	\
150 				   filter_type);			\
151 	if (ret)							\
152 		return ret;
153 
154 #define __common_field(type, item)					\
155 	ret = __trace_define_field(&ftrace_common_fields, #type,	\
156 				   "common_" #item,			\
157 				   offsetof(typeof(ent), item),		\
158 				   sizeof(ent.item),			\
159 				   is_signed_type(type), FILTER_OTHER);	\
160 	if (ret)							\
161 		return ret;
162 
163 static int trace_define_generic_fields(void)
164 {
165 	int ret;
166 
167 	__generic_field(int, CPU, FILTER_CPU);
168 	__generic_field(int, cpu, FILTER_CPU);
169 	__generic_field(char *, COMM, FILTER_COMM);
170 	__generic_field(char *, comm, FILTER_COMM);
171 
172 	return ret;
173 }
174 
175 static int trace_define_common_fields(void)
176 {
177 	int ret;
178 	struct trace_entry ent;
179 
180 	__common_field(unsigned short, type);
181 	__common_field(unsigned char, flags);
182 	__common_field(unsigned char, preempt_count);
183 	__common_field(int, pid);
184 
185 	return ret;
186 }
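/*
 * The common fields above are prefixed with "common_" and show up at
 * the top of every event's "format" file, e.g.:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:int common_pid;	offset:4;	size:4;	signed:1;
 *
 * (offsets follow the layout of struct trace_entry)
 */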
187 
188 static void trace_destroy_fields(struct trace_event_call *call)
189 {
190 	struct ftrace_event_field *field, *next;
191 	struct list_head *head;
192 
193 	head = trace_get_fields(call);
194 	list_for_each_entry_safe(field, next, head, link) {
195 		list_del(&field->link);
196 		kmem_cache_free(field_cachep, field);
197 	}
198 }
199 
200 /*
201  * run-time version of trace_event_get_offsets_<call>() that returns the last
202  * accessible offset of trace fields excluding __dynamic_array bytes
203  */
204 int trace_event_get_offsets(struct trace_event_call *call)
205 {
206 	struct ftrace_event_field *tail;
207 	struct list_head *head;
208 
209 	head = trace_get_fields(call);
210 	/*
211 	 * head->next points to the last field with the largest offset,
212 	 * since it was added last by trace_define_field()
213 	 */
214 	tail = list_first_entry(head, struct ftrace_event_field, link);
215 	return tail->offset + tail->size;
216 }
217 
218 int trace_event_raw_init(struct trace_event_call *call)
219 {
220 	int id;
221 
222 	id = register_trace_event(&call->event);
223 	if (!id)
224 		return -ENODEV;
225 
226 	return 0;
227 }
228 EXPORT_SYMBOL_GPL(trace_event_raw_init);
229 
230 bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
231 {
232 	struct trace_array *tr = trace_file->tr;
233 	struct trace_array_cpu *data;
234 	struct trace_pid_list *pid_list;
235 
236 	pid_list = rcu_dereference_raw(tr->filtered_pids);
237 	if (!pid_list)
238 		return false;
239 
240 	data = this_cpu_ptr(tr->trace_buffer.data);
241 
242 	return data->ignore_pid;
243 }
244 EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
245 
246 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
247 				 struct trace_event_file *trace_file,
248 				 unsigned long len)
249 {
250 	struct trace_event_call *event_call = trace_file->event_call;
251 
252 	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
253 	    trace_event_ignore_this_pid(trace_file))
254 		return NULL;
255 
256 	local_save_flags(fbuffer->flags);
257 	fbuffer->pc = preempt_count();
258 	/*
259 	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
260 	 * preemption (adding one to the preempt_count). Since we are
261 	 * interested in the preempt_count at the time the tracepoint was
262 	 * hit, we need to subtract one to offset the increment.
263 	 */
264 	if (IS_ENABLED(CONFIG_PREEMPTION))
265 		fbuffer->pc--;
266 	fbuffer->trace_file = trace_file;
267 
268 	fbuffer->event =
269 		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
270 						event_call->event.type, len,
271 						fbuffer->flags, fbuffer->pc);
272 	if (!fbuffer->event)
273 		return NULL;
274 
275 	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
276 	return fbuffer->entry;
277 }
278 EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
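/*
 * The entry returned above is filled in by the caller and is expected
 * to be committed afterwards (in the generated tracepoint probes this
 * is done with trace_event_buffer_commit()).
 */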
279 
280 int trace_event_reg(struct trace_event_call *call,
281 		    enum trace_reg type, void *data)
282 {
283 	struct trace_event_file *file = data;
284 
285 	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
286 	switch (type) {
287 	case TRACE_REG_REGISTER:
288 		return tracepoint_probe_register(call->tp,
289 						 call->class->probe,
290 						 file);
291 	case TRACE_REG_UNREGISTER:
292 		tracepoint_probe_unregister(call->tp,
293 					    call->class->probe,
294 					    file);
295 		return 0;
296 
297 #ifdef CONFIG_PERF_EVENTS
298 	case TRACE_REG_PERF_REGISTER:
299 		return tracepoint_probe_register(call->tp,
300 						 call->class->perf_probe,
301 						 call);
302 	case TRACE_REG_PERF_UNREGISTER:
303 		tracepoint_probe_unregister(call->tp,
304 					    call->class->perf_probe,
305 					    call);
306 		return 0;
307 	case TRACE_REG_PERF_OPEN:
308 	case TRACE_REG_PERF_CLOSE:
309 	case TRACE_REG_PERF_ADD:
310 	case TRACE_REG_PERF_DEL:
311 		return 0;
312 #endif
313 	}
314 	return 0;
315 }
316 EXPORT_SYMBOL_GPL(trace_event_reg);
317 
318 void trace_event_enable_cmd_record(bool enable)
319 {
320 	struct trace_event_file *file;
321 	struct trace_array *tr;
322 
323 	mutex_lock(&event_mutex);
324 	do_for_each_event_file(tr, file) {
325 
326 		if (!(file->flags & EVENT_FILE_FL_ENABLED))
327 			continue;
328 
329 		if (enable) {
330 			tracing_start_cmdline_record();
331 			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
332 		} else {
333 			tracing_stop_cmdline_record();
334 			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
335 		}
336 	} while_for_each_event_file();
337 	mutex_unlock(&event_mutex);
338 }
339 
340 void trace_event_enable_tgid_record(bool enable)
341 {
342 	struct trace_event_file *file;
343 	struct trace_array *tr;
344 
345 	mutex_lock(&event_mutex);
346 	do_for_each_event_file(tr, file) {
347 		if (!(file->flags & EVENT_FILE_FL_ENABLED))
348 			continue;
349 
350 		if (enable) {
351 			tracing_start_tgid_record();
352 			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
353 		} else {
354 			tracing_stop_tgid_record();
355 			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
356 				  &file->flags);
357 		}
358 	} while_for_each_event_file();
359 	mutex_unlock(&event_mutex);
360 }
361 
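/*
 * A map of the per-file flags juggled below:
 *   ENABLED       - the tracepoint probe is registered and recording
 *   SOFT_MODE     - someone (e.g. a trigger) holds a soft reference (sm_ref)
 *   SOFT_DISABLED - the probe stays registered, but output is suppressed
 * Soft enabling keeps the tracepoint registered while suppressing the
 * record, so recording can later be switched on without re-registering.
 */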
362 static int __ftrace_event_enable_disable(struct trace_event_file *file,
363 					 int enable, int soft_disable)
364 {
365 	struct trace_event_call *call = file->event_call;
366 	struct trace_array *tr = file->tr;
367 	unsigned long file_flags = file->flags;
368 	int ret = 0;
369 	int disable;
370 
371 	switch (enable) {
372 	case 0:
373 		/*
374 		 * When soft_disable is set and enable is cleared, the sm_ref
375 		 * reference counter is decremented. If it reaches 0, we want
376 		 * to clear the SOFT_DISABLED flag but leave the event in the
377 		 * state that it was. That is, if the event was enabled and
378 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
379 		 * is set we do not want the event to be enabled before we
380 		 * clear the bit.
381 		 *
382 		 * When soft_disable is not set but the SOFT_MODE flag is,
383 		 * we do nothing. Do not disable the tracepoint, otherwise
384 		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
385 		 */
386 		if (soft_disable) {
387 			if (atomic_dec_return(&file->sm_ref) > 0)
388 				break;
389 			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
390 			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
391 		} else
392 			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
393 
394 		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
395 			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
396 			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
397 				tracing_stop_cmdline_record();
398 				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
399 			}
400 
401 			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
402 				tracing_stop_tgid_record();
403 				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
404 			}
405 
406 			call->class->reg(call, TRACE_REG_UNREGISTER, file);
407 		}
408 		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
409 		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
410 			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
411 		else
412 			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
413 		break;
414 	case 1:
415 		/*
416 		 * When soft_disable is set and enable is set, we want to
417 		 * register the tracepoint for the event, but leave the event
418 		 * as is. That means, if the event was already enabled, we do
419 		 * nothing (but set SOFT_MODE). If the event is disabled, we
420 		 * set SOFT_DISABLED before enabling the event tracepoint, so
421 		 * it still seems to be disabled.
422 		 */
423 		if (!soft_disable)
424 			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
425 		else {
426 			if (atomic_inc_return(&file->sm_ref) > 1)
427 				break;
428 			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
429 		}
430 
431 		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
432 			bool cmd = false, tgid = false;
433 
434 			/* Keep the event disabled when going to SOFT_MODE. */
435 			if (soft_disable)
436 				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
437 
438 			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
439 				cmd = true;
440 				tracing_start_cmdline_record();
441 				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
442 			}
443 
444 			if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
445 				tgid = true;
446 				tracing_start_tgid_record();
447 				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
448 			}
449 
450 			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
451 			if (ret) {
452 				if (cmd)
453 					tracing_stop_cmdline_record();
454 				if (tgid)
455 					tracing_stop_tgid_record();
456 				pr_info("event trace: Could not enable event %s\n",
457 					trace_event_name(call));
458 				break;
459 			}
460 			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
461 
462 			/* WAS_ENABLED gets set but never cleared. */
463 			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
464 		}
465 		break;
466 	}
467 
468 	/* Enable or disable use of trace_buffered_event */
469 	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
470 	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
471 		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
472 			trace_buffered_event_enable();
473 		else
474 			trace_buffered_event_disable();
475 	}
476 
477 	return ret;
478 }
479 
480 int trace_event_enable_disable(struct trace_event_file *file,
481 			       int enable, int soft_disable)
482 {
483 	return __ftrace_event_enable_disable(file, enable, soft_disable);
484 }
485 
486 static int ftrace_event_enable_disable(struct trace_event_file *file,
487 				       int enable)
488 {
489 	return __ftrace_event_enable_disable(file, enable, 0);
490 }
491 
492 static void ftrace_clear_events(struct trace_array *tr)
493 {
494 	struct trace_event_file *file;
495 
496 	mutex_lock(&event_mutex);
497 	list_for_each_entry(file, &tr->events, list) {
498 		ftrace_event_enable_disable(file, 0);
499 	}
500 	mutex_unlock(&event_mutex);
501 }
502 
503 static void
504 event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
505 {
506 	struct trace_pid_list *pid_list;
507 	struct trace_array *tr = data;
508 
509 	pid_list = rcu_dereference_raw(tr->filtered_pids);
510 	trace_filter_add_remove_task(pid_list, NULL, task);
511 }
512 
513 static void
514 event_filter_pid_sched_process_fork(void *data,
515 				    struct task_struct *self,
516 				    struct task_struct *task)
517 {
518 	struct trace_pid_list *pid_list;
519 	struct trace_array *tr = data;
520 
521 	pid_list = rcu_dereference_sched(tr->filtered_pids);
522 	trace_filter_add_remove_task(pid_list, self, task);
523 }
524 
525 void trace_event_follow_fork(struct trace_array *tr, bool enable)
526 {
527 	if (enable) {
528 		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
529 						       tr, INT_MIN);
530 		register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
531 						       tr, INT_MAX);
532 	} else {
533 		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
534 						    tr);
535 		unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
536 						    tr);
537 	}
538 }
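/*
 * Tracepoint probes run in descending priority order, so the fork
 * probe above (INT_MIN) is called after all other fork probes, and
 * the exit probe (INT_MAX) before all other exit probes.
 */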
539 
540 static void
541 event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
542 		    struct task_struct *prev, struct task_struct *next)
543 {
544 	struct trace_array *tr = data;
545 	struct trace_pid_list *pid_list;
546 
547 	pid_list = rcu_dereference_sched(tr->filtered_pids);
548 
549 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
550 		       trace_ignore_this_task(pid_list, prev) &&
551 		       trace_ignore_this_task(pid_list, next));
552 }
553 
554 static void
555 event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
556 		    struct task_struct *prev, struct task_struct *next)
557 {
558 	struct trace_array *tr = data;
559 	struct trace_pid_list *pid_list;
560 
561 	pid_list = rcu_dereference_sched(tr->filtered_pids);
562 
563 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
564 		       trace_ignore_this_task(pid_list, next));
565 }
566 
567 static void
568 event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
569 {
570 	struct trace_array *tr = data;
571 	struct trace_pid_list *pid_list;
572 
573 	/* Nothing to do if we are already tracing */
574 	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
575 		return;
576 
577 	pid_list = rcu_dereference_sched(tr->filtered_pids);
578 
579 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
580 		       trace_ignore_this_task(pid_list, task));
581 }
582 
583 static void
584 event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
585 {
586 	struct trace_array *tr = data;
587 	struct trace_pid_list *pid_list;
588 
589 	/* Nothing to do if we are not tracing */
590 	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
591 		return;
592 
593 	pid_list = rcu_dereference_sched(tr->filtered_pids);
594 
595 	/* Set tracing if current is enabled */
596 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
597 		       trace_ignore_this_task(pid_list, current));
598 }
599 
600 static void __ftrace_clear_event_pids(struct trace_array *tr)
601 {
602 	struct trace_pid_list *pid_list;
603 	struct trace_event_file *file;
604 	int cpu;
605 
606 	pid_list = rcu_dereference_protected(tr->filtered_pids,
607 					     lockdep_is_held(&event_mutex));
608 	if (!pid_list)
609 		return;
610 
611 	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
612 	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
613 
614 	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
615 	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
616 
617 	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
618 	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);
619 
620 	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
621 	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
622 
623 	list_for_each_entry(file, &tr->events, list) {
624 		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
625 	}
626 
627 	for_each_possible_cpu(cpu)
628 		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
629 
630 	rcu_assign_pointer(tr->filtered_pids, NULL);
631 
632 	/* Wait till all users are no longer using pid filtering */
633 	tracepoint_synchronize_unregister();
634 
635 	trace_free_pid_list(pid_list);
636 }
637 
638 static void ftrace_clear_event_pids(struct trace_array *tr)
639 {
640 	mutex_lock(&event_mutex);
641 	__ftrace_clear_event_pids(tr);
642 	mutex_unlock(&event_mutex);
643 }
644 
645 static void __put_system(struct event_subsystem *system)
646 {
647 	struct event_filter *filter = system->filter;
648 
649 	WARN_ON_ONCE(system_refcount(system) == 0);
650 	if (system_refcount_dec(system))
651 		return;
652 
653 	list_del(&system->list);
654 
655 	if (filter) {
656 		kfree(filter->filter_string);
657 		kfree(filter);
658 	}
659 	kfree_const(system->name);
660 	kfree(system);
661 }
662 
663 static void __get_system(struct event_subsystem *system)
664 {
665 	WARN_ON_ONCE(system_refcount(system) == 0);
666 	system_refcount_inc(system);
667 }
668 
669 static void __get_system_dir(struct trace_subsystem_dir *dir)
670 {
671 	WARN_ON_ONCE(dir->ref_count == 0);
672 	dir->ref_count++;
673 	__get_system(dir->subsystem);
674 }
675 
676 static void __put_system_dir(struct trace_subsystem_dir *dir)
677 {
678 	WARN_ON_ONCE(dir->ref_count == 0);
679 	/* If the subsystem is about to be freed, the dir must be too */
680 	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
681 
682 	__put_system(dir->subsystem);
683 	if (!--dir->ref_count)
684 		kfree(dir);
685 }
686 
687 static void put_system(struct trace_subsystem_dir *dir)
688 {
689 	mutex_lock(&event_mutex);
690 	__put_system_dir(dir);
691 	mutex_unlock(&event_mutex);
692 }
693 
694 static void remove_subsystem(struct trace_subsystem_dir *dir)
695 {
696 	if (!dir)
697 		return;
698 
699 	if (!--dir->nr_events) {
700 		tracefs_remove_recursive(dir->entry);
701 		list_del(&dir->list);
702 		__put_system_dir(dir);
703 	}
704 }
705 
706 static void remove_event_file_dir(struct trace_event_file *file)
707 {
708 	struct dentry *dir = file->dir;
709 	struct dentry *child;
710 
711 	if (dir) {
712 		spin_lock(&dir->d_lock);	/* probably unneeded */
713 		list_for_each_entry(child, &dir->d_subdirs, d_child) {
714 			if (d_really_is_positive(child))	/* probably unneeded */
715 				d_inode(child)->i_private = NULL;
716 		}
717 		spin_unlock(&dir->d_lock);
718 
719 		tracefs_remove_recursive(dir);
720 	}
721 
722 	list_del(&file->list);
723 	remove_subsystem(file->system);
724 	free_event_filter(file->filter);
725 	kmem_cache_free(file_cachep, file);
726 }
727 
728 /*
729  * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
730  */
731 static int
732 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
733 			      const char *sub, const char *event, int set)
734 {
735 	struct trace_event_file *file;
736 	struct trace_event_call *call;
737 	const char *name;
738 	int ret = -EINVAL;
739 	int eret = 0;
740 
741 	list_for_each_entry(file, &tr->events, list) {
742 
743 		call = file->event_call;
744 		name = trace_event_name(call);
745 
746 		if (!name || !call->class || !call->class->reg)
747 			continue;
748 
749 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
750 			continue;
751 
752 		if (match &&
753 		    strcmp(match, name) != 0 &&
754 		    strcmp(match, call->class->system) != 0)
755 			continue;
756 
757 		if (sub && strcmp(sub, call->class->system) != 0)
758 			continue;
759 
760 		if (event && strcmp(event, name) != 0)
761 			continue;
762 
763 		ret = ftrace_event_enable_disable(file, set);
764 
765 		/*
766 		 * Save the first error and return that. Some events
767 		 * may still have been enabled, but let the user
768 		 * know that something went wrong.
769 		 */
770 		if (ret && !eret)
771 			eret = ret;
772 
773 		ret = eret;
774 	}
775 
776 	return ret;
777 }
778 
779 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
780 				  const char *sub, const char *event, int set)
781 {
782 	int ret;
783 
784 	mutex_lock(&event_mutex);
785 	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
786 	mutex_unlock(&event_mutex);
787 
788 	return ret;
789 }
790 
791 int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
792 {
793 	char *event = NULL, *sub = NULL, *match;
794 	int ret;
795 
796 	if (!tr)
797 		return -ENOENT;
798 	/*
799 	 * The buf format can be <subsystem>:<event-name>
800 	 *  *:<event-name> means any event by that name.
801 	 *  :<event-name> is the same.
802 	 *
803 	 *  <subsystem>:* means all events in that subsystem
804 	 *  <subsystem>: means the same.
805 	 *
806 	 *  <name> (no ':') means all events in a subsystem with
807 	 *  the name <name> or any event that matches <name>
808 	 */
809 
810 	match = strsep(&buf, ":");
811 	if (buf) {
812 		sub = match;
813 		event = buf;
814 		match = NULL;
815 
816 		if (!strlen(sub) || strcmp(sub, "*") == 0)
817 			sub = NULL;
818 		if (!strlen(event) || strcmp(event, "*") == 0)
819 			event = NULL;
820 	}
821 
822 	ret = __ftrace_set_clr_event(tr, match, sub, event, set);
823 
824 	/* Put back the colon to allow this to be called again */
825 	if (buf)
826 		*(buf - 1) = ':';
827 
828 	return ret;
829 }
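/*
 * Examples, as written to the tracefs "set_event" file (which is
 * backed by ftrace_event_write() below):
 *
 *	echo 'sched:sched_switch' > set_event	# one event
 *	echo 'sched:' > set_event		# whole subsystem
 *	echo ':sched_switch' > set_event	# any event of that name
 *	echo '!sched:sched_switch' > set_event	# '!' disables
 */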
830 
831 /**
832  * trace_set_clr_event - enable or disable an event
833  * @system: system name to match (NULL for any system)
834  * @event: event name to match (NULL for all events, within system)
835  * @set: 1 to enable, 0 to disable
836  *
837  * This is a way for other parts of the kernel to enable or disable
838  * event recording.
839  *
840  * Returns 0 on success, -EINVAL if the parameters do not match any
841  * registered events.
842  */
843 int trace_set_clr_event(const char *system, const char *event, int set)
844 {
845 	struct trace_array *tr = top_trace_array();
846 
847 	if (!tr)
848 		return -ENODEV;
849 
850 	return __ftrace_set_clr_event(tr, NULL, system, event, set);
851 }
852 EXPORT_SYMBOL_GPL(trace_set_clr_event);
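/*
 * Example (in-kernel use):
 *
 *	ret = trace_set_clr_event("sched", NULL, 1);	  (all of "sched")
 *	ret = trace_set_clr_event("sched", "sched_switch", 0);	  (one event off)
 */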
853 
854 /**
855  * trace_array_set_clr_event - enable or disable an event for a trace array.
856  * @tr: concerned trace array.
857  * @system: system name to match (NULL for any system)
858  * @event: event name to match (NULL for all events, within system)
859  * @enable: true to enable, false to disable
860  *
861  * This is a way for other parts of the kernel to enable or disable
862  * event recording.
863  *
864  * Returns 0 on success, -EINVAL if the parameters do not match any
865  * registered events.
866  */
867 int trace_array_set_clr_event(struct trace_array *tr, const char *system,
868 		const char *event, bool enable)
869 {
870 	int set;
871 
872 	if (!tr)
873 		return -ENOENT;
874 
875 	set = enable ? 1 : 0;
876 	return __ftrace_set_clr_event(tr, NULL, system, event, set);
877 }
878 EXPORT_SYMBOL_GPL(trace_array_set_clr_event);
879 
880 /* 128 should be much more than enough */
881 #define EVENT_BUF_SIZE		127
882 
883 static ssize_t
884 ftrace_event_write(struct file *file, const char __user *ubuf,
885 		   size_t cnt, loff_t *ppos)
886 {
887 	struct trace_parser parser;
888 	struct seq_file *m = file->private_data;
889 	struct trace_array *tr = m->private;
890 	ssize_t read, ret;
891 
892 	if (!cnt)
893 		return 0;
894 
895 	ret = tracing_update_buffers();
896 	if (ret < 0)
897 		return ret;
898 
899 	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
900 		return -ENOMEM;
901 
902 	read = trace_get_user(&parser, ubuf, cnt, ppos);
903 
904 	if (read >= 0 && trace_parser_loaded(&parser)) {
905 		int set = 1;
906 
907 		if (*parser.buffer == '!')
908 			set = 0;
909 
910 		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
911 		if (ret)
912 			goto out_put;
913 	}
914 
915 	ret = read;
916 
917  out_put:
918 	trace_parser_put(&parser);
919 
920 	return ret;
921 }
922 
923 static void *
924 t_next(struct seq_file *m, void *v, loff_t *pos)
925 {
926 	struct trace_event_file *file = v;
927 	struct trace_event_call *call;
928 	struct trace_array *tr = m->private;
929 
930 	(*pos)++;
931 
932 	list_for_each_entry_continue(file, &tr->events, list) {
933 		call = file->event_call;
934 		/*
935 		 * The ftrace subsystem is for showing formats only.
936 		 * Its events cannot be enabled or disabled via the event files.
937 		 */
938 		if (call->class && call->class->reg &&
939 		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
940 			return file;
941 	}
942 
943 	return NULL;
944 }
945 
946 static void *t_start(struct seq_file *m, loff_t *pos)
947 {
948 	struct trace_event_file *file;
949 	struct trace_array *tr = m->private;
950 	loff_t l;
951 
952 	mutex_lock(&event_mutex);
953 
954 	file = list_entry(&tr->events, struct trace_event_file, list);
955 	for (l = 0; l <= *pos; ) {
956 		file = t_next(m, file, &l);
957 		if (!file)
958 			break;
959 	}
960 	return file;
961 }
962 
963 static void *
964 s_next(struct seq_file *m, void *v, loff_t *pos)
965 {
966 	struct trace_event_file *file = v;
967 	struct trace_array *tr = m->private;
968 
969 	(*pos)++;
970 
971 	list_for_each_entry_continue(file, &tr->events, list) {
972 		if (file->flags & EVENT_FILE_FL_ENABLED)
973 			return file;
974 	}
975 
976 	return NULL;
977 }
978 
979 static void *s_start(struct seq_file *m, loff_t *pos)
980 {
981 	struct trace_event_file *file;
982 	struct trace_array *tr = m->private;
983 	loff_t l;
984 
985 	mutex_lock(&event_mutex);
986 
987 	file = list_entry(&tr->events, struct trace_event_file, list);
988 	for (l = 0; l <= *pos; ) {
989 		file = s_next(m, file, &l);
990 		if (!file)
991 			break;
992 	}
993 	return file;
994 }
995 
996 static int t_show(struct seq_file *m, void *v)
997 {
998 	struct trace_event_file *file = v;
999 	struct trace_event_call *call = file->event_call;
1000 
1001 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
1002 		seq_printf(m, "%s:", call->class->system);
1003 	seq_printf(m, "%s\n", trace_event_name(call));
1004 
1005 	return 0;
1006 }
1007 
1008 static void t_stop(struct seq_file *m, void *p)
1009 {
1010 	mutex_unlock(&event_mutex);
1011 }
1012 
1013 static void *
1014 p_next(struct seq_file *m, void *v, loff_t *pos)
1015 {
1016 	struct trace_array *tr = m->private;
1017 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
1018 
1019 	return trace_pid_next(pid_list, v, pos);
1020 }
1021 
1022 static void *p_start(struct seq_file *m, loff_t *pos)
1023 	__acquires(RCU)
1024 {
1025 	struct trace_pid_list *pid_list;
1026 	struct trace_array *tr = m->private;
1027 
1028 	/*
1029 	 * Grab the mutex, to keep calls to p_next() having the same
1030 	 * tr->filtered_pids as p_start() has.
1031 	 * If we just passed the tr->filtered_pids around, then RCU would
1032 	 * have been enough, but doing that makes things more complex.
1033 	 */
1034 	mutex_lock(&event_mutex);
1035 	rcu_read_lock_sched();
1036 
1037 	pid_list = rcu_dereference_sched(tr->filtered_pids);
1038 
1039 	if (!pid_list)
1040 		return NULL;
1041 
1042 	return trace_pid_start(pid_list, pos);
1043 }
1044 
1045 static void p_stop(struct seq_file *m, void *p)
1046 	__releases(RCU)
1047 {
1048 	rcu_read_unlock_sched();
1049 	mutex_unlock(&event_mutex);
1050 }
1051 
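/*
 * The "enable" file below reads back "0" or "1", with a '*' appended
 * while the event is in soft mode or soft-disabled, e.g. "0*" means a
 * trigger still holds the event registered with output suppressed.
 */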
1052 static ssize_t
1053 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1054 		  loff_t *ppos)
1055 {
1056 	struct trace_event_file *file;
1057 	unsigned long flags;
1058 	char buf[4] = "0";
1059 
1060 	mutex_lock(&event_mutex);
1061 	file = event_file_data(filp);
1062 	if (likely(file))
1063 		flags = file->flags;
1064 	mutex_unlock(&event_mutex);
1065 
1066 	if (!file)
1067 		return -ENODEV;
1068 
1069 	if (flags & EVENT_FILE_FL_ENABLED &&
1070 	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
1071 		strcpy(buf, "1");
1072 
1073 	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
1074 	    flags & EVENT_FILE_FL_SOFT_MODE)
1075 		strcat(buf, "*");
1076 
1077 	strcat(buf, "\n");
1078 
1079 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
1080 }
1081 
1082 static ssize_t
1083 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
1084 		   loff_t *ppos)
1085 {
1086 	struct trace_event_file *file;
1087 	unsigned long val;
1088 	int ret;
1089 
1090 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1091 	if (ret)
1092 		return ret;
1093 
1094 	ret = tracing_update_buffers();
1095 	if (ret < 0)
1096 		return ret;
1097 
1098 	switch (val) {
1099 	case 0:
1100 	case 1:
1101 		ret = -ENODEV;
1102 		mutex_lock(&event_mutex);
1103 		file = event_file_data(filp);
1104 		if (likely(file))
1105 			ret = ftrace_event_enable_disable(file, val);
1106 		mutex_unlock(&event_mutex);
1107 		break;
1108 
1109 	default:
1110 		return -EINVAL;
1111 	}
1112 
1113 	*ppos += cnt;
1114 
1115 	return ret ? ret : cnt;
1116 }
1117 
1118 static ssize_t
1119 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1120 		   loff_t *ppos)
1121 {
1122 	const char set_to_char[4] = { '?', '0', '1', 'X' };
1123 	struct trace_subsystem_dir *dir = filp->private_data;
1124 	struct event_subsystem *system = dir->subsystem;
1125 	struct trace_event_call *call;
1126 	struct trace_event_file *file;
1127 	struct trace_array *tr = dir->tr;
1128 	char buf[2];
1129 	int set = 0;
1130 	int ret;
1131 
1132 	mutex_lock(&event_mutex);
1133 	list_for_each_entry(file, &tr->events, list) {
1134 		call = file->event_call;
1135 		if (!trace_event_name(call) || !call->class || !call->class->reg)
1136 			continue;
1137 
1138 		if (system && strcmp(call->class->system, system->name) != 0)
1139 			continue;
1140 
1141 		/*
1142 		 * Find out if all the events are set, all are
1143 		 * cleared, or if we have a mixture (bit 0: saw a
1144 		 * disabled event, bit 1: saw an enabled event).
1145 		 */
1146 		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));
1147 
1148 		/*
1149 		 * If we have a mixture, no need to look further.
1150 		 */
1151 		if (set == 3)
1152 			break;
1153 	}
1154 	mutex_unlock(&event_mutex);
1155 
1156 	buf[0] = set_to_char[set];
1157 	buf[1] = '\n';
1158 
1159 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
1160 
1161 	return ret;
1162 }
1163 
1164 static ssize_t
1165 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
1166 		    loff_t *ppos)
1167 {
1168 	struct trace_subsystem_dir *dir = filp->private_data;
1169 	struct event_subsystem *system = dir->subsystem;
1170 	const char *name = NULL;
1171 	unsigned long val;
1172 	ssize_t ret;
1173 
1174 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1175 	if (ret)
1176 		return ret;
1177 
1178 	ret = tracing_update_buffers();
1179 	if (ret < 0)
1180 		return ret;
1181 
1182 	if (val != 0 && val != 1)
1183 		return -EINVAL;
1184 
1185 	/*
1186 	 * Opening of "enable" adds a ref count to system,
1187 	 * so the name is safe to use.
1188 	 */
1189 	if (system)
1190 		name = system->name;
1191 
1192 	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
1193 	if (ret)
1194 		goto out;
1195 
1196 	ret = cnt;
1197 
1198 out:
1199 	*ppos += cnt;
1200 
1201 	return ret;
1202 }
1203 
1204 enum {
1205 	FORMAT_HEADER		= 1,
1206 	FORMAT_FIELD_SEPERATOR	= 2,
1207 	FORMAT_PRINTFMT		= 3,
1208 };
1209 
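/*
 * The "format" file below is walked as a small state machine:
 * FORMAT_HEADER first, then the common fields, FORMAT_FIELD_SEPERATOR,
 * the event's own fields, and finally FORMAT_PRINTFMT.  f_next()
 * follows node->prev, which (since trace_define_field() prepends with
 * list_add()) yields the fields in definition order.
 */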
1210 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
1211 {
1212 	struct trace_event_call *call = event_file_data(m->private);
1213 	struct list_head *common_head = &ftrace_common_fields;
1214 	struct list_head *head = trace_get_fields(call);
1215 	struct list_head *node = v;
1216 
1217 	(*pos)++;
1218 
1219 	switch ((unsigned long)v) {
1220 	case FORMAT_HEADER:
1221 		node = common_head;
1222 		break;
1223 
1224 	case FORMAT_FIELD_SEPERATOR:
1225 		node = head;
1226 		break;
1227 
1228 	case FORMAT_PRINTFMT:
1229 		/* all done */
1230 		return NULL;
1231 	}
1232 
1233 	node = node->prev;
1234 	if (node == common_head)
1235 		return (void *)FORMAT_FIELD_SEPERATOR;
1236 	else if (node == head)
1237 		return (void *)FORMAT_PRINTFMT;
1238 	else
1239 		return node;
1240 }
1241 
1242 static int f_show(struct seq_file *m, void *v)
1243 {
1244 	struct trace_event_call *call = event_file_data(m->private);
1245 	struct ftrace_event_field *field;
1246 	const char *array_descriptor;
1247 
1248 	switch ((unsigned long)v) {
1249 	case FORMAT_HEADER:
1250 		seq_printf(m, "name: %s\n", trace_event_name(call));
1251 		seq_printf(m, "ID: %d\n", call->event.type);
1252 		seq_puts(m, "format:\n");
1253 		return 0;
1254 
1255 	case FORMAT_FIELD_SEPERATOR:
1256 		seq_putc(m, '\n');
1257 		return 0;
1258 
1259 	case FORMAT_PRINTFMT:
1260 		seq_printf(m, "\nprint fmt: %s\n",
1261 			   call->print_fmt);
1262 		return 0;
1263 	}
1264 
1265 	field = list_entry(v, struct ftrace_event_field, link);
1266 	/*
1267 	 * Smartly shows the array type (except dynamic array).
1268 	 * Normal:
1269 	 *	field:TYPE VAR
1270 	 * If TYPE := TYPE[LEN], it is shown:
1271 	 *	field:TYPE VAR[LEN]
1272 	 */
1273 	array_descriptor = strchr(field->type, '[');
1274 
1275 	if (str_has_prefix(field->type, "__data_loc"))
1276 		array_descriptor = NULL;
1277 
1278 	if (!array_descriptor)
1279 		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1280 			   field->type, field->name, field->offset,
1281 			   field->size, !!field->is_signed);
1282 	else
1283 		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1284 			   (int)(array_descriptor - field->type),
1285 			   field->type, field->name,
1286 			   array_descriptor, field->offset,
1287 			   field->size, !!field->is_signed);
1288 
1289 	return 0;
1290 }
1291 
1292 static void *f_start(struct seq_file *m, loff_t *pos)
1293 {
1294 	void *p = (void *)FORMAT_HEADER;
1295 	loff_t l = 0;
1296 
1297 	/* ->stop() is called even if ->start() fails */
1298 	mutex_lock(&event_mutex);
1299 	if (!event_file_data(m->private))
1300 		return ERR_PTR(-ENODEV);
1301 
1302 	while (l < *pos && p)
1303 		p = f_next(m, p, &l);
1304 
1305 	return p;
1306 }
1307 
1308 static void f_stop(struct seq_file *m, void *p)
1309 {
1310 	mutex_unlock(&event_mutex);
1311 }
1312 
1313 static const struct seq_operations trace_format_seq_ops = {
1314 	.start		= f_start,
1315 	.next		= f_next,
1316 	.stop		= f_stop,
1317 	.show		= f_show,
1318 };
1319 
1320 static int trace_format_open(struct inode *inode, struct file *file)
1321 {
1322 	struct seq_file *m;
1323 	int ret;
1324 
1325 	/* Do we want to hide event format files on tracefs lockdown? */
1326 
1327 	ret = seq_open(file, &trace_format_seq_ops);
1328 	if (ret < 0)
1329 		return ret;
1330 
1331 	m = file->private_data;
1332 	m->private = file;
1333 
1334 	return 0;
1335 }
1336 
1337 static ssize_t
1338 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1339 {
1340 	int id = (long)event_file_data(filp);
1341 	char buf[32];
1342 	int len;
1343 
1344 	if (unlikely(!id))
1345 		return -ENODEV;
1346 
1347 	len = sprintf(buf, "%d\n", id);
1348 
1349 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
1350 }
1351 
1352 static ssize_t
1353 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1354 		  loff_t *ppos)
1355 {
1356 	struct trace_event_file *file;
1357 	struct trace_seq *s;
1358 	int r = -ENODEV;
1359 
1360 	if (*ppos)
1361 		return 0;
1362 
1363 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1364 
1365 	if (!s)
1366 		return -ENOMEM;
1367 
1368 	trace_seq_init(s);
1369 
1370 	mutex_lock(&event_mutex);
1371 	file = event_file_data(filp);
1372 	if (file)
1373 		print_event_filter(file, s);
1374 	mutex_unlock(&event_mutex);
1375 
1376 	if (file)
1377 		r = simple_read_from_buffer(ubuf, cnt, ppos,
1378 					    s->buffer, trace_seq_used(s));
1379 
1380 	kfree(s);
1381 
1382 	return r;
1383 }
1384 
1385 static ssize_t
1386 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1387 		   loff_t *ppos)
1388 {
1389 	struct trace_event_file *file;
1390 	char *buf;
1391 	int err = -ENODEV;
1392 
1393 	if (cnt >= PAGE_SIZE)
1394 		return -EINVAL;
1395 
1396 	buf = memdup_user_nul(ubuf, cnt);
1397 	if (IS_ERR(buf))
1398 		return PTR_ERR(buf);
1399 
1400 	mutex_lock(&event_mutex);
1401 	file = event_file_data(filp);
1402 	if (file)
1403 		err = apply_event_filter(file, buf);
1404 	mutex_unlock(&event_mutex);
1405 
1406 	kfree(buf);
1407 	if (err < 0)
1408 		return err;
1409 
1410 	*ppos += cnt;
1411 
1412 	return cnt;
1413 }
1414 
1415 static LIST_HEAD(event_subsystems);
1416 
1417 static int subsystem_open(struct inode *inode, struct file *filp)
1418 {
1419 	struct event_subsystem *system = NULL;
1420 	struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1421 	struct trace_array *tr;
1422 	int ret;
1423 
1424 	if (tracing_is_disabled())
1425 		return -ENODEV;
1426 
1427 	/* Make sure the system still exists */
1428 	mutex_lock(&event_mutex);
1429 	mutex_lock(&trace_types_lock);
1430 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1431 		list_for_each_entry(dir, &tr->systems, list) {
1432 			if (dir == inode->i_private) {
1433 				/* Don't open systems with no events */
1434 				if (dir->nr_events) {
1435 					__get_system_dir(dir);
1436 					system = dir->subsystem;
1437 				}
1438 				goto exit_loop;
1439 			}
1440 		}
1441 	}
1442  exit_loop:
1443 	mutex_unlock(&trace_types_lock);
1444 	mutex_unlock(&event_mutex);
1445 
1446 	if (!system)
1447 		return -ENODEV;
1448 
1449 	/* Some versions of gcc think dir can be uninitialized here */
1450 	WARN_ON(!dir);
1451 
1452 	/* Still need to increment the ref count of the system */
1453 	if (trace_array_get(tr) < 0) {
1454 		put_system(dir);
1455 		return -ENODEV;
1456 	}
1457 
1458 	ret = tracing_open_generic(inode, filp);
1459 	if (ret < 0) {
1460 		trace_array_put(tr);
1461 		put_system(dir);
1462 	}
1463 
1464 	return ret;
1465 }
1466 
1467 static int system_tr_open(struct inode *inode, struct file *filp)
1468 {
1469 	struct trace_subsystem_dir *dir;
1470 	struct trace_array *tr = inode->i_private;
1471 	int ret;
1472 
1473 	/* Make a temporary dir that has no system but points to tr */
1474 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1475 	if (!dir)
1476 		return -ENOMEM;
1477 
1478 	ret = tracing_open_generic_tr(inode, filp);
1479 	if (ret < 0) {
1480 		kfree(dir);
1481 		return ret;
1482 	}
1483 	dir->tr = tr;
1484 	filp->private_data = dir;
1485 
1486 	return 0;
1487 }
1488 
1489 static int subsystem_release(struct inode *inode, struct file *file)
1490 {
1491 	struct trace_subsystem_dir *dir = file->private_data;
1492 
1493 	trace_array_put(dir->tr);
1494 
1495 	/*
1496 	 * If dir->subsystem is NULL, then this is a temporary
1497 	 * descriptor that was made for a trace_array to enable
1498 	 * all subsystems.
1499 	 */
1500 	if (dir->subsystem)
1501 		put_system(dir);
1502 	else
1503 		kfree(dir);
1504 
1505 	return 0;
1506 }
1507 
1508 static ssize_t
1509 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1510 		      loff_t *ppos)
1511 {
1512 	struct trace_subsystem_dir *dir = filp->private_data;
1513 	struct event_subsystem *system = dir->subsystem;
1514 	struct trace_seq *s;
1515 	int r;
1516 
1517 	if (*ppos)
1518 		return 0;
1519 
1520 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1521 	if (!s)
1522 		return -ENOMEM;
1523 
1524 	trace_seq_init(s);
1525 
1526 	print_subsystem_event_filter(system, s);
1527 	r = simple_read_from_buffer(ubuf, cnt, ppos,
1528 				    s->buffer, trace_seq_used(s));
1529 
1530 	kfree(s);
1531 
1532 	return r;
1533 }
1534 
1535 static ssize_t
1536 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1537 		       loff_t *ppos)
1538 {
1539 	struct trace_subsystem_dir *dir = filp->private_data;
1540 	char *buf;
1541 	int err;
1542 
1543 	if (cnt >= PAGE_SIZE)
1544 		return -EINVAL;
1545 
1546 	buf = memdup_user_nul(ubuf, cnt);
1547 	if (IS_ERR(buf))
1548 		return PTR_ERR(buf);
1549 
1550 	err = apply_subsystem_event_filter(dir, buf);
1551 	kfree(buf);
1552 	if (err < 0)
1553 		return err;
1554 
1555 	*ppos += cnt;
1556 
1557 	return cnt;
1558 }
1559 
1560 static ssize_t
1561 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1562 {
1563 	int (*func)(struct trace_seq *s) = filp->private_data;
1564 	struct trace_seq *s;
1565 	int r;
1566 
1567 	if (*ppos)
1568 		return 0;
1569 
1570 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1571 	if (!s)
1572 		return -ENOMEM;
1573 
1574 	trace_seq_init(s);
1575 
1576 	func(s);
1577 	r = simple_read_from_buffer(ubuf, cnt, ppos,
1578 				    s->buffer, trace_seq_used(s));
1579 
1580 	kfree(s);
1581 
1582 	return r;
1583 }
1584 
1585 static void ignore_task_cpu(void *data)
1586 {
1587 	struct trace_array *tr = data;
1588 	struct trace_pid_list *pid_list;
1589 
1590 	/*
1591 	 * This function is called by on_each_cpu() while the
1592 	 * event_mutex is held.
1593 	 */
1594 	pid_list = rcu_dereference_protected(tr->filtered_pids,
1595 					     mutex_is_locked(&event_mutex));
1596 
1597 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
1598 		       trace_ignore_this_task(pid_list, current));
1599 }
1600 
1601 static ssize_t
1602 ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
1603 		       size_t cnt, loff_t *ppos)
1604 {
1605 	struct seq_file *m = filp->private_data;
1606 	struct trace_array *tr = m->private;
1607 	struct trace_pid_list *filtered_pids = NULL;
1608 	struct trace_pid_list *pid_list;
1609 	struct trace_event_file *file;
1610 	ssize_t ret;
1611 
1612 	if (!cnt)
1613 		return 0;
1614 
1615 	ret = tracing_update_buffers();
1616 	if (ret < 0)
1617 		return ret;
1618 
1619 	mutex_lock(&event_mutex);
1620 
1621 	filtered_pids = rcu_dereference_protected(tr->filtered_pids,
1622 					     lockdep_is_held(&event_mutex));
1623 
1624 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
1625 	if (ret < 0)
1626 		goto out;
1627 
1628 	rcu_assign_pointer(tr->filtered_pids, pid_list);
1629 
1630 	list_for_each_entry(file, &tr->events, list) {
1631 		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
1632 	}
1633 
1634 	if (filtered_pids) {
1635 		tracepoint_synchronize_unregister();
1636 		trace_free_pid_list(filtered_pids);
1637 	} else if (pid_list) {
1638 		/*
1639 		 * Register a probe that is called before all other probes
1640 		 * to set ignore_pid if next or prev do not match.
1641 		 * Register a probe that is called after all other probes
1642 		 * to only keep ignore_pid set if next pid matches.
1643 		 */
1644 		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1645 						 tr, INT_MAX);
1646 		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1647 						 tr, 0);
1648 
1649 		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1650 						 tr, INT_MAX);
1651 		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1652 						 tr, 0);
1653 
1654 		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1655 						     tr, INT_MAX);
1656 		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1657 						     tr, 0);
1658 
1659 		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1660 						 tr, INT_MAX);
1661 		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1662 						 tr, 0);
1663 	}
1664 
1665 	/*
1666 	 * Ignoring of pids is done at task switch. But we have to
1667 	 * check for those tasks that are currently running.
1668 	 * Always do this in case a pid was appended or removed.
1669 	 */
1670 	on_each_cpu(ignore_task_cpu, tr, 1);
1671 
1672  out:
1673 	mutex_unlock(&event_mutex);
1674 
1675 	if (ret > 0)
1676 		*ppos += ret;
1677 
1678 	return ret;
1679 }
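/*
 * Example use of the write handler above, via the tracefs
 * "set_event_pid" file:
 *
 *	echo 123 244 > set_event_pid	# trace only these pids
 *	echo 555 >> set_event_pid	# append another pid
 */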
1680 
1681 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1682 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1683 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
1684 static int ftrace_event_release(struct inode *inode, struct file *file);
1685 
1686 static const struct seq_operations show_event_seq_ops = {
1687 	.start = t_start,
1688 	.next = t_next,
1689 	.show = t_show,
1690 	.stop = t_stop,
1691 };
1692 
1693 static const struct seq_operations show_set_event_seq_ops = {
1694 	.start = s_start,
1695 	.next = s_next,
1696 	.show = t_show,
1697 	.stop = t_stop,
1698 };
1699 
1700 static const struct seq_operations show_set_pid_seq_ops = {
1701 	.start = p_start,
1702 	.next = p_next,
1703 	.show = trace_pid_show,
1704 	.stop = p_stop,
1705 };
1706 
1707 static const struct file_operations ftrace_avail_fops = {
1708 	.open = ftrace_event_avail_open,
1709 	.read = seq_read,
1710 	.llseek = seq_lseek,
1711 	.release = seq_release,
1712 };
1713 
1714 static const struct file_operations ftrace_set_event_fops = {
1715 	.open = ftrace_event_set_open,
1716 	.read = seq_read,
1717 	.write = ftrace_event_write,
1718 	.llseek = seq_lseek,
1719 	.release = ftrace_event_release,
1720 };
1721 
1722 static const struct file_operations ftrace_set_event_pid_fops = {
1723 	.open = ftrace_event_set_pid_open,
1724 	.read = seq_read,
1725 	.write = ftrace_event_pid_write,
1726 	.llseek = seq_lseek,
1727 	.release = ftrace_event_release,
1728 };
1729 
1730 static const struct file_operations ftrace_enable_fops = {
1731 	.open = tracing_open_generic,
1732 	.read = event_enable_read,
1733 	.write = event_enable_write,
1734 	.llseek = default_llseek,
1735 };
1736 
1737 static const struct file_operations ftrace_event_format_fops = {
1738 	.open = trace_format_open,
1739 	.read = seq_read,
1740 	.llseek = seq_lseek,
1741 	.release = seq_release,
1742 };
1743 
1744 static const struct file_operations ftrace_event_id_fops = {
1745 	.read = event_id_read,
1746 	.llseek = default_llseek,
1747 };
1748 
1749 static const struct file_operations ftrace_event_filter_fops = {
1750 	.open = tracing_open_generic,
1751 	.read = event_filter_read,
1752 	.write = event_filter_write,
1753 	.llseek = default_llseek,
1754 };
1755 
1756 static const struct file_operations ftrace_subsystem_filter_fops = {
1757 	.open = subsystem_open,
1758 	.read = subsystem_filter_read,
1759 	.write = subsystem_filter_write,
1760 	.llseek = default_llseek,
1761 	.release = subsystem_release,
1762 };
1763 
1764 static const struct file_operations ftrace_system_enable_fops = {
1765 	.open = subsystem_open,
1766 	.read = system_enable_read,
1767 	.write = system_enable_write,
1768 	.llseek = default_llseek,
1769 	.release = subsystem_release,
1770 };
1771 
1772 static const struct file_operations ftrace_tr_enable_fops = {
1773 	.open = system_tr_open,
1774 	.read = system_enable_read,
1775 	.write = system_enable_write,
1776 	.llseek = default_llseek,
1777 	.release = subsystem_release,
1778 };
1779 
1780 static const struct file_operations ftrace_show_header_fops = {
1781 	.open = tracing_open_generic,
1782 	.read = show_header,
1783 	.llseek = default_llseek,
1784 };
1785 
1786 static int
1787 ftrace_event_open(struct inode *inode, struct file *file,
1788 		  const struct seq_operations *seq_ops)
1789 {
1790 	struct seq_file *m;
1791 	int ret;
1792 
1793 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1794 	if (ret)
1795 		return ret;
1796 
1797 	ret = seq_open(file, seq_ops);
1798 	if (ret < 0)
1799 		return ret;
1800 	m = file->private_data;
1801 	/* copy tr over to seq ops */
1802 	m->private = inode->i_private;
1803 
1804 	return ret;
1805 }
1806 
1807 static int ftrace_event_release(struct inode *inode, struct file *file)
1808 {
1809 	struct trace_array *tr = inode->i_private;
1810 
1811 	trace_array_put(tr);
1812 
1813 	return seq_release(inode, file);
1814 }
1815 
1816 static int
1817 ftrace_event_avail_open(struct inode *inode, struct file *file)
1818 {
1819 	const struct seq_operations *seq_ops = &show_event_seq_ops;
1820 
1821 	/* Checks for tracefs lockdown */
1822 	return ftrace_event_open(inode, file, seq_ops);
1823 }
1824 
1825 static int
1826 ftrace_event_set_open(struct inode *inode, struct file *file)
1827 {
1828 	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1829 	struct trace_array *tr = inode->i_private;
1830 	int ret;
1831 
1832 	ret = tracing_check_open_get_tr(tr);
1833 	if (ret)
1834 		return ret;
1835 
1836 	if ((file->f_mode & FMODE_WRITE) &&
1837 	    (file->f_flags & O_TRUNC))
1838 		ftrace_clear_events(tr);
1839 
1840 	ret = ftrace_event_open(inode, file, seq_ops);
1841 	if (ret < 0)
1842 		trace_array_put(tr);
1843 	return ret;
1844 }
1845 
1846 static int
1847 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1848 {
1849 	const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1850 	struct trace_array *tr = inode->i_private;
1851 	int ret;
1852 
1853 	ret = tracing_check_open_get_tr(tr);
1854 	if (ret)
1855 		return ret;
1856 
1857 	if ((file->f_mode & FMODE_WRITE) &&
1858 	    (file->f_flags & O_TRUNC))
1859 		ftrace_clear_event_pids(tr);
1860 
1861 	ret = ftrace_event_open(inode, file, seq_ops);
1862 	if (ret < 0)
1863 		trace_array_put(tr);
1864 	return ret;
1865 }
1866 
1867 static struct event_subsystem *
1868 create_new_subsystem(const char *name)
1869 {
1870 	struct event_subsystem *system;
1871 
1872 	/* need to create new entry */
1873 	system = kmalloc(sizeof(*system), GFP_KERNEL);
1874 	if (!system)
1875 		return NULL;
1876 
1877 	system->ref_count = 1;
1878 
1879 	/* kstrdup_const() only duplicates the name if it is dynamic (kprobes and modules) */
1880 	system->name = kstrdup_const(name, GFP_KERNEL);
1881 	if (!system->name)
1882 		goto out_free;
1883 
1884 	system->filter = NULL;
1885 
1886 	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1887 	if (!system->filter)
1888 		goto out_free;
1889 
1890 	list_add(&system->list, &event_subsystems);
1891 
1892 	return system;
1893 
1894  out_free:
1895 	kfree_const(system->name);
1896 	kfree(system);
1897 	return NULL;
1898 }
1899 
1900 static struct dentry *
1901 event_subsystem_dir(struct trace_array *tr, const char *name,
1902 		    struct trace_event_file *file, struct dentry *parent)
1903 {
1904 	struct trace_subsystem_dir *dir;
1905 	struct event_subsystem *system;
1906 	struct dentry *entry;
1907 
1908 	/* First see if we did not already create this dir */
1909 	list_for_each_entry(dir, &tr->systems, list) {
1910 		system = dir->subsystem;
1911 		if (strcmp(system->name, name) == 0) {
1912 			dir->nr_events++;
1913 			file->system = dir;
1914 			return dir->entry;
1915 		}
1916 	}
1917 
1918 	/* Now see if the system itself exists. */
1919 	list_for_each_entry(system, &event_subsystems, list) {
1920 		if (strcmp(system->name, name) == 0)
1921 			break;
1922 	}
1923 	/* Reset system variable when not found */
1924 	if (&system->list == &event_subsystems)
1925 		system = NULL;
1926 
1927 	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1928 	if (!dir)
1929 		goto out_fail;
1930 
1931 	if (!system) {
1932 		system = create_new_subsystem(name);
1933 		if (!system)
1934 			goto out_free;
1935 	} else
1936 		__get_system(system);
1937 
1938 	dir->entry = tracefs_create_dir(name, parent);
1939 	if (!dir->entry) {
1940 		pr_warn("Failed to create system directory %s\n", name);
1941 		__put_system(system);
1942 		goto out_free;
1943 	}
1944 
1945 	dir->tr = tr;
1946 	dir->ref_count = 1;
1947 	dir->nr_events = 1;
1948 	dir->subsystem = system;
1949 	file->system = dir;
1950 
1951 	entry = tracefs_create_file("filter", 0644, dir->entry, dir,
1952 				    &ftrace_subsystem_filter_fops);
1953 	if (!entry) {
1954 		kfree(system->filter);
1955 		system->filter = NULL;
1956 		pr_warn("Could not create tracefs '%s/filter' entry\n", name);
1957 	}
1958 
1959 	trace_create_file("enable", 0644, dir->entry, dir,
1960 			  &ftrace_system_enable_fops);
1961 
1962 	list_add(&dir->list, &tr->systems);
1963 
1964 	return dir->entry;
1965 
1966  out_free:
1967 	kfree(dir);
1968  out_fail:
1969 	/* Only print this message if the failure was a memory allocation */
1970 	if (!dir || !system)
1971 		pr_warn("No memory to create event subsystem %s\n", name);
1972 	return NULL;
1973 }
1974 
1975 static int
1976 event_create_dir(struct dentry *parent, struct trace_event_file *file)
1977 {
1978 	struct trace_event_call *call = file->event_call;
1979 	struct trace_array *tr = file->tr;
1980 	struct list_head *head;
1981 	struct dentry *d_events;
1982 	const char *name;
1983 	int ret;
1984 
1985 	/*
1986 	 * If the trace point header did not define TRACE_SYSTEM
1987 	 * then the system would be called "TRACE_SYSTEM".
1988 	 */
1989 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1990 		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1991 		if (!d_events)
1992 			return -ENOMEM;
1993 	} else
1994 		d_events = parent;
1995 
1996 	name = trace_event_name(call);
1997 	file->dir = tracefs_create_dir(name, d_events);
1998 	if (!file->dir) {
1999 		pr_warn("Could not create tracefs '%s' directory\n", name);
2000 		return -1;
2001 	}
2002 
2003 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2004 		trace_create_file("enable", 0644, file->dir, file,
2005 				  &ftrace_enable_fops);
2006 
2007 #ifdef CONFIG_PERF_EVENTS
2008 	if (call->event.type && call->class->reg)
2009 		trace_create_file("id", 0444, file->dir,
2010 				  (void *)(long)call->event.type,
2011 				  &ftrace_event_id_fops);
2012 #endif
2013 
2014 	/*
2015 	 * Other events may have the same class. Only update
2016 	 * the fields if they are not already defined.
2017 	 */
2018 	head = trace_get_fields(call);
2019 	if (list_empty(head)) {
2020 		ret = call->class->define_fields(call);
2021 		if (ret < 0) {
2022 			pr_warn("Could not initialize trace point events/%s\n",
2023 				name);
2024 			return -1;
2025 		}
2026 	}
2027 
2028 	/*
2029 	 * Only event directories that can be enabled should have
2030 	 * triggers or filters.
2031 	 */
2032 	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
2033 		trace_create_file("filter", 0644, file->dir, file,
2034 				  &ftrace_event_filter_fops);
2035 
2036 		trace_create_file("trigger", 0644, file->dir, file,
2037 				  &event_trigger_fops);
2038 	}
2039 
2040 #ifdef CONFIG_HIST_TRIGGERS
2041 	trace_create_file("hist", 0444, file->dir, file,
2042 			  &event_hist_fops);
2043 #endif
2044 	trace_create_file("format", 0444, file->dir, call,
2045 			  &ftrace_event_format_fops);
2046 
2047 	return 0;
2048 }
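
/*
 * As an illustration (using sched:sched_switch as the example event),
 * a successful event_create_dir() leaves this layout behind:
 *
 *	events/sched/sched_switch/enable
 *	events/sched/sched_switch/id		(CONFIG_PERF_EVENTS)
 *	events/sched/sched_switch/filter
 *	events/sched/sched_switch/trigger
 *	events/sched/sched_switch/hist		(CONFIG_HIST_TRIGGERS)
 *	events/sched/sched_switch/format
 */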
2049 
2050 static void remove_event_from_tracers(struct trace_event_call *call)
2051 {
2052 	struct trace_event_file *file;
2053 	struct trace_array *tr;
2054 
2055 	do_for_each_event_file_safe(tr, file) {
2056 		if (file->event_call != call)
2057 			continue;
2058 
2059 		remove_event_file_dir(file);
2060 		/*
2061 		 * The do_for_each_event_file_safe() is
2062 		 * a double loop. After finding the call for this
2063 		 * trace_array, we use break to jump to the next
2064 		 * trace_array.
2065 		 */
2066 		break;
2067 	} while_for_each_event_file();
2068 }
2069 
2070 static void event_remove(struct trace_event_call *call)
2071 {
2072 	struct trace_array *tr;
2073 	struct trace_event_file *file;
2074 
2075 	do_for_each_event_file(tr, file) {
2076 		if (file->event_call != call)
2077 			continue;
2078 
2079 		if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2080 			tr->clear_trace = true;
2081 
2082 		ftrace_event_enable_disable(file, 0);
2083 		/*
2084 		 * The do_for_each_event_file() is
2085 		 * a double loop. After finding the call for this
2086 		 * trace_array, we use break to jump to the next
2087 		 * trace_array.
2088 		 */
2089 		break;
2090 	} while_for_each_event_file();
2091 
2092 	if (call->event.funcs)
2093 		__unregister_trace_event(&call->event);
2094 	remove_event_from_tracers(call);
2095 	list_del(&call->list);
2096 }
2097 
2098 static int event_init(struct trace_event_call *call)
2099 {
2100 	int ret = 0;
2101 	const char *name;
2102 
2103 	name = trace_event_name(call);
2104 	if (WARN_ON(!name))
2105 		return -EINVAL;
2106 
2107 	if (call->class->raw_init) {
2108 		ret = call->class->raw_init(call);
2109 		if (ret < 0 && ret != -ENOSYS)
2110 			pr_warn("Could not initialize trace events/%s\n", name);
2111 	}
2112 
2113 	return ret;
2114 }
2115 
2116 static int
2117 __register_event(struct trace_event_call *call, struct module *mod)
2118 {
2119 	int ret;
2120 
2121 	ret = event_init(call);
2122 	if (ret < 0)
2123 		return ret;
2124 
2125 	list_add(&call->list, &ftrace_events);
2126 	call->mod = mod;
2127 
2128 	return 0;
2129 }
2130 
2131 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
2132 {
2133 	int rlen;
2134 	int elen;
2135 
2136 	/* Find the length of the eval value as a string */
2137 	elen = snprintf(ptr, 0, "%ld", map->eval_value);
2138 	/* Make sure there's enough room to replace the string with the value */
2139 	if (len < elen)
2140 		return NULL;
2141 
2142 	snprintf(ptr, elen + 1, "%ld", map->eval_value);
2143 
2144 	/* Get the length of what follows the eval string */
2145 	rlen = strlen(ptr + len);
2146 	memmove(ptr + elen, ptr + len, rlen);
2147 	/* Make sure we end the new string */
2148 	ptr[elen + rlen] = 0;
2149 
2150 	return ptr + elen;
2151 }
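
/*
 * A small worked example (buffer contents hypothetical): given an eval
 * map entry { .eval_string = "HI_SOFTIRQ", .eval_value = 0 } and ptr
 * sitting on the 'H', eval_replace() rewrites
 *
 *	"{ HI_SOFTIRQ, ..."  into  "{ 0, ..."
 *
 * by printing the value over the name, shifting the tail left, and
 * returning a pointer just past the inserted "0".
 */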
2152 
2153 static void update_event_printk(struct trace_event_call *call,
2154 				struct trace_eval_map *map)
2155 {
2156 	char *ptr;
2157 	int quote = 0;
2158 	int len = strlen(map->eval_string);
2159 
2160 	for (ptr = call->print_fmt; *ptr; ptr++) {
2161 		if (*ptr == '\\') {
2162 			ptr++;
2163 			/* paranoid */
2164 			if (!*ptr)
2165 				break;
2166 			continue;
2167 		}
2168 		if (*ptr == '"') {
2169 			quote ^= 1;
2170 			continue;
2171 		}
2172 		if (quote)
2173 			continue;
2174 		if (isdigit(*ptr)) {
2175 			/* skip numbers */
2176 			do {
2177 				ptr++;
2178 				/* Check for alpha chars like ULL */
2179 			} while (isalnum(*ptr));
2180 			if (!*ptr)
2181 				break;
2182 			/*
2183 			 * A number must have some kind of delimiter after
2184 			 * it, and we can ignore that too.
2185 			 */
2186 			continue;
2187 		}
2188 		if (isalpha(*ptr) || *ptr == '_') {
2189 			if (strncmp(map->eval_string, ptr, len) == 0 &&
2190 			    !isalnum(ptr[len]) && ptr[len] != '_') {
2191 				ptr = eval_replace(ptr, map, len);
2192 				/* enum/sizeof string smaller than value */
2193 				if (WARN_ON_ONCE(!ptr))
2194 					return;
2195 				/*
2196 				 * No need to decrement here, as eval_replace()
2197 				 * returns the pointer to the character past
2198 				 * the eval, and two evals cannot be placed
2199 				 * back to back without something in between.
2200 				 * We can skip that something in between.
2201 				 */
2202 				continue;
2203 			}
2204 		skip_more:
2205 			do {
2206 				ptr++;
2207 			} while (isalnum(*ptr) || *ptr == '_');
2208 			if (!*ptr)
2209 				break;
2210 			/*
2211 			 * If what comes after this variable is a '.' or
2212 			 * '->' then we can continue to ignore that string.
2213 			 */
2214 			if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2215 				ptr += *ptr == '.' ? 1 : 2;
2216 				if (!*ptr)
2217 					break;
2218 				goto skip_more;
2219 			}
2220 			/*
2221 			 * Once again, we can skip the delimiter that came
2222 			 * after the string.
2223 			 */
2224 			continue;
2225 		}
2226 	}
2227 }
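
/*
 * Example of a full print_fmt rewrite, abridged from the softirq
 * events (which register their vector names with TRACE_DEFINE_ENUM):
 *
 *  before: "vec=%u [action=%s]", REC->vec,
 *		__print_symbolic(REC->vec, { HI_SOFTIRQ, "HI" })
 *  after:  "vec=%u [action=%s]", REC->vec,
 *		__print_symbolic(REC->vec, { 0, "HI" })
 *
 * The "HI" literal survives because the scanner above skips anything
 * inside double quotes.
 */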
2228 
2229 void trace_event_eval_update(struct trace_eval_map **map, int len)
2230 {
2231 	struct trace_event_call *call, *p;
2232 	const char *last_system = NULL;
2233 	bool first = false;
2234 	int last_i;
2235 	int i;
2236 
2237 	down_write(&trace_event_sem);
2238 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
2239 		/* events are usually grouped together by system */
2240 		if (!last_system || call->class->system != last_system) {
2241 			first = true;
2242 			last_i = 0;
2243 			last_system = call->class->system;
2244 		}
2245 
2246 		/*
2247 		 * Since calls are grouped by system, the likelihood that the
2248 		 * next call in the iteration belongs to the same system as the
2249 		 * previous call is high. As an optimization, we skip searching
2250 		 * for a map[] that matches the call's system if the last call
2251 		 * was from the same system. That's what last_i is for. If the
2252 		 * call has the same system as the previous call, then last_i
2253 		 * will be the index of the first map[] that has a matching
2254 		 * system.
2255 		 */
2256 		for (i = last_i; i < len; i++) {
2257 			if (call->class->system == map[i]->system) {
2258 				/* Save the first system if need be */
2259 				if (first) {
2260 					last_i = i;
2261 					first = false;
2262 				}
2263 				update_event_printk(call, map[i]);
2264 			}
2265 		}
2266 	}
2267 	up_write(&trace_event_sem);
2268 }
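
/*
 * E.g. if map[] is laid out as { "sched", "sched", "irq", "irq" }, the
 * first call from the "irq" system scans from index 0 and records
 * last_i = 2; every following "irq" call then starts directly at 2.
 */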
2269 
2270 static struct trace_event_file *
2271 trace_create_new_event(struct trace_event_call *call,
2272 		       struct trace_array *tr)
2273 {
2274 	struct trace_event_file *file;
2275 
2276 	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2277 	if (!file)
2278 		return NULL;
2279 
2280 	file->event_call = call;
2281 	file->tr = tr;
2282 	atomic_set(&file->sm_ref, 0);
2283 	atomic_set(&file->tm_ref, 0);
2284 	INIT_LIST_HEAD(&file->triggers);
2285 	list_add(&file->list, &tr->events);
2286 
2287 	return file;
2288 }
2289 
2290 /* Add an event to a trace directory */
2291 static int
2292 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
2293 {
2294 	struct trace_event_file *file;
2295 
2296 	file = trace_create_new_event(call, tr);
2297 	if (!file)
2298 		return -ENOMEM;
2299 
2300 	return event_create_dir(tr->event_dir, file);
2301 }
2302 
2303 /*
2304  * Just create a descriptor for early init. A descriptor is required
2305  * for enabling events at boot. We want to enable events before
2306  * the filesystem is initialized.
2307  */
2308 static __init int
2309 __trace_early_add_new_event(struct trace_event_call *call,
2310 			    struct trace_array *tr)
2311 {
2312 	struct trace_event_file *file;
2313 
2314 	file = trace_create_new_event(call, tr);
2315 	if (!file)
2316 		return -ENOMEM;
2317 
2318 	return 0;
2319 }
2320 
2321 struct ftrace_module_file_ops;
2322 static void __add_event_to_tracers(struct trace_event_call *call);
2323 
2324 /* Add an additional event_call dynamically */
2325 int trace_add_event_call(struct trace_event_call *call)
2326 {
2327 	int ret;
2328 	lockdep_assert_held(&event_mutex);
2329 
2330 	mutex_lock(&trace_types_lock);
2331 
2332 	ret = __register_event(call, NULL);
2333 	if (ret >= 0)
2334 		__add_event_to_tracers(call);
2335 
2336 	mutex_unlock(&trace_types_lock);
2337 	return ret;
2338 }
2339 
2340 /*
2341  * Must be called with trace_types_lock, event_mutex and
2342  * trace_event_sem held.
2343  */
2344 static void __trace_remove_event_call(struct trace_event_call *call)
2345 {
2346 	event_remove(call);
2347 	trace_destroy_fields(call);
2348 	free_event_filter(call->filter);
2349 	call->filter = NULL;
2350 }
2351 
2352 static int probe_remove_event_call(struct trace_event_call *call)
2353 {
2354 	struct trace_array *tr;
2355 	struct trace_event_file *file;
2356 
2357 #ifdef CONFIG_PERF_EVENTS
2358 	if (call->perf_refcount)
2359 		return -EBUSY;
2360 #endif
2361 	do_for_each_event_file(tr, file) {
2362 		if (file->event_call != call)
2363 			continue;
2364 		/*
2365 		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
2366 		 * that we are about to do, as EVENT_FILE_FL_SOFT_MODE can suppress
2367 		 * TRACE_REG_UNREGISTER.
2368 		 */
2369 		if (file->flags & EVENT_FILE_FL_ENABLED)
2370 			return -EBUSY;
2371 		/*
2372 		 * The do_for_each_event_file() is
2373 		 * a double loop. After finding the call for this
2374 		 * trace_array, we use break to jump to the next
2375 		 * trace_array.
2376 		 */
2377 		break;
2378 	} while_for_each_event_file();
2379 
2380 	__trace_remove_event_call(call);
2381 
2382 	return 0;
2383 }
2384 
2385 /* Remove an event_call */
2386 int trace_remove_event_call(struct trace_event_call *call)
2387 {
2388 	int ret;
2389 
2390 	lockdep_assert_held(&event_mutex);
2391 
2392 	mutex_lock(&trace_types_lock);
2393 	down_write(&trace_event_sem);
2394 	ret = probe_remove_event_call(call);
2395 	up_write(&trace_event_sem);
2396 	mutex_unlock(&trace_types_lock);
2397 
2398 	return ret;
2399 }
2400 
2401 #define for_each_event(event, start, end)			\
2402 	for (event = start;					\
2403 	     (unsigned long)event < (unsigned long)end;		\
2404 	     event++)
2405 
2406 #ifdef CONFIG_MODULES
2407 
2408 static void trace_module_add_events(struct module *mod)
2409 {
2410 	struct trace_event_call **call, **start, **end;
2411 
2412 	if (!mod->num_trace_events)
2413 		return;
2414 
2415 	/* Don't create events for modules with a bad taint */
2416 	if (trace_module_has_bad_taint(mod)) {
2417 		pr_err("%s: module has bad taint, not creating trace events\n",
2418 		       mod->name);
2419 		return;
2420 	}
2421 
2422 	start = mod->trace_events;
2423 	end = mod->trace_events + mod->num_trace_events;
2424 
2425 	for_each_event(call, start, end) {
2426 		__register_event(*call, mod);
2427 		__add_event_to_tracers(*call);
2428 	}
2429 }
2430 
2431 static void trace_module_remove_events(struct module *mod)
2432 {
2433 	struct trace_event_call *call, *p;
2434 
2435 	down_write(&trace_event_sem);
2436 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
2437 		if (call->mod == mod)
2438 			__trace_remove_event_call(call);
2439 	}
2440 	up_write(&trace_event_sem);
2441 
2442 	/*
2443 	 * It is safest to reset the ring buffer if the module being unloaded
2444 	 * registered any events that were used. The only worry is if
2445 	 * a new module gets loaded, and takes on the same id as the events
2446 	 * of this module. When printing out the buffer, traced events left
2447 	 * over from this module may be passed to the new module events and
2448 	 * over from this module may be decoded by the new module's events,
2449 	 * and unexpected results may occur.
2450 	tracing_reset_all_online_cpus();
2451 }
2452 
2453 static int trace_module_notify(struct notifier_block *self,
2454 			       unsigned long val, void *data)
2455 {
2456 	struct module *mod = data;
2457 
2458 	mutex_lock(&event_mutex);
2459 	mutex_lock(&trace_types_lock);
2460 	switch (val) {
2461 	case MODULE_STATE_COMING:
2462 		trace_module_add_events(mod);
2463 		break;
2464 	case MODULE_STATE_GOING:
2465 		trace_module_remove_events(mod);
2466 		break;
2467 	}
2468 	mutex_unlock(&trace_types_lock);
2469 	mutex_unlock(&event_mutex);
2470 
2471 	return 0;
2472 }
2473 
2474 static struct notifier_block trace_module_nb = {
2475 	.notifier_call = trace_module_notify,
2476 	.priority = 1, /* higher than trace.c module notify */
2477 };
2478 #endif /* CONFIG_MODULES */
2479 
2480 /* Create a new event directory structure for a trace directory. */
2481 static void
2482 __trace_add_event_dirs(struct trace_array *tr)
2483 {
2484 	struct trace_event_call *call;
2485 	int ret;
2486 
2487 	list_for_each_entry(call, &ftrace_events, list) {
2488 		ret = __trace_add_new_event(call, tr);
2489 		if (ret < 0)
2490 			pr_warn("Could not create directory for event %s\n",
2491 				trace_event_name(call));
2492 	}
2493 }
2494 
2495 /* Returns any file that matches the system and event */
2496 struct trace_event_file *
2497 __find_event_file(struct trace_array *tr, const char *system, const char *event)
2498 {
2499 	struct trace_event_file *file;
2500 	struct trace_event_call *call;
2501 	const char *name;
2502 
2503 	list_for_each_entry(file, &tr->events, list) {
2504 
2505 		call = file->event_call;
2506 		name = trace_event_name(call);
2507 
2508 		if (!name || !call->class)
2509 			continue;
2510 
2511 		if (strcmp(event, name) == 0 &&
2512 		    strcmp(system, call->class->system) == 0)
2513 			return file;
2514 	}
2515 	return NULL;
2516 }
2517 
2518 /* Returns a valid trace event file that matches the system and event */
2519 struct trace_event_file *
2520 find_event_file(struct trace_array *tr, const char *system, const char *event)
2521 {
2522 	struct trace_event_file *file;
2523 
2524 	file = __find_event_file(tr, system, event);
2525 	if (!file || !file->event_call->class->reg ||
2526 	    file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2527 		return NULL;
2528 
2529 	return file;
2530 }
2531 
2532 #ifdef CONFIG_DYNAMIC_FTRACE
2533 
2534 /* Avoid typos */
2535 #define ENABLE_EVENT_STR	"enable_event"
2536 #define DISABLE_EVENT_STR	"disable_event"
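
/*
 * These implement the documented set_ftrace_filter commands. For
 * example (function and event picked purely for illustration):
 *
 *	echo 'schedule:enable_event:sched:sched_switch:5' > set_ftrace_filter
 *
 * arms a probe that enables sched:sched_switch on the next 5 hits of
 * schedule(); leave off the ':5' for an unlimited probe, and prefix
 * the line with '!' to remove it. See event_enable_func() below.
 */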
2537 
2538 struct event_probe_data {
2539 	struct trace_event_file	*file;
2540 	unsigned long			count;
2541 	int				ref;
2542 	bool				enable;
2543 };
2544 
2545 static void update_event_probe(struct event_probe_data *data)
2546 {
2547 	if (data->enable)
2548 		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2549 	else
2550 		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2551 }
2552 
2553 static void
2554 event_enable_probe(unsigned long ip, unsigned long parent_ip,
2555 		   struct trace_array *tr, struct ftrace_probe_ops *ops,
2556 		   void *data)
2557 {
2558 	struct ftrace_func_mapper *mapper = data;
2559 	struct event_probe_data *edata;
2560 	void **pdata;
2561 
2562 	pdata = ftrace_func_mapper_find_ip(mapper, ip);
2563 	if (!pdata || !*pdata)
2564 		return;
2565 
2566 	edata = *pdata;
2567 	update_event_probe(edata);
2568 }
2569 
2570 static void
2571 event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
2572 			 struct trace_array *tr, struct ftrace_probe_ops *ops,
2573 			 void *data)
2574 {
2575 	struct ftrace_func_mapper *mapper = data;
2576 	struct event_probe_data *edata;
2577 	void **pdata;
2578 
2579 	pdata = ftrace_func_mapper_find_ip(mapper, ip);
2580 	if (!pdata || !*pdata)
2581 		return;
2582 
2583 	edata = *pdata;
2584 
2585 	if (!edata->count)
2586 		return;
2587 
2588 	/* Skip if the event is already in the state we want to switch to */
2589 	if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
2590 		return;
2591 
2592 	if (edata->count != -1)
2593 		(edata->count)--;
2594 
2595 	update_event_probe(edata);
2596 }
2597 
2598 static int
2599 event_enable_print(struct seq_file *m, unsigned long ip,
2600 		   struct ftrace_probe_ops *ops, void *data)
2601 {
2602 	struct ftrace_func_mapper *mapper = data;
2603 	struct event_probe_data *edata;
2604 	void **pdata;
2605 
2606 	pdata = ftrace_func_mapper_find_ip(mapper, ip);
2607 
2608 	if (WARN_ON_ONCE(!pdata || !*pdata))
2609 		return 0;
2610 
2611 	edata = *pdata;
2612 
2613 	seq_printf(m, "%ps:", (void *)ip);
2614 
2615 	seq_printf(m, "%s:%s:%s",
2616 		   edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2617 		   edata->file->event_call->class->system,
2618 		   trace_event_name(edata->file->event_call));
2619 
2620 	if (edata->count == -1)
2621 		seq_puts(m, ":unlimited\n");
2622 	else
2623 		seq_printf(m, ":count=%ld\n", edata->count);
2624 
2625 	return 0;
2626 }
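
/*
 * With the probe from the example above registered, reading
 * set_ftrace_filter would show a line shaped like:
 *
 *	schedule:enable_event:sched:sched_switch:count=5
 *
 * with ":unlimited" in place of the count when none was given.
 */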
2627 
2628 static int
2629 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
2630 		  unsigned long ip, void *init_data, void **data)
2631 {
2632 	struct ftrace_func_mapper *mapper = *data;
2633 	struct event_probe_data *edata = init_data;
2634 	int ret;
2635 
2636 	if (!mapper) {
2637 		mapper = allocate_ftrace_func_mapper();
2638 		if (!mapper)
2639 			return -ENODEV;
2640 		*data = mapper;
2641 	}
2642 
2643 	ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
2644 	if (ret < 0)
2645 		return ret;
2646 
2647 	edata->ref++;
2648 
2649 	return 0;
2650 }
2651 
2652 static int free_probe_data(void *data)
2653 {
2654 	struct event_probe_data *edata = data;
2655 
2656 	edata->ref--;
2657 	if (!edata->ref) {
2658 		/* Remove the SOFT_MODE flag */
2659 		__ftrace_event_enable_disable(edata->file, 0, 1);
2660 		module_put(edata->file->event_call->mod);
2661 		kfree(edata);
2662 	}
2663 	return 0;
2664 }
2665 
2666 static void
2667 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
2668 		  unsigned long ip, void *data)
2669 {
2670 	struct ftrace_func_mapper *mapper = data;
2671 	struct event_probe_data *edata;
2672 
2673 	if (!ip) {
2674 		if (!mapper)
2675 			return;
2676 		free_ftrace_func_mapper(mapper, free_probe_data);
2677 		return;
2678 	}
2679 
2680 	edata = ftrace_func_mapper_remove_ip(mapper, ip);
2681 
2682 	if (WARN_ON_ONCE(!edata))
2683 		return;
2684 
2685 	if (WARN_ON_ONCE(edata->ref <= 0))
2686 		return;
2687 
2688 	free_probe_data(edata);
2689 }
2690 
2691 static struct ftrace_probe_ops event_enable_probe_ops = {
2692 	.func			= event_enable_probe,
2693 	.print			= event_enable_print,
2694 	.init			= event_enable_init,
2695 	.free			= event_enable_free,
2696 };
2697 
2698 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2699 	.func			= event_enable_count_probe,
2700 	.print			= event_enable_print,
2701 	.init			= event_enable_init,
2702 	.free			= event_enable_free,
2703 };
2704 
2705 static struct ftrace_probe_ops event_disable_probe_ops = {
2706 	.func			= event_enable_probe,
2707 	.print			= event_enable_print,
2708 	.init			= event_enable_init,
2709 	.free			= event_enable_free,
2710 };
2711 
2712 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2713 	.func			= event_enable_count_probe,
2714 	.print			= event_enable_print,
2715 	.init			= event_enable_init,
2716 	.free			= event_enable_free,
2717 };
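
/*
 * Note that the enable and disable variants share the same callbacks;
 * the direction of the toggle is carried in event_probe_data->enable,
 * which event_enable_func() below sets from the command name.
 */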
2718 
2719 static int
2720 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
2721 		  char *glob, char *cmd, char *param, int enabled)
2722 {
2723 	struct trace_event_file *file;
2724 	struct ftrace_probe_ops *ops;
2725 	struct event_probe_data *data;
2726 	const char *system;
2727 	const char *event;
2728 	char *number;
2729 	bool enable;
2730 	int ret;
2731 
2732 	if (!tr)
2733 		return -ENODEV;
2734 
2735 	/* hash funcs only work with set_ftrace_filter */
2736 	if (!enabled || !param)
2737 		return -EINVAL;
2738 
2739 	system = strsep(&param, ":");
2740 	if (!param)
2741 		return -EINVAL;
2742 
2743 	event = strsep(&param, ":");
2744 
2745 	mutex_lock(&event_mutex);
2746 
2747 	ret = -EINVAL;
2748 	file = find_event_file(tr, system, event);
2749 	if (!file)
2750 		goto out;
2751 
2752 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2753 
2754 	if (enable)
2755 		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2756 	else
2757 		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2758 
2759 	if (glob[0] == '!') {
2760 		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
2761 		goto out;
2762 	}
2763 
2764 	ret = -ENOMEM;
2765 
2766 	data = kzalloc(sizeof(*data), GFP_KERNEL);
2767 	if (!data)
2768 		goto out;
2769 
2770 	data->enable = enable;
2771 	data->count = -1;
2772 	data->file = file;
2773 
2774 	if (!param)
2775 		goto out_reg;
2776 
2777 	number = strsep(&param, ":");
2778 
2779 	ret = -EINVAL;
2780 	if (!strlen(number))
2781 		goto out_free;
2782 
2783 	/*
2784 	 * The remainder of the param is the number of times the
2785 	 * probe may toggle the event before going inactive.
2786 	 */
2787 	ret = kstrtoul(number, 0, &data->count);
2788 	if (ret)
2789 		goto out_free;
2790 
2791  out_reg:
2792 	/* Don't let event modules unload while probe registered */
2793 	ret = try_module_get(file->event_call->mod);
2794 	if (!ret) {
2795 		ret = -EBUSY;
2796 		goto out_free;
2797 	}
2798 
2799 	ret = __ftrace_event_enable_disable(file, 1, 1);
2800 	if (ret < 0)
2801 		goto out_put;
2802 
2803 	ret = register_ftrace_function_probe(glob, tr, ops, data);
2804 	/*
2805 	 * On success, the above returns the number of functions enabled,
2806 	 * but if it didn't find any functions it returns zero.
2807 	 * Consider no functions a failure too.
2808 	 */
2809 	if (!ret) {
2810 		ret = -ENOENT;
2811 		goto out_disable;
2812 	} else if (ret < 0)
2813 		goto out_disable;
2814 	/* Just return zero, not the number of enabled functions */
2815 	ret = 0;
2816  out:
2817 	mutex_unlock(&event_mutex);
2818 	return ret;
2819 
2820  out_disable:
2821 	__ftrace_event_enable_disable(file, 0, 1);
2822  out_put:
2823 	module_put(file->event_call->mod);
2824  out_free:
2825 	kfree(data);
2826 	goto out;
2827 }
2828 
2829 static struct ftrace_func_command event_enable_cmd = {
2830 	.name			= ENABLE_EVENT_STR,
2831 	.func			= event_enable_func,
2832 };
2833 
2834 static struct ftrace_func_command event_disable_cmd = {
2835 	.name			= DISABLE_EVENT_STR,
2836 	.func			= event_enable_func,
2837 };
2838 
2839 static __init int register_event_cmds(void)
2840 {
2841 	int ret;
2842 
2843 	ret = register_ftrace_command(&event_enable_cmd);
2844 	if (WARN_ON(ret < 0))
2845 		return ret;
2846 	ret = register_ftrace_command(&event_disable_cmd);
2847 	if (WARN_ON(ret < 0))
2848 		unregister_ftrace_command(&event_enable_cmd);
2849 	return ret;
2850 }
2851 #else
2852 static inline int register_event_cmds(void) { return 0; }
2853 #endif /* CONFIG_DYNAMIC_FTRACE */
2854 
2855 /*
2856  * The top level array has already had its trace_event_file
2857  * descriptors created in order to allow for early events to
2858  * be recorded. This function is called after tracefs has been
2859  * initialized, and we now have to create the files associated
2860  * with the events.
2861  */
2862 static __init void
2863 __trace_early_add_event_dirs(struct trace_array *tr)
2864 {
2865 	struct trace_event_file *file;
2866 	int ret;
2867 
2869 	list_for_each_entry(file, &tr->events, list) {
2870 		ret = event_create_dir(tr->event_dir, file);
2871 		if (ret < 0)
2872 			pr_warn("Could not create directory for event %s\n",
2873 				trace_event_name(file->event_call));
2874 	}
2875 }
2876 
2877 /*
2878  * For early boot up, the top trace array needs to have
2879  * a list of events that can be enabled. This must be done before
2880  * the filesystem is set up in order to allow events to be traced
2881  * early.
2882  */
2883 static __init void
2884 __trace_early_add_events(struct trace_array *tr)
2885 {
2886 	struct trace_event_call *call;
2887 	int ret;
2888 
2889 	list_for_each_entry(call, &ftrace_events, list) {
2890 		/* Early boot up should not have any modules loaded */
2891 		if (WARN_ON_ONCE(call->mod))
2892 			continue;
2893 
2894 		ret = __trace_early_add_new_event(call, tr);
2895 		if (ret < 0)
2896 			pr_warn("Could not create early event %s\n",
2897 				trace_event_name(call));
2898 	}
2899 }
2900 
2901 /* Remove the event directory structure for a trace directory. */
2902 static void
2903 __trace_remove_event_dirs(struct trace_array *tr)
2904 {
2905 	struct trace_event_file *file, *next;
2906 
2907 	list_for_each_entry_safe(file, next, &tr->events, list)
2908 		remove_event_file_dir(file);
2909 }
2910 
2911 static void __add_event_to_tracers(struct trace_event_call *call)
2912 {
2913 	struct trace_array *tr;
2914 
2915 	list_for_each_entry(tr, &ftrace_trace_arrays, list)
2916 		__trace_add_new_event(call, tr);
2917 }
2918 
2919 extern struct trace_event_call *__start_ftrace_events[];
2920 extern struct trace_event_call *__stop_ftrace_events[];
2921 
2922 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2923 
2924 static __init int setup_trace_event(char *str)
2925 {
2926 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2927 	ring_buffer_expanded = true;
2928 	tracing_selftest_disabled = true;
2929 
2930 	return 1;
2931 }
2932 __setup("trace_event=", setup_trace_event);
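
/*
 * For example, booting with
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * lands that list in bootup_event_buf; early_enable_events() later
 * splits it on the commas and enables each listed event.
 */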
2933 
2934 /* Expects to have event_mutex held when called */
2935 static int
2936 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2937 {
2938 	struct dentry *d_events;
2939 	struct dentry *entry;
2940 
2941 	entry = tracefs_create_file("set_event", 0644, parent,
2942 				    tr, &ftrace_set_event_fops);
2943 	if (!entry) {
2944 		pr_warn("Could not create tracefs 'set_event' entry\n");
2945 		return -ENOMEM;
2946 	}
2947 
2948 	d_events = tracefs_create_dir("events", parent);
2949 	if (!d_events) {
2950 		pr_warn("Could not create tracefs 'events' directory\n");
2951 		return -ENOMEM;
2952 	}
2953 
2954 	entry = trace_create_file("enable", 0644, d_events,
2955 				  tr, &ftrace_tr_enable_fops);
2956 	if (!entry) {
2957 		pr_warn("Could not create tracefs 'enable' entry\n");
2958 		return -ENOMEM;
2959 	}
2960 
2961 	/* These are not as crucial, just warn if they are not created */
2962 
2963 	entry = tracefs_create_file("set_event_pid", 0644, parent,
2964 				    tr, &ftrace_set_event_pid_fops);
2965 	if (!entry)
2966 		pr_warn("Could not create tracefs 'set_event_pid' entry\n");
2967 
2968 	/* ring buffer internal formats */
2969 	entry = trace_create_file("header_page", 0444, d_events,
2970 				  ring_buffer_print_page_header,
2971 				  &ftrace_show_header_fops);
2972 	if (!entry)
2973 		pr_warn("Could not create tracefs 'header_page' entry\n");
2974 
2975 	entry = trace_create_file("header_event", 0444, d_events,
2976 				  ring_buffer_print_entry_header,
2977 				  &ftrace_show_header_fops);
2978 	if (!entry)
2979 		pr_warn("Could not create tracefs 'header_event' entry\n");
2980 
2981 	tr->event_dir = d_events;
2982 
2983 	return 0;
2984 }
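
/*
 * On success, a fresh instance ends up with these control files
 * (relative to @parent):
 *
 *	set_event
 *	set_event_pid
 *	events/enable
 *	events/header_page
 *	events/header_event
 */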
2985 
2986 /**
2987  * event_trace_add_tracer - add an instance of a trace_array to events
2988  * @parent: The parent dentry to place the files/directories for events in
2989  * @tr: The trace array associated with these events
2990  *
2991  * When a new instance is created, it needs to set up its events
2992  * directory, as well as other files associated with events. It also
2993  * creates the event hierarchy in the @parent/events directory.
2994  *
2995  * Returns 0 on success.
2996  *
2997  * Must be called with event_mutex held.
2998  */
2999 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
3000 {
3001 	int ret;
3002 
3003 	lockdep_assert_held(&event_mutex);
3004 
3005 	ret = create_event_toplevel_files(parent, tr);
3006 	if (ret)
3007 		goto out;
3008 
3009 	down_write(&trace_event_sem);
3010 	__trace_add_event_dirs(tr);
3011 	up_write(&trace_event_sem);
3012 
3013  out:
3014 	return ret;
3015 }
3016 
3017 /*
3018  * The top trace array already had its file descriptors created.
3019  * Now the files themselves need to be created.
3020  */
3021 static __init int
3022 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
3023 {
3024 	int ret;
3025 
3026 	mutex_lock(&event_mutex);
3027 
3028 	ret = create_event_toplevel_files(parent, tr);
3029 	if (ret)
3030 		goto out_unlock;
3031 
3032 	down_write(&trace_event_sem);
3033 	__trace_early_add_event_dirs(tr);
3034 	up_write(&trace_event_sem);
3035 
3036  out_unlock:
3037 	mutex_unlock(&event_mutex);
3038 
3039 	return ret;
3040 }
3041 
3042 /* Must be called with event_mutex held */
3043 int event_trace_del_tracer(struct trace_array *tr)
3044 {
3045 	lockdep_assert_held(&event_mutex);
3046 
3047 	/* Disable any event triggers and associated soft-disabled events */
3048 	clear_event_triggers(tr);
3049 
3050 	/* Clear the pid list */
3051 	__ftrace_clear_event_pids(tr);
3052 
3053 	/* Disable any running events */
3054 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3055 
3056 	/* Make sure no more events are being executed */
3057 	tracepoint_synchronize_unregister();
3058 
3059 	down_write(&trace_event_sem);
3060 	__trace_remove_event_dirs(tr);
3061 	tracefs_remove_recursive(tr->event_dir);
3062 	up_write(&trace_event_sem);
3063 
3064 	tr->event_dir = NULL;
3065 
3066 	return 0;
3067 }
3068 
3069 static __init int event_trace_memsetup(void)
3070 {
3071 	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
3072 	file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
3073 	return 0;
3074 }
3075 
3076 static __init void
3077 early_enable_events(struct trace_array *tr, bool disable_first)
3078 {
3079 	char *buf = bootup_event_buf;
3080 	char *token;
3081 	int ret;
3082 
3083 	while (true) {
3084 		token = strsep(&buf, ",");
3085 
3086 		if (!token)
3087 			break;
3088 
3089 		if (*token) {
3090 			/* Restarting syscalls requires that we stop them first */
3091 			if (disable_first)
3092 				ftrace_set_clr_event(tr, token, 0);
3093 
3094 			ret = ftrace_set_clr_event(tr, token, 1);
3095 			if (ret)
3096 				pr_warn("Failed to enable trace event: %s\n", token);
3097 		}
3098 
3099 		/* Put back the comma to allow this to be called again */
3100 		if (buf)
3101 			*(buf - 1) = ',';
3102 	}
3103 }
3104 
3105 static __init int event_trace_enable(void)
3106 {
3107 	struct trace_array *tr = top_trace_array();
3108 	struct trace_event_call **iter, *call;
3109 	int ret;
3110 
3111 	if (!tr)
3112 		return -ENODEV;
3113 
3114 	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3115 
3116 		call = *iter;
3117 		ret = event_init(call);
3118 		if (!ret)
3119 			list_add(&call->list, &ftrace_events);
3120 	}
3121 
3122 	/*
3123 	 * We need the top trace array to have a working set of trace
3124 	 * points at early init, before the debug files and directories
3125 	 * are created. Create the file entries now, and attach them
3126 	 * to the actual file dentries later.
3127 	 */
3128 	__trace_early_add_events(tr);
3129 
3130 	early_enable_events(tr, false);
3131 
3132 	trace_printk_start_comm();
3133 
3134 	register_event_cmds();
3135 
3136 	register_trigger_cmds();
3137 
3138 	return 0;
3139 }
3140 
3141 /*
3142  * event_trace_enable() is called from trace_event_init() first to
3143  * initialize events and perhaps start any events that are on the
3144  * command line. Unfortunately, there are some events that will not
3145  * start this early, like the system call tracepoints that need
3146  * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
3147  * is called before pid 1 starts, so the flag is never set and the
3148  * syscall tracepoints are never reached, even though the events are
3149  * enabled (and do nothing) until this second pass re-enables them.
3150  */
3151 static __init int event_trace_enable_again(void)
3152 {
3153 	struct trace_array *tr;
3154 
3155 	tr = top_trace_array();
3156 	if (!tr)
3157 		return -ENODEV;
3158 
3159 	early_enable_events(tr, true);
3160 
3161 	return 0;
3162 }
3163 
3164 early_initcall(event_trace_enable_again);
3165 
3166 __init int event_trace_init(void)
3167 {
3168 	struct trace_array *tr;
3169 	struct dentry *d_tracer;
3170 	struct dentry *entry;
3171 	int ret;
3172 
3173 	tr = top_trace_array();
3174 	if (!tr)
3175 		return -ENODEV;
3176 
3177 	d_tracer = tracing_init_dentry();
3178 	if (IS_ERR(d_tracer))
3179 		return 0;
3180 
3181 	entry = tracefs_create_file("available_events", 0444, d_tracer,
3182 				    tr, &ftrace_avail_fops);
3183 	if (!entry)
3184 		pr_warn("Could not create tracefs 'available_events' entry\n");
3185 
3186 	if (trace_define_generic_fields())
3187 		pr_warn("tracing: Failed to allocate generic fields\n");
3188 
3189 	if (trace_define_common_fields())
3190 		pr_warn("tracing: Failed to allocate common fields\n");
3191 
3192 	ret = early_event_add_tracer(d_tracer, tr);
3193 	if (ret)
3194 		return ret;
3195 
3196 #ifdef CONFIG_MODULES
3197 	ret = register_module_notifier(&trace_module_nb);
3198 	if (ret)
3199 		pr_warn("Failed to register trace events module notifier\n");
3200 #endif
3201 	return 0;
3202 }
3203 
3204 void __init trace_event_init(void)
3205 {
3206 	event_trace_memsetup();
3207 	init_ftrace_syscalls();
3208 	event_trace_enable();
3209 }
3210 
3211 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
3212 
3213 static DEFINE_SPINLOCK(test_spinlock);
3214 static DEFINE_SPINLOCK(test_spinlock_irq);
3215 static DEFINE_MUTEX(test_mutex);
3216 
3217 static __init void test_work(struct work_struct *dummy)
3218 {
3219 	spin_lock(&test_spinlock);
3220 	spin_lock_irq(&test_spinlock_irq);
3221 	udelay(1);
3222 	spin_unlock_irq(&test_spinlock_irq);
3223 	spin_unlock(&test_spinlock);
3224 
3225 	mutex_lock(&test_mutex);
3226 	msleep(1);
3227 	mutex_unlock(&test_mutex);
3228 }
3229 
3230 static __init int event_test_thread(void *unused)
3231 {
3232 	void *test_malloc;
3233 
3234 	test_malloc = kmalloc(1234, GFP_KERNEL);
3235 	if (!test_malloc)
3236 		pr_info("failed to kmalloc\n");
3237 
3238 	schedule_on_each_cpu(test_work);
3239 
3240 	kfree(test_malloc);
3241 
3242 	set_current_state(TASK_INTERRUPTIBLE);
3243 	while (!kthread_should_stop()) {
3244 		schedule();
3245 		set_current_state(TASK_INTERRUPTIBLE);
3246 	}
3247 	__set_current_state(TASK_RUNNING);
3248 
3249 	return 0;
3250 }
3251 
3252 /*
3253  * Do various things that may trigger events.
3254  */
3255 static __init void event_test_stuff(void)
3256 {
3257 	struct task_struct *test_thread;
3258 
3259 	test_thread = kthread_run(event_test_thread, NULL, "test-events");
3260 	msleep(1);
3261 	kthread_stop(test_thread);
3262 }
3263 
3264 /*
3265  * For every trace event defined, we test each tracepoint separately,
3266  * then by group (system), and finally all tracepoints together.
3267  */
3268 static __init void event_trace_self_tests(void)
3269 {
3270 	struct trace_subsystem_dir *dir;
3271 	struct trace_event_file *file;
3272 	struct trace_event_call *call;
3273 	struct event_subsystem *system;
3274 	struct trace_array *tr;
3275 	int ret;
3276 
3277 	tr = top_trace_array();
3278 	if (!tr)
3279 		return;
3280 
3281 	pr_info("Running tests on trace events:\n");
3282 
3283 	list_for_each_entry(file, &tr->events, list) {
3284 
3285 		call = file->event_call;
3286 
3287 		/* Only test those that have a probe */
3288 		if (!call->class || !call->class->probe)
3289 			continue;
3290 
3291 /*
3292  * Testing syscall events here is pretty useless, but
3293  * we still do it if configured, even though it is time consuming.
3294  * What we really need is a user thread to perform the
3295  * syscalls as we test.
3296  */
3297 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
3298 		if (call->class->system &&
3299 		    strcmp(call->class->system, "syscalls") == 0)
3300 			continue;
3301 #endif
3302 
3303 		pr_info("Testing event %s: ", trace_event_name(call));
3304 
3305 		/*
3306 		 * If an event is already enabled, someone is using
3307 		 * it and the self test should not be on.
3308 		 */
3309 		if (file->flags & EVENT_FILE_FL_ENABLED) {
3310 			pr_warn("Enabled event during self test!\n");
3311 			WARN_ON_ONCE(1);
3312 			continue;
3313 		}
3314 
3315 		ftrace_event_enable_disable(file, 1);
3316 		event_test_stuff();
3317 		ftrace_event_enable_disable(file, 0);
3318 
3319 		pr_cont("OK\n");
3320 	}
3321 
3322 	/* Now test at the sub system level */
3323 
3324 	pr_info("Running tests on trace event systems:\n");
3325 
3326 	list_for_each_entry(dir, &tr->systems, list) {
3327 
3328 		system = dir->subsystem;
3329 
3330 		/* the ftrace system is special, skip it */
3331 		if (strcmp(system->name, "ftrace") == 0)
3332 			continue;
3333 
3334 		pr_info("Testing event system %s: ", system->name);
3335 
3336 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
3337 		if (WARN_ON_ONCE(ret)) {
3338 			pr_warn("error enabling system %s\n",
3339 				system->name);
3340 			continue;
3341 		}
3342 
3343 		event_test_stuff();
3344 
3345 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
3346 		if (WARN_ON_ONCE(ret)) {
3347 			pr_warn("error disabling system %s\n",
3348 				system->name);
3349 			continue;
3350 		}
3351 
3352 		pr_cont("OK\n");
3353 	}
3354 
3355 	/* Test with all events enabled */
3356 
3357 	pr_info("Running tests on all trace events:\n");
3358 	pr_info("Testing all events: ");
3359 
3360 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
3361 	if (WARN_ON_ONCE(ret)) {
3362 		pr_warn("error enabling all events\n");
3363 		return;
3364 	}
3365 
3366 	event_test_stuff();
3367 
3368 	/* Disable all events again */
3369 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
3370 	if (WARN_ON_ONCE(ret)) {
3371 		pr_warn("error disabling all events\n");
3372 		return;
3373 	}
3374 
3375 	pr_cont("OK\n");
3376 }
3377 
3378 #ifdef CONFIG_FUNCTION_TRACER
3379 
3380 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
3381 
3382 static struct trace_event_file event_trace_file __initdata;
3383 
3384 static void __init
3385 function_test_events_call(unsigned long ip, unsigned long parent_ip,
3386 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
3387 {
3388 	struct ring_buffer_event *event;
3389 	struct ring_buffer *buffer;
3390 	struct ftrace_entry *entry;
3391 	unsigned long flags;
3392 	long disabled;
3393 	int cpu;
3394 	int pc;
3395 
3396 	pc = preempt_count();
3397 	preempt_disable_notrace();
3398 	cpu = raw_smp_processor_id();
3399 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
3400 
3401 	if (disabled != 1)
3402 		goto out;
3403 
3404 	local_save_flags(flags);
3405 
3406 	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
3407 						TRACE_FN, sizeof(*entry),
3408 						flags, pc);
3409 	if (!event)
3410 		goto out;
3411 	entry	= ring_buffer_event_data(event);
3412 	entry->ip			= ip;
3413 	entry->parent_ip		= parent_ip;
3414 
3415 	event_trigger_unlock_commit(&event_trace_file, buffer, event,
3416 				    entry, flags, pc);
3417  out:
3418 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
3419 	preempt_enable_notrace();
3420 }
3421 
3422 static struct ftrace_ops trace_ops __initdata  =
3423 {
3424 	.func = function_test_events_call,
3425 	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
3426 };
3427 
3428 static __init void event_trace_self_test_with_function(void)
3429 {
3430 	int ret;
3431 
3432 	event_trace_file.tr = top_trace_array();
3433 	if (WARN_ON(!event_trace_file.tr))
3434 		return;
3435 
3436 	ret = register_ftrace_function(&trace_ops);
3437 	if (WARN_ON(ret < 0)) {
3438 		pr_info("Failed to enable function tracer for event tests\n");
3439 		return;
3440 	}
3441 	pr_info("Running tests again, along with the function tracer\n");
3442 	event_trace_self_tests();
3443 	unregister_ftrace_function(&trace_ops);
3444 }
3445 #else
3446 static __init void event_trace_self_test_with_function(void)
3447 {
3448 }
3449 #endif
3450 
3451 static __init int event_trace_self_tests_init(void)
3452 {
3453 	if (!tracing_selftest_disabled) {
3454 		event_trace_self_tests();
3455 		event_trace_self_test_with_function();
3456 	}
3457 
3458 	return 0;
3459 }
3460 
3461 late_initcall(event_trace_self_tests_init);
3462 
3463 #endif
3464