xref: /linux/kernel/trace/trace_events_trigger.c (revision 250ab25391edeeab8462b68be42e4904506c409c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_trigger - trace event triggers
4  *
5  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/security.h>
9 #include <linux/kthread.h>
10 #include <linux/module.h>
11 #include <linux/ctype.h>
12 #include <linux/mutex.h>
13 #include <linux/slab.h>
14 #include <linux/rculist.h>
15 
16 #include "trace.h"
17 
18 static LIST_HEAD(trigger_commands);
19 static DEFINE_MUTEX(trigger_cmd_mutex);
20 
21 static struct task_struct *trigger_kthread;
22 static struct llist_head trigger_data_free_list;
23 static DEFINE_MUTEX(trigger_data_kthread_mutex);
24 
25 static int trigger_kthread_fn(void *ignore);
26 
27 static void trigger_create_kthread_locked(void)
28 {
29 	lockdep_assert_held(&trigger_data_kthread_mutex);
30 
31 	if (!trigger_kthread) {
32 		struct task_struct *kthread;
33 
34 		kthread = kthread_create(trigger_kthread_fn, NULL,
35 					 "trigger_data_free");
36 		if (!IS_ERR(kthread))
37 			WRITE_ONCE(trigger_kthread, kthread);
38 	}
39 }
40 
/*
 * Free everything currently queued on trigger_data_free_list.
 *
 * This is the synchronous fallback used when the dedicated kthread
 * could not be created.  Caller must hold trigger_data_kthread_mutex.
 */
static void trigger_data_free_queued_locked(void)
{
	struct event_trigger_data *data, *tmp;
	struct llist_node *llnodes;

	lockdep_assert_held(&trigger_data_kthread_mutex);

	llnodes = llist_del_all(&trigger_data_free_list);
	if (!llnodes)
		return;

	/* Wait for in-flight trigger invocations to finish before freeing */
	tracepoint_synchronize_unregister();

	llist_for_each_entry_safe(data, tmp, llnodes, llist)
		kfree(data);
}
57 
/* Bulk garbage collection of event_trigger_data elements */
static int trigger_kthread_fn(void *ignore)
{
	struct event_trigger_data *data, *tmp;
	struct llist_node *llnodes;

	/* Once this task starts, it lives forever */
	for (;;) {
		/*
		 * Set the task state *before* testing the list so that a
		 * concurrent llist_add() + wake_up_process() cannot be
		 * lost: the wakeup moves us back to TASK_RUNNING and
		 * schedule() returns promptly.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (llist_empty(&trigger_data_free_list))
			schedule();

		__set_current_state(TASK_RUNNING);

		llnodes = llist_del_all(&trigger_data_free_list);

		/* make sure current triggers exit before free */
		tracepoint_synchronize_unregister();

		llist_for_each_entry_safe(data, tmp, llnodes, llist)
			kfree(data);
	}

	/* Not reached; the kthread prototype requires an int return */
	return 0;
}
83 
84 void trigger_data_free(struct event_trigger_data *data)
85 {
86 	if (!data)
87 		return;
88 
89 	if (data->cmd_ops->set_filter)
90 		data->cmd_ops->set_filter(NULL, data, NULL);
91 
92 	/*
93 	 * Boot-time trigger registration can fail before kthread creation
94 	 * works. Keep the deferred-free semantics during boot and let late
95 	 * init start the kthread to drain the list.
96 	 */
97 	if (system_state == SYSTEM_BOOTING && !trigger_kthread) {
98 		llist_add(&data->llist, &trigger_data_free_list);
99 		return;
100 	}
101 
102 	if (unlikely(!trigger_kthread)) {
103 		guard(mutex)(&trigger_data_kthread_mutex);
104 
105 		trigger_create_kthread_locked();
106 		/* Check again after taking mutex */
107 		if (!trigger_kthread) {
108 			llist_add(&data->llist, &trigger_data_free_list);
109 			/* Drain the queued frees synchronously if creation failed. */
110 			trigger_data_free_queued_locked();
111 			return;
112 		}
113 	}
114 
115 	llist_add(&data->llist, &trigger_data_free_list);
116 	wake_up_process(trigger_kthread);
117 }
118 
/*
 * Drain any event_trigger_data queued during early boot, before the
 * deferred-free kthread could be started.
 */
static int __init trigger_data_free_init(void)
{
	guard(mutex)(&trigger_data_kthread_mutex);

	/* Nothing queued during boot: the kthread is created on demand later */
	if (llist_empty(&trigger_data_free_list))
		return 0;

	/* Prefer the kthread; fall back to a synchronous drain on failure */
	trigger_create_kthread_locked();
	if (trigger_kthread)
		wake_up_process(trigger_kthread);
	else
		trigger_data_free_queued_locked();

	return 0;
}
late_initcall(trigger_data_free_init);
135 
136 static inline void data_ops_trigger(struct event_trigger_data *data,
137 				    struct trace_buffer *buffer,  void *rec,
138 				    struct ring_buffer_event *event)
139 {
140 	const struct event_command *cmd_ops = data->cmd_ops;
141 
142 	if (data->flags & EVENT_TRIGGER_FL_COUNT) {
143 		if (!cmd_ops->count_func(data, buffer, rec, event))
144 			return;
145 	}
146 
147 	cmd_ops->trigger(data, buffer, rec, event);
148 }
149 
150 /**
151  * event_triggers_call - Call triggers associated with a trace event
152  * @file: The trace_event_file associated with the event
153  * @buffer: The ring buffer that the event is being written to
154  * @rec: The trace entry for the event, NULL for unconditional invocation
155  * @event: The event meta data in the ring buffer
156  *
157  * For each trigger associated with an event, invoke the trigger
158  * function registered with the associated trigger command.  If rec is
159  * non-NULL, it means that the trigger requires further processing and
160  * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
162  * the filter and if the record matches the trigger will be invoked.
163  * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
164  * in any case until the current event is written, the trigger
165  * function isn't invoked but the bit associated with the deferred
166  * trigger is set in the return value.
167  *
168  * Returns an enum event_trigger_type value containing a set bit for
169  * any trigger that should be deferred, ETT_NONE if nothing to defer.
170  *
171  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
172  *
173  * Return: an enum event_trigger_type value containing a set bit for
174  * any trigger that should be deferred, ETT_NONE if nothing to defer.
175  */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		/* No record available: invoke unconditionally, no filtering */
		if (!rec) {
			data_ops_trigger(data, buffer, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		/* Post-triggers are deferred: report them via the return bits */
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data_ops_trigger(data, buffer, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
207 
208 bool __trace_trigger_soft_disabled(struct trace_event_file *file)
209 {
210 	unsigned long eflags = file->flags;
211 
212 	if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
213 		event_triggers_call(file, NULL, NULL, NULL);
214 	if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
215 		return true;
216 	if (eflags & EVENT_FILE_FL_PID_FILTER)
217 		return trace_event_ignore_this_pid(file);
218 	return false;
219 }
220 EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
221 
222 /**
223  * event_triggers_post_call - Call 'post_triggers' for a trace event
224  * @file: The trace_event_file associated with the event
225  * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
226  *
227  * For each trigger associated with an event, invoke the trigger
228  * function registered with the associated trigger command, if the
229  * corresponding bit is set in the tt enum passed into this function.
230  * See @event_triggers_call for details on how those bits are set.
231  *
232  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
233  */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		/* Only invoke triggers whose bit event_triggers_call() set */
		if (data->cmd_ops->trigger_type & tt)
			data_ops_trigger(data, NULL, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
248 
/* Sentinel iterator value used to emit the "available triggers" banner */
#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	/* The banner is a single pseudo-entry; nothing follows it */
	if (t == SHOW_AVAILABLE_TRIGGERS) {
		(*pos)++;
		return NULL;
	}
	return seq_list_next(t, &event_file->triggers, pos);
}
261 
262 static bool check_user_trigger(struct trace_event_file *file)
263 {
264 	struct event_trigger_data *data;
265 
266 	list_for_each_entry_rcu(data, &file->triggers, list,
267 				lockdep_is_held(&event_mutex)) {
268 		if (data->flags & EVENT_TRIGGER_FL_PROBE)
269 			continue;
270 		return true;
271 	}
272 	return false;
273 }
274 
/*
 * seq_file ->start(): takes event_mutex (released in trigger_stop())
 * and returns either the first trigger entry or the
 * SHOW_AVAILABLE_TRIGGERS sentinel when no user triggers exist.
 */
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_file(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}
290 
/* seq_file ->stop(): drops the mutex taken in trigger_start() */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

/*
 * Show one trigger entry, or the list of available trigger commands
 * when handed the SHOW_AVAILABLE_TRIGGERS sentinel.
 */
static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		/* Reverse walk so the earliest-registered commands print first */
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->cmd_ops->print(m, data);

	return 0;
}

/* Iterator for reading the per-event 'trigger' file */
static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
324 
/*
 * Open the per-event 'trigger' file.  Opening with O_TRUNC for write
 * removes all existing triggers; opening for read sets up the
 * seq_file iterator.
 */
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	guard(mutex)(&event_mutex);

	/* The event may have been removed while the file was held open */
	if (unlikely(!event_file_file(file)))
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		/* O_TRUNC: let every command remove its triggers from this file */
		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	return ret;
}
361 
/*
 * trigger_process_regex - parse and dispatch one trigger command line
 * @file: The trace_event_file the trigger was written to
 * @buff: Writable command string, e.g. "traceon:5 if pid == 0"
 *
 * Splits off the command name and hands the remainder to the matching
 * event_command's ->parse() callback under trigger_cmd_mutex.
 *
 * Return: whatever ->parse() returns, or -EINVAL for an unknown command.
 */
int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next;
	struct event_command *p;

	next = buff = strim(buff);

	/* The command name ends at ':', space or tab */
	command = strsep(&next, ": \t");
	if (next) {
		next = skip_spaces(next);
		if (!*next)
			next = NULL;
	}
	/* A leading '!' (trigger removal) is not part of the command name */
	command = (command[0] != '!') ? command : command + 1;

	guard(mutex)(&trigger_cmd_mutex);

	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0)
			return p->parse(p, file, buff, command, next);
	}

	return -EINVAL;
}
386 
387 static ssize_t event_trigger_regex_write(struct file *file,
388 					 const char __user *ubuf,
389 					 size_t cnt, loff_t *ppos)
390 {
391 	struct trace_event_file *event_file;
392 	ssize_t ret;
393 	char *buf __free(kfree) = NULL;
394 
395 	if (!cnt)
396 		return 0;
397 
398 	if (cnt >= PAGE_SIZE)
399 		return -EINVAL;
400 
401 	buf = memdup_user_nul(ubuf, cnt);
402 	if (IS_ERR(buf))
403 		return PTR_ERR(buf);
404 
405 	guard(mutex)(&event_mutex);
406 
407 	event_file = event_file_file(file);
408 	if (unlikely(!event_file))
409 		return -ENODEV;
410 
411 	ret = trigger_process_regex(event_file, buf);
412 	if (ret < 0)
413 		return ret;
414 
415 	*ppos += cnt;
416 	return cnt;
417 }
418 
/* Tear down the seq_file iterator if the file was open for reading */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}
426 
/* Thin wrappers: the 'trigger' file reuses the regex open/write/release */
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	/* Checks for tracefs lockdown */
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

/* File operations for the per-event 'trigger' tracefs file */
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
454 
455 /*
456  * Currently we only register event commands from __init, so mark this
457  * __init too.
458  */
459 __init int register_event_command(struct event_command *cmd)
460 {
461 	struct event_command *p;
462 
463 	guard(mutex)(&trigger_cmd_mutex);
464 
465 	list_for_each_entry(p, &trigger_commands, list) {
466 		if (strcmp(cmd->name, p->name) == 0)
467 			return -EBUSY;
468 	}
469 	list_add(&cmd->list, &trigger_commands);
470 
471 	return 0;
472 }
473 
474 /*
475  * Currently we only unregister event commands from __init, so mark
476  * this __init too.
477  */
478 __init int unregister_event_command(struct event_command *cmd)
479 {
480 	struct event_command *p, *n;
481 
482 	guard(mutex)(&trigger_cmd_mutex);
483 
484 	list_for_each_entry_safe(p, n, &trigger_commands, list) {
485 		if (strcmp(cmd->name, p->name) == 0) {
486 			list_del_init(&p->list);
487 			return 0;
488 		}
489 	}
490 
491 	return -ENODEV;
492 }
493 
494 /**
495  * event_trigger_count - Optional count function for event triggers
496  * @data: Trigger-specific data
497  * @buffer: The ring buffer that the event is being written to
498  * @rec: The trace entry for the event, NULL for unconditional invocation
499  * @event: The event meta data in the ring buffer
500  *
501  * For triggers that can take a count parameter that doesn't do anything
502  * special, they can use this function to assign to their .count_func
503  * field.
504  *
505  * This simply does a count down of the @data->count field.
506  *
507  * If the @data->count is greater than zero, it will decrement it.
508  *
509  * Returns false if @data->count is zero, otherwise true.
510  */
511 bool event_trigger_count(struct event_trigger_data *data,
512 			 struct trace_buffer *buffer,  void *rec,
513 			 struct ring_buffer_event *event)
514 {
515 	if (!data->count)
516 		return false;
517 
518 	if (data->count != -1)
519 		(data->count)--;
520 
521 	return true;
522 }
523 
/**
 * event_trigger_print - Generic event_command @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data (the count, smuggled as a pointer)
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	/* -1 is the "no limit" sentinel */
	if (count != -1)
		seq_printf(m, ":count=%ld", count);
	else
		seq_puts(m, ":unlimited");

	if (!filter_str)
		seq_putc(m, '\n');
	else
		seq_printf(m, " if %s\n", filter_str);

	return 0;
}
558 
/**
 * event_trigger_init - Generic event_command @init implementation
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_data *data)
{
	/*
	 * Plain (non-atomic) increment — presumably serialized by
	 * event_mutex (see register_trigger()); confirm for new callers.
	 */
	data->ref++;
	return 0;
}
575 
/**
 * event_trigger_free - Generic event_command @free implementation
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_data *data)
{
	/* An underflow here would mean an unbalanced init/free pair */
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	/* Last reference: hand off to the deferred-free machinery */
	if (!data->ref)
		trigger_data_free(data);
}
595 
596 int trace_event_trigger_enable_disable(struct trace_event_file *file,
597 				       int trigger_enable)
598 {
599 	int ret = 0;
600 
601 	if (trigger_enable) {
602 		if (atomic_inc_return(&file->tm_ref) > 1)
603 			return ret;
604 		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
605 		ret = trace_event_enable_disable(file, 1, 1);
606 	} else {
607 		if (atomic_dec_return(&file->tm_ref) > 0)
608 			return ret;
609 		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
610 		ret = trace_event_enable_disable(file, 0, 1);
611 	}
612 
613 	return ret;
614 }
615 
/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			/* Drop trigger mode before tearing the trigger down */
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->cmd_ops->free)
				data->cmd_ops->free(data);
		}
	}
}
644 
645 /**
646  * update_cond_flag - Set or reset the TRIGGER_COND bit
647  * @file: The trace_event_file associated with the event
648  *
649  * If an event has triggers and any of those triggers has a filter or
650  * a post_trigger, trigger invocation needs to be deferred until after
651  * the current event has logged its data, and the event should have
652  * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
653  * cleared.
654  */
655 void update_cond_flag(struct trace_event_file *file)
656 {
657 	struct event_trigger_data *data;
658 	bool set_cond = false;
659 
660 	lockdep_assert_held(&event_mutex);
661 
662 	list_for_each_entry(data, &file->triggers, list) {
663 		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
664 		    event_command_needs_rec(data->cmd_ops)) {
665 			set_cond = true;
666 			break;
667 		}
668 	}
669 
670 	if (set_cond)
671 		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
672 	else
673 		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
674 }
675 
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Only one trigger of a given type may be attached to an event */
	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type)
			return -EEXIST;
	}

	if (data->cmd_ops->init) {
		ret = data->cmd_ops->init(data);
		if (ret < 0)
			return ret;
	}

	list_add_rcu(&data->list, &file->triggers);

	update_cond_flag(file);
	ret = trace_event_trigger_enable_disable(file, 1);
	if (ret < 0) {
		/* Roll back the list insertion on enable failure */
		list_del_rcu(&data->list);
		update_cond_flag(file);
	}
	return ret;
}
719 
/*
 * True if the trigger was found and unregistered, else false.
 */
static bool try_unregister_trigger(char *glob,
				   struct event_trigger_data *test,
				   struct trace_event_file *file)
{
	struct event_trigger_data *data = NULL, *iter;

	lockdep_assert_held(&event_mutex);

	/* Triggers are matched by type, not by pointer identity */
	list_for_each_entry(iter, &file->triggers, list) {
		if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			data = iter;
			/* Unlink and drop trigger mode before freeing */
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (data) {
		if (data->cmd_ops->free)
			data->cmd_ops->free(data);

		return true;
	}

	return false;
}
750 
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	/* Not-found is not an error here; the result is intentionally ignored */
	try_unregister_trigger(glob, test, file);
}
768 
769 /*
770  * Event trigger parsing helper functions.
771  *
772  * These functions help make it easier to write an event trigger
773  * parsing function i.e. the struct event_command.parse() callback
774  * function responsible for parsing and registering a trigger command
775  * written to the 'trigger' file.
776  *
777  * A trigger command (or just 'trigger' for short) takes the form:
778  *   [trigger] [if filter]
779  *
780  * The struct event_command.parse() callback (and other struct
781  * event_command functions) refer to several components of a trigger
782  * command.  Those same components are referenced by the event trigger
783  * parsing helper functions defined below.  These components are:
784  *
785  *   cmd               - the trigger command name
786  *   glob              - the trigger command name optionally prefaced with '!'
787  *   param_and_filter  - text following cmd and ':'
788  *   param             - text following cmd and ':' and stripped of filter
789  *   filter            - the optional filter text following (and including) 'if'
790  *
791  * To illustrate the use of these components, here are some concrete
792  * examples. For the following triggers:
793  *
794  *   echo 'traceon:5 if pid == 0' > trigger
795  *     - 'traceon' is both cmd and glob
796  *     - '5 if pid == 0' is the param_and_filter
797  *     - '5' is the param
798  *     - 'if pid == 0' is the filter
799  *
800  *   echo 'enable_event:sys:event:n' > trigger
801  *     - 'enable_event' is both cmd and glob
802  *     - 'sys:event:n' is the param_and_filter
803  *     - 'sys:event:n' is the param
804  *     - there is no filter
805  *
806  *   echo 'hist:keys=pid if prio > 50' > trigger
807  *     - 'hist' is both cmd and glob
808  *     - 'keys=pid if prio > 50' is the param_and_filter
809  *     - 'keys=pid' is the param
810  *     - 'if prio > 50' is the filter
811  *
812  *   echo '!enable_event:sys:event:n' > trigger
813  *     - 'enable_event' the cmd
814  *     - '!enable_event' is the glob
815  *     - 'sys:event:n' is the param_and_filter
816  *     - 'sys:event:n' is the param
817  *     - there is no filter
818  *
819  *   echo 'traceoff' > trigger
820  *     - 'traceoff' is both cmd and glob
821  *     - there is no param_and_filter
822  *     - there is no param
823  *     - there is no filter
824  *
825  * There are a few different categories of event trigger covered by
826  * these helpers:
827  *
828  *  - triggers that don't require a parameter e.g. traceon
829  *  - triggers that do require a parameter e.g. enable_event and hist
830  *  - triggers that though they may not require a param may support an
831  *    optional 'n' param (n = number of times the trigger should fire)
832  *    e.g.: traceon:5 or enable_event:sys:event:n
833  *  - triggers that do not support an 'n' param e.g. hist
834  *
835  * These functions can be used or ignored as necessary - it all
836  * depends on the complexity of the trigger, and the granularity of
837  * the functions supported reflects the fact that some implementations
838  * may need to customize certain aspects of their implementations and
839  * won't need certain functions.  For instance, the hist trigger
840  * implementation doesn't use event_trigger_separate_filter() because
841  * it has special requirements for handling the filter.
842  */
843 
/**
 * event_trigger_check_remove - check whether an event trigger specifies remove
 * @glob: The trigger command string, with optional remove(!) operator
 *
 * The event trigger callback implementations pass in 'glob' as a
 * parameter.  This is the command name either with or without a
 * remove(!) operator.  A leading '!' means the command removes an
 * existing trigger rather than adding one.
 *
 * Return: true if this is a remove command, false otherwise
 */
bool event_trigger_check_remove(const char *glob)
{
	return glob && glob[0] == '!';
}
860 
861 /**
862  * event_trigger_empty_param - check whether the param is empty
863  * @param: The trigger param string
864  *
865  * The event trigger callback implementations pass in 'param' as a
866  * parameter.  This corresponds to the string following the command
867  * name minus the command name.  This function can be called by a
868  * callback implementation for any command that requires a param; a
869  * callback that doesn't require a param can ignore it.
870  *
871  * Return: true if this is an empty param, false otherwise
872  */
873 bool event_trigger_empty_param(const char *param)
874 {
875 	return !param;
876 }
877 
878 /**
879  * event_trigger_separate_filter - separate an event trigger from a filter
880  * @param_and_filter: String containing trigger and possibly filter
881  * @param: outparam, will be filled with a pointer to the trigger
882  * @filter: outparam, will be filled with a pointer to the filter
883  * @param_required: Specifies whether or not the param string is required
884  *
885  * Given a param string of the form '[trigger] [if filter]', this
886  * function separates the filter from the trigger and returns the
887  * trigger in @param and the filter in @filter.  Either the @param
888  * or the @filter may be set to NULL by this function - if not set to
889  * NULL, they will contain strings corresponding to the trigger and
890  * filter.
891  *
892  * There are two cases that need to be handled with respect to the
893  * passed-in param: either the param is required, or it is not
894  * required.  If @param_required is set, and there's no param, it will
895  * return -EINVAL.  If @param_required is not set and there's a param
896  * that starts with a number, that corresponds to the case of a
897  * trigger with :n (n = number of times the trigger should fire) and
898  * the parsing continues normally; otherwise the function just returns
899  * and assumes param just contains a filter and there's nothing else
900  * to do.
901  *
902  * Return: 0 on success, errno otherwise
903  */
904 int event_trigger_separate_filter(char *param_and_filter, char **param,
905 				  char **filter, bool param_required)
906 {
907 	int ret = 0;
908 
909 	*param = *filter = NULL;
910 
911 	if (!param_and_filter) {
912 		if (param_required)
913 			ret = -EINVAL;
914 		return ret;
915 	}
916 
917 	/*
918 	 * Here we check for an optional param. The only legal
919 	 * optional param is :n, and if that's the case, continue
920 	 * below. Otherwise we assume what's left is a filter and
921 	 * return it as the filter string for the caller to deal with.
922 	 */
923 	if (!param_required && param_and_filter && !isdigit(param_and_filter[0])) {
924 		*filter = param_and_filter;
925 		return ret;
926 	}
927 
928 	/*
929 	 * Separate the param from the filter (param [if filter]).
930 	 * Here we have either an optional :n param or a required
931 	 * param and an optional filter.
932 	 */
933 	*param = strsep(&param_and_filter, " \t");
934 
935 	/*
936 	 * Here we have a filter, though it may be empty.
937 	 */
938 	if (param_and_filter) {
939 		*filter = skip_spaces(param_and_filter);
940 		if (!**filter)
941 			*filter = NULL;
942 	}
943 	return ret;
944 }
945 
/**
 * trigger_data_alloc - allocate and init event_trigger_data for a trigger
 * @cmd_ops: The event_command operations for the trigger
 * @cmd: The cmd string (unused in this function; presumably kept for
 *       symmetry with callers — confirm before removing)
 * @param: The param string
 * @private_data: User data to associate with the event trigger
 *
 * Allocate an event_trigger_data instance and initialize it.  The
 * @cmd_ops defines how the trigger will operate.  If @param is set
 * and @cmd_ops->count_func is non-NULL, the EVENT_TRIGGER_FL_COUNT
 * flag is set so @cmd_ops->count_func() is consulted before the
 * trigger fires; if that function returns false,
 * @cmd_ops->trigger() will not be called.  (The actual count value
 * is parsed later, by event_trigger_parse_num().)  @private_data can
 * also be passed in and associated with the event_trigger_data.
 *
 * Use trigger_data_free() to free an event_trigger_data object.
 *
 * Return: The trigger_data object on success, NULL otherwise
 */
struct event_trigger_data *trigger_data_alloc(struct event_command *cmd_ops,
					      char *cmd,
					      char *param,
					      void *private_data)
{
	struct event_trigger_data *trigger_data;

	trigger_data = kzalloc_obj(*trigger_data);
	if (!trigger_data)
		return NULL;

	/* -1 is the "unlimited" sentinel (see event_trigger_count()) */
	trigger_data->count = -1;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = private_data;
	if (param && cmd_ops->count_func)
		trigger_data->flags |= EVENT_TRIGGER_FL_COUNT;

	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	return trigger_data;
}
989 
/**
 * event_trigger_parse_num - parse and return the number param for a trigger
 * @param: The param string
 * @trigger_data: The trigger_data for the trigger
 *
 * Parse the :n (n = number of times the trigger should fire) param
 * and set the count variable in the trigger_data to the parsed count.
 * A NULL @param leaves the preset count untouched.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_parse_num(char *param,
			    struct event_trigger_data *trigger_data)
{
	char *number;
	int ret = 0;

	if (param) {
		/* The count is the text up to the next ':' separator */
		number = strsep(&param, ":");

		/* An empty count (e.g. "traceon:") is invalid */
		if (!strlen(number))
			return -EINVAL;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
	}

	return ret;
}
1021 
1022 /**
1023  * event_trigger_set_filter - set an event trigger's filter
1024  * @cmd_ops: The event_command operations for the trigger
1025  * @file: The event file for the trigger's event
1026  * @param: The string containing the filter
1027  * @trigger_data: The trigger_data for the trigger
1028  *
1029  * Set the filter for the trigger.  If the filter is NULL, just return
1030  * without error.
1031  *
1032  * Return: 0 on success, errno otherwise
1033  */
1034 int event_trigger_set_filter(struct event_command *cmd_ops,
1035 			     struct trace_event_file *file,
1036 			     char *param,
1037 			     struct event_trigger_data *trigger_data)
1038 {
1039 	if (param && cmd_ops->set_filter)
1040 		return cmd_ops->set_filter(param, trigger_data, file);
1041 
1042 	return 0;
1043 }
1044 
1045 /**
1046  * event_trigger_reset_filter - reset an event trigger's filter
1047  * @cmd_ops: The event_command operations for the trigger
1048  * @trigger_data: The trigger_data for the trigger
1049  *
1050  * Reset the filter for the trigger to no filter.
1051  */
1052 void event_trigger_reset_filter(struct event_command *cmd_ops,
1053 				struct event_trigger_data *trigger_data)
1054 {
1055 	if (cmd_ops->set_filter)
1056 		cmd_ops->set_filter(NULL, trigger_data, NULL);
1057 }
1058 
1059 /**
1060  * event_trigger_register - register an event trigger
1061  * @cmd_ops: The event_command operations for the trigger
1062  * @file: The event file for the trigger's event
1063  * @glob: The trigger command string, with optional remove(!) operator
1064  * @trigger_data: The trigger_data for the trigger
1065  *
1066  * Register an event trigger.  The @cmd_ops are used to call the
1067  * cmd_ops->reg() function which actually does the registration.
1068  *
1069  * Return: 0 on success, errno otherwise
1070  */
1071 int event_trigger_register(struct event_command *cmd_ops,
1072 			   struct trace_event_file *file,
1073 			   char *glob,
1074 			   struct event_trigger_data *trigger_data)
1075 {
1076 	return cmd_ops->reg(glob, trigger_data, file);
1077 }
1078 
1079 /**
1080  * event_trigger_unregister - unregister an event trigger
1081  * @cmd_ops: The event_command operations for the trigger
1082  * @file: The event file for the trigger's event
1083  * @glob: The trigger command string, with optional remove(!) operator
1084  * @trigger_data: The trigger_data for the trigger
1085  *
1086  * Unregister an event trigger.  The @cmd_ops are used to call the
1087  * cmd_ops->unreg() function which actually does the unregistration.
1088  */
1089 void event_trigger_unregister(struct event_command *cmd_ops,
1090 			      struct trace_event_file *file,
1091 			      char *glob,
1092 			      struct event_trigger_data *trigger_data)
1093 {
1094 	cmd_ops->unreg(glob, trigger_data, file);
1095 }
1096 
1097 /*
1098  * End event trigger parsing helper functions.
1099  */
1100 
/**
 * event_trigger_parse - Generic event_command @parse implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param_and_filter: The param and filter portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @parse method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_parse(struct event_command *cmd_ops,
		    struct trace_event_file *file,
		    char *glob, char *cmd, char *param_and_filter)
{
	struct event_trigger_data *trigger_data;
	char *param, *filter;
	bool remove;
	int ret;

	/* A leading '!' in @glob means "remove this trigger" */
	remove = event_trigger_check_remove(glob);

	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, false);
	if (ret)
		return ret;

	ret = -ENOMEM;
	trigger_data = trigger_data_alloc(cmd_ops, cmd, param, file);
	if (!trigger_data)
		return ret;

	if (remove) {
		/*
		 * The freshly allocated trigger_data is only used as a
		 * template to find and unregister the existing trigger.
		 */
		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
		trigger_data_free(trigger_data);
		return 0;
	}

	/* Parse the optional :n count param into trigger_data->count */
	ret = event_trigger_parse_num(param, trigger_data);
	if (ret)
		goto out_free;

	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
	if (ret < 0)
		goto out_free;

	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_data);

	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
	if (ret)
		goto out_free;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_data);
	return ret;

 out_free:
	/* Drop any filter installed above before freeing the data */
	event_trigger_reset_filter(cmd_ops, trigger_data);
	trigger_data_free(trigger_data);
	return ret;
}
1168 
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	/* The filter string must start with the "if" keyword */
	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		return ret;

	/* "if" with nothing after it is also invalid */
	if (!filter_str)
		return ret;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->tr, file->event_call,
				  filter_str, true, &filter);

	/* Only enabled set_str for error handling */
	if (filter) {
		kfree(filter->filter_string);
		filter->filter_string = NULL;
	}

	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
 assign:
	/* Swap in the new filter; readers see either old or new via RCU */
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/*
		 * Make sure the call is done with the filter.
		 * It is possible that a filter could fail at boot up,
		 * and then this path will be called. Avoid the synchronization
		 * in that case.
		 */
		if (system_state != SYSTEM_BOOTING)
			tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	/* Replace the saved filter string shown by the print() callbacks */
	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			/* Can't save the string; drop the filter as well */
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
	return ret;
}
1249 
/* All named triggers: both owners of the shared data and references to it */
static LIST_HEAD(named_triggers);
1251 
1252 /**
1253  * find_named_trigger - Find the common named trigger associated with @name
1254  * @name: The name of the set of named triggers to find the common data for
1255  *
1256  * Named triggers are sets of triggers that share a common set of
1257  * trigger data.  The first named trigger registered with a given name
1258  * owns the common trigger data that the others subsequently
1259  * registered with the same name will reference.  This function
1260  * returns the common trigger data associated with that first
1261  * registered instance.
1262  *
1263  * Return: the common trigger data for the given named trigger on
1264  * success, NULL otherwise.
1265  */
1266 struct event_trigger_data *find_named_trigger(const char *name)
1267 {
1268 	struct event_trigger_data *data;
1269 
1270 	if (!name)
1271 		return NULL;
1272 
1273 	list_for_each_entry(data, &named_triggers, named_list) {
1274 		if (data->named_data)
1275 			continue;
1276 		if (strcmp(data->name, name) == 0)
1277 			return data;
1278 	}
1279 
1280 	return NULL;
1281 }
1282 
1283 /**
1284  * is_named_trigger - determine if a given trigger is a named trigger
1285  * @test: The trigger data to test
1286  *
1287  * Return: true if 'test' is a named trigger, false otherwise.
1288  */
1289 bool is_named_trigger(struct event_trigger_data *test)
1290 {
1291 	struct event_trigger_data *data;
1292 
1293 	list_for_each_entry(data, &named_triggers, named_list) {
1294 		if (test == data)
1295 			return true;
1296 	}
1297 
1298 	return false;
1299 }
1300 
1301 /**
1302  * save_named_trigger - save the trigger in the named trigger list
1303  * @name: The name of the named trigger set
1304  * @data: The trigger data to save
1305  *
1306  * Return: 0 if successful, negative error otherwise.
1307  */
1308 int save_named_trigger(const char *name, struct event_trigger_data *data)
1309 {
1310 	data->name = kstrdup(name, GFP_KERNEL);
1311 	if (!data->name)
1312 		return -ENOMEM;
1313 
1314 	list_add(&data->named_list, &named_triggers);
1315 
1316 	return 0;
1317 }
1318 
1319 /**
1320  * del_named_trigger - delete a trigger from the named trigger list
1321  * @data: The trigger data to delete
1322  */
1323 void del_named_trigger(struct event_trigger_data *data)
1324 {
1325 	kfree(data->name);
1326 	data->name = NULL;
1327 
1328 	list_del(&data->named_list);
1329 }
1330 
1331 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
1332 {
1333 	struct event_trigger_data *test;
1334 
1335 	list_for_each_entry(test, &named_triggers, named_list) {
1336 		if (strcmp(test->name, data->name) == 0) {
1337 			if (pause) {
1338 				test->paused_tmp = test->paused;
1339 				test->paused = true;
1340 			} else {
1341 				test->paused = test->paused_tmp;
1342 			}
1343 		}
1344 	}
1345 }
1346 
/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	/* true = save current paused state and set paused */
	__pause_named_trigger(data, true);
}
1360 
/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	/* false = restore the paused state saved by pause_named_trigger() */
	__pause_named_trigger(data, false);
}
1374 
/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger to be associated
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}
1392 
/*
 * Return the common trigger data this named trigger references, set by
 * set_named_trigger_data().  NULL when @data has no associated named_data
 * (e.g. it is the owning instance).
 */
struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
1398 
1399 static void
1400 traceon_trigger(struct event_trigger_data *data,
1401 		struct trace_buffer *buffer, void *rec,
1402 		struct ring_buffer_event *event)
1403 {
1404 	struct trace_event_file *file = data->private_data;
1405 
1406 	if (WARN_ON_ONCE(!file))
1407 		return;
1408 
1409 	if (tracer_tracing_is_on(file->tr))
1410 		return;
1411 
1412 	tracer_tracing_on(file->tr);
1413 }
1414 
1415 static bool
1416 traceon_count_func(struct event_trigger_data *data,
1417 		   struct trace_buffer *buffer, void *rec,
1418 		   struct ring_buffer_event *event)
1419 {
1420 	struct trace_event_file *file = data->private_data;
1421 
1422 	if (WARN_ON_ONCE(!file))
1423 		return false;
1424 
1425 	if (tracer_tracing_is_on(file->tr))
1426 		return false;
1427 
1428 	if (!data->count)
1429 		return false;
1430 
1431 	if (data->count != -1)
1432 		(data->count)--;
1433 
1434 	return true;
1435 }
1436 
1437 static void
1438 traceoff_trigger(struct event_trigger_data *data,
1439 		 struct trace_buffer *buffer, void *rec,
1440 		 struct ring_buffer_event *event)
1441 {
1442 	struct trace_event_file *file = data->private_data;
1443 
1444 	if (WARN_ON_ONCE(!file))
1445 		return;
1446 
1447 	if (!tracer_tracing_is_on(file->tr))
1448 		return;
1449 
1450 	tracer_tracing_off(file->tr);
1451 }
1452 
1453 static bool
1454 traceoff_count_func(struct event_trigger_data *data,
1455 		    struct trace_buffer *buffer, void *rec,
1456 		    struct ring_buffer_event *event)
1457 {
1458 	struct trace_event_file *file = data->private_data;
1459 
1460 	if (WARN_ON_ONCE(!file))
1461 		return false;
1462 
1463 	if (!tracer_tracing_is_on(file->tr))
1464 		return false;
1465 
1466 	if (!data->count)
1467 		return false;
1468 
1469 	if (data->count != -1)
1470 		(data->count)--;
1471 
1472 	return true;
1473 }
1474 
1475 static int
1476 traceon_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1477 {
1478 	return event_trigger_print("traceon", m, (void *)data->count,
1479 				   data->filter_str);
1480 }
1481 
1482 static int
1483 traceoff_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1484 {
1485 	return event_trigger_print("traceoff", m, (void *)data->count,
1486 				   data->filter_str);
1487 }
1488 
/* "traceon" event command: turns tracing on when the event fires */
static struct event_command trigger_traceon_cmd = {
	.name			= "traceon",
	.trigger_type		= ETT_TRACE_ONOFF,
	.parse			= event_trigger_parse,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.set_filter		= set_trigger_filter,
	.trigger		= traceon_trigger,
	.count_func		= traceon_count_func,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1502 
/*
 * "traceoff" event command: turns tracing off when the event fires.
 * POST_TRIGGER so the triggering event itself is still recorded.
 */
static struct event_command trigger_traceoff_cmd = {
	.name			= "traceoff",
	.trigger_type		= ETT_TRACE_ONOFF,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.parse			= event_trigger_parse,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.set_filter		= set_trigger_filter,
	.trigger		= traceoff_trigger,
	.count_func		= traceoff_count_func,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1517 
1518 #ifdef CONFIG_TRACER_SNAPSHOT
/* "snapshot" trigger action: snapshot the event's trace instance */
static void
snapshot_trigger(struct event_trigger_data *data,
		 struct trace_buffer *buffer, void *rec,
		 struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (WARN_ON_ONCE(!file))
		return;

	tracing_snapshot_instance(file->tr);
}
1531 
1532 static int
1533 register_snapshot_trigger(char *glob,
1534 			  struct event_trigger_data *data,
1535 			  struct trace_event_file *file)
1536 {
1537 	int ret = tracing_arm_snapshot(file->tr);
1538 
1539 	if (ret < 0)
1540 		return ret;
1541 
1542 	ret = register_trigger(glob, data, file);
1543 	if (ret < 0)
1544 		tracing_disarm_snapshot(file->tr);
1545 	return ret;
1546 }
1547 
static void unregister_snapshot_trigger(char *glob,
					struct event_trigger_data *data,
					struct trace_event_file *file)
{
	/* Only disarm the snapshot if a trigger was actually removed */
	if (try_unregister_trigger(glob, data, file))
		tracing_disarm_snapshot(file->tr);
}
1555 
1556 static int
1557 snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1558 {
1559 	return event_trigger_print("snapshot", m, (void *)data->count,
1560 				   data->filter_str);
1561 }
1562 
/* "snapshot" event command: snapshots the trace buffer when the event fires */
static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.parse			= event_trigger_parse,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_snapshot_trigger,
	.set_filter		= set_trigger_filter,
	.trigger		= snapshot_trigger,
	.count_func		= event_trigger_count,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1576 
1577 static __init int register_trigger_snapshot_cmd(void)
1578 {
1579 	int ret;
1580 
1581 	ret = register_event_command(&trigger_snapshot_cmd);
1582 	WARN_ON(ret < 0);
1583 
1584 	return ret;
1585 }
1586 #else
1587 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1588 #endif /* CONFIG_TRACER_SNAPSHOT */
1589 
1590 #ifdef CONFIG_STACKTRACE
1591 #ifdef CONFIG_UNWINDER_ORC
1592 /* Skip 2:
1593  *   event_triggers_post_call()
1594  *   trace_event_raw_event_xxx()
1595  */
1596 # define STACK_SKIP 2
1597 #else
1598 /*
1599  * Skip 4:
1600  *   stacktrace_trigger()
1601  *   event_triggers_post_call()
1602  *   trace_event_buffer_commit()
1603  *   trace_event_raw_event_xxx()
1604  */
1605 #define STACK_SKIP 4
1606 #endif
1607 
/* "stacktrace" trigger action: record the current stack into the instance */
static void
stacktrace_trigger(struct event_trigger_data *data,
		   struct trace_buffer *buffer,  void *rec,
		   struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (WARN_ON_ONCE(!file))
		return;

	/* STACK_SKIP elides the trigger-internal frames (see above) */
	__trace_stack(file->tr, tracing_gen_ctx_dec(), STACK_SKIP);
}
1620 
1621 static int
1622 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1623 {
1624 	return event_trigger_print("stacktrace", m, (void *)data->count,
1625 				   data->filter_str);
1626 }
1627 
/*
 * "stacktrace" event command: records a stack trace when the event fires.
 * POST_TRIGGER so it runs after the event has been committed.
 */
static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.parse			= event_trigger_parse,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.set_filter		= set_trigger_filter,
	.trigger		= stacktrace_trigger,
	.count_func		= event_trigger_count,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1642 
1643 static __init int register_trigger_stacktrace_cmd(void)
1644 {
1645 	int ret;
1646 
1647 	ret = register_event_command(&trigger_stacktrace_cmd);
1648 	WARN_ON(ret < 0);
1649 
1650 	return ret;
1651 }
1652 #else
1653 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1654 #endif /* CONFIG_STACKTRACE */
1655 
/* Undo register_trigger_traceon_traceoff_cmds() on partial boot failure */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
1661 
1662 static void
1663 event_enable_trigger(struct event_trigger_data *data,
1664 		     struct trace_buffer *buffer,  void *rec,
1665 		     struct ring_buffer_event *event)
1666 {
1667 	struct enable_trigger_data *enable_data = data->private_data;
1668 
1669 	if (enable_data->enable)
1670 		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1671 	else
1672 		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1673 }
1674 
1675 static bool
1676 event_enable_count_func(struct event_trigger_data *data,
1677 			struct trace_buffer *buffer,  void *rec,
1678 			struct ring_buffer_event *event)
1679 {
1680 	struct enable_trigger_data *enable_data = data->private_data;
1681 
1682 	if (!data->count)
1683 		return false;
1684 
1685 	/* Skip if the event is in a state we want to switch to */
1686 	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1687 		return false;
1688 
1689 	if (data->count != -1)
1690 		(data->count)--;
1691 
1692 	return true;
1693 }
1694 
1695 int event_enable_trigger_print(struct seq_file *m,
1696 			       struct event_trigger_data *data)
1697 {
1698 	struct enable_trigger_data *enable_data = data->private_data;
1699 
1700 	seq_printf(m, "%s:%s:%s",
1701 		   enable_data->hist ?
1702 		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1703 		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1704 		   enable_data->file->event_call->class->system,
1705 		   trace_event_name(enable_data->file->event_call));
1706 
1707 	if (data->count == -1)
1708 		seq_puts(m, ":unlimited");
1709 	else
1710 		seq_printf(m, ":count=%ld", data->count);
1711 
1712 	if (data->filter_str)
1713 		seq_printf(m, " if %s\n", data->filter_str);
1714 	else
1715 		seq_putc(m, '\n');
1716 
1717 	return 0;
1718 }
1719 
/*
 * Drop a reference on an enable/disable_event trigger; on the last
 * reference, undo the soft-mode enable and release all its data.
 */
void event_enable_trigger_free(struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		trace_event_put_ref(enable_data->file->event_call);
		trigger_data_free(data);
		kfree(enable_data);
	}
}
1736 
/*
 * event_enable_trigger_parse - @parse implementation for enable/disable_event
 *
 * Parses "system:event[:count][ if filter]" from @param_and_filter,
 * looks up the target event file and registers (or, with a leading '!'
 * in @glob, unregisters) a trigger that soft-enables or soft-disables
 * that event.  Returns 0 on success, errno otherwise.
 */
int event_enable_trigger_parse(struct event_command *cmd_ops,
			       struct trace_event_file *file,
			       char *glob, char *cmd, char *param_and_filter)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct trace_array *tr = file->tr;
	char *param, *filter;
	bool enable, remove;
	const char *system;
	const char *event;
	bool hist = false;
	int ret;

	remove = event_trigger_check_remove(glob);

	/* This command requires a "system:event" param */
	if (event_trigger_empty_param(param_and_filter))
		return -EINVAL;

	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, true);
	if (ret)
		return ret;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	/* The event to be enabled/disabled must exist in this instance */
	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		return ret;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	ret = -ENOMEM;

	enable_data = kzalloc_obj(*enable_data);
	if (!enable_data)
		return ret;

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;

	trigger_data = trigger_data_alloc(cmd_ops, cmd, param, enable_data);
	if (!trigger_data) {
		kfree(enable_data);
		return ret;
	}

	if (remove) {
		/*
		 * trigger_data here is only a template used to match the
		 * registered trigger; it was never visible to readers, so
		 * plain kfree() is sufficient.
		 */
		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		return ret;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_data);

	ret = event_trigger_parse_num(param, trigger_data);
	if (ret)
		goto out_free;

	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
	if (ret < 0)
		goto out_free;

	/* Don't let event modules unload while probe registered */
	ret = trace_event_try_get_ref(event_enable_file->event_call);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Put the target event into soft mode */
	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;

	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
	if (ret)
		goto out_disable;

	/* Drop the extra ref taken above; the registration holds its own */
	event_trigger_free(trigger_data);
	return ret;
 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	trace_event_put_ref(event_enable_file->event_call);
 out_free:
	event_trigger_reset_filter(cmd_ops, trigger_data);
	event_trigger_free(trigger_data);
	kfree(enable_data);

	return ret;
}
1844 
/*
 * Register an enable/disable_event trigger on @file, refusing duplicates
 * that target the same event file.  Returns 0 on success, errno otherwise.
 */
int event_enable_register_trigger(char *glob,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Reject a second trigger of the same type on the same target file */
	list_for_each_entry(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			return -EEXIST;
		}
	}

	if (data->cmd_ops->init) {
		ret = data->cmd_ops->init(data);
		if (ret < 0)
			return ret;
	}

	list_add_rcu(&data->list, &file->triggers);

	update_cond_flag(file);
	ret = trace_event_trigger_enable_disable(file, 1);
	if (ret < 0) {
		/* Enabling failed; back the trigger out again */
		list_del_rcu(&data->list);
		update_cond_flag(file);
	}
	return ret;
}
1882 
/*
 * Unregister the enable/disable_event trigger on @file that matches
 * @test (same trigger type and same target event file), if any.
 */
void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct event_trigger_data *data = NULL, *iter;
	struct enable_trigger_data *enable_data;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(iter, &file->triggers, list) {
		enable_data = iter->private_data;
		if (enable_data &&
		    (iter->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			data = iter;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* Release the removed trigger via its command's free() callback */
	if (data && data->cmd_ops->free)
		data->cmd_ops->free(data);
}
1910 
/* "enable_event" command: soft-enables another event when this one fires */
static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.parse			= event_enable_trigger_parse,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.set_filter		= set_trigger_filter,
	.trigger		= event_enable_trigger,
	.count_func		= event_enable_count_func,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1924 
/* "disable_event" command: soft-disables another event when this one fires */
static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.parse			= event_enable_trigger_parse,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.set_filter		= set_trigger_filter,
	.trigger		= event_enable_trigger,
	.count_func		= event_enable_count_func,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1938 
/* Undo register_trigger_enable_disable_cmds() on partial boot failure */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
1944 
/*
 * Register both "enable_event" and "disable_event" commands at boot;
 * if the second fails, the first is unregistered again.
 */
static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}
1958 
/*
 * Register both "traceon" and "traceoff" commands at boot;
 * if the second fails, the first is unregistered again.
 */
static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}
1972 
/*
 * Boot-time registration of all built-in trigger commands.  Each
 * registration warns on failure individually, so this always returns 0.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}
1984