xref: /linux/kernel/trace/trace_events_trigger.c (revision b9b77222d4ff6b5bb8f5d87fca20de0910618bb9)
1 /*
2  * trace_events_trigger - trace event triggers
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
19  */
20 
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/rculist.h>
26 
27 #include "trace.h"
28 
29 static LIST_HEAD(trigger_commands);
30 static DEFINE_MUTEX(trigger_cmd_mutex);
31 
32 void trigger_data_free(struct event_trigger_data *data)
33 {
34 	if (data->cmd_ops->set_filter)
35 		data->cmd_ops->set_filter(NULL, data, NULL);
36 
37 	synchronize_sched(); /* make sure current triggers exit before free */
38 	kfree(data);
39 }
40 
41 /**
42  * event_triggers_call - Call triggers associated with a trace event
43  * @file: The trace_event_file associated with the event
44  * @rec: The trace entry for the event, NULL for unconditional invocation
    * @event: The ring buffer event associated with @rec
45  *
46  * For each trigger associated with an event, invoke the trigger
47  * function registered with the associated trigger command.  If rec is
48  * non-NULL, it means that the trigger requires further processing and
49  * shouldn't be unconditionally invoked.  If rec is non-NULL and the
50  * trigger has a filter associated with it, rec will be checked against
51  * the filter and, if the record matches, the trigger will be invoked.
52  * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
53  * in any case until the current event is written, the trigger
54  * function isn't invoked but the bit associated with the deferred
55  * trigger is set in the return value.
56  *
60  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
61  *
62  * Return: an enum event_trigger_type value containing a set bit for
63  * any trigger that should be deferred, ETT_NONE if nothing to defer.
64  */
65 enum event_trigger_type
66 event_triggers_call(struct trace_event_file *file, void *rec,
67 		    struct ring_buffer_event *event)
68 {
69 	struct event_trigger_data *data;
70 	enum event_trigger_type tt = ETT_NONE;
71 	struct event_filter *filter;
72 
73 	if (list_empty(&file->triggers))
74 		return tt;
75 
76 	list_for_each_entry_rcu(data, &file->triggers, list) {
77 		if (data->paused)
78 			continue;
79 		if (!rec) {
80 			data->ops->func(data, rec, event);
81 			continue;
82 		}
83 		filter = rcu_dereference_sched(data->filter);
84 		if (filter && !filter_match_preds(filter, rec))
85 			continue;
86 		if (event_command_post_trigger(data->cmd_ops)) {
87 			tt |= data->cmd_ops->trigger_type;
88 			continue;
89 		}
90 		data->ops->func(data, rec, event);
91 	}
92 	return tt;
93 }
94 EXPORT_SYMBOL_GPL(event_triggers_call);
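/*
 * Illustrative usage sketch (not part of this file): a caller in the
 * event commit path is expected to invoke event_triggers_call() before
 * the event is written and, if any bits were deferred,
 * event_triggers_post_call() after the commit, roughly:
 *
 *	tt = event_triggers_call(file, entry, event);
 *	<write and commit the event to the ring buffer>
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 *
 * The real call sites live in the event commit code (e.g.
 * trace_event_buffer_commit()); the lines above are only a sketch.
 */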
95 
96 /**
97  * event_triggers_post_call - Call 'post_triggers' for a trace event
98  * @file: The trace_event_file associated with the event
99  * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
100  *
101  * For each trigger associated with an event, invoke the trigger
102  * function registered with the associated trigger command, if the
103  * corresponding bit is set in the tt enum passed into this function.
104  * See event_triggers_call() for details on how those bits are set.
105  *
106  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
107  */
108 void
109 event_triggers_post_call(struct trace_event_file *file,
110 			 enum event_trigger_type tt)
111 {
112 	struct event_trigger_data *data;
113 
114 	list_for_each_entry_rcu(data, &file->triggers, list) {
115 		if (data->paused)
116 			continue;
117 		if (data->cmd_ops->trigger_type & tt)
118 			data->ops->func(data, NULL, NULL);
119 	}
120 }
121 EXPORT_SYMBOL_GPL(event_triggers_post_call);
122 
123 #define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
124 
125 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
126 {
127 	struct trace_event_file *event_file = event_file_data(m->private);
128 
129 	if (t == SHOW_AVAILABLE_TRIGGERS)
130 		return NULL;
131 
132 	return seq_list_next(t, &event_file->triggers, pos);
133 }
134 
135 static void *trigger_start(struct seq_file *m, loff_t *pos)
136 {
137 	struct trace_event_file *event_file;
138 
139 	/* ->stop() is called even if ->start() fails */
140 	mutex_lock(&event_mutex);
141 	event_file = event_file_data(m->private);
142 	if (unlikely(!event_file))
143 		return ERR_PTR(-ENODEV);
144 
145 	if (list_empty(&event_file->triggers))
146 		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
147 
148 	return seq_list_start(&event_file->triggers, *pos);
149 }
150 
151 static void trigger_stop(struct seq_file *m, void *t)
152 {
153 	mutex_unlock(&event_mutex);
154 }
155 
156 static int trigger_show(struct seq_file *m, void *v)
157 {
158 	struct event_trigger_data *data;
159 	struct event_command *p;
160 
161 	if (v == SHOW_AVAILABLE_TRIGGERS) {
162 		seq_puts(m, "# Available triggers:\n");
163 		seq_putc(m, '#');
164 		mutex_lock(&trigger_cmd_mutex);
165 		list_for_each_entry_reverse(p, &trigger_commands, list)
166 			seq_printf(m, " %s", p->name);
167 		seq_putc(m, '\n');
168 		mutex_unlock(&trigger_cmd_mutex);
169 		return 0;
170 	}
171 
172 	data = list_entry(v, struct event_trigger_data, list);
173 	data->ops->print(m, data->ops, data);
174 
175 	return 0;
176 }
177 
178 static const struct seq_operations event_triggers_seq_ops = {
179 	.start = trigger_start,
180 	.next = trigger_next,
181 	.stop = trigger_stop,
182 	.show = trigger_show,
183 };
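/*
 * For illustration: while no triggers are set, reading a per-event
 * "trigger" file hits the SHOW_AVAILABLE_TRIGGERS case in trigger_show()
 * and produces output along the lines of (exact list depends on config):
 *
 *	# Available triggers:
 *	# traceon traceoff snapshot stacktrace enable_event disable_event
 *
 * Once triggers are registered, each one is printed on its own line via
 * its ops->print() callback instead (see event_trigger_print() below).
 */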
184 
185 static int event_trigger_regex_open(struct inode *inode, struct file *file)
186 {
187 	int ret = 0;
188 
189 	mutex_lock(&event_mutex);
190 
191 	if (unlikely(!event_file_data(file))) {
192 		mutex_unlock(&event_mutex);
193 		return -ENODEV;
194 	}
195 
196 	if ((file->f_mode & FMODE_WRITE) &&
197 	    (file->f_flags & O_TRUNC)) {
198 		struct trace_event_file *event_file;
199 		struct event_command *p;
200 
201 		event_file = event_file_data(file);
202 
203 		list_for_each_entry(p, &trigger_commands, list) {
204 			if (p->unreg_all)
205 				p->unreg_all(event_file);
206 		}
207 	}
208 
209 	if (file->f_mode & FMODE_READ) {
210 		ret = seq_open(file, &event_triggers_seq_ops);
211 		if (!ret) {
212 			struct seq_file *m = file->private_data;
213 			m->private = file;
214 		}
215 	}
216 
217 	mutex_unlock(&event_mutex);
218 
219 	return ret;
220 }
221 
222 static int trigger_process_regex(struct trace_event_file *file, char *buff)
223 {
224 	char *command, *next = buff;
225 	struct event_command *p;
226 	int ret = -EINVAL;
227 
228 	command = strsep(&next, ": \t");
229 	command = (command[0] != '!') ? command : command + 1;
230 
231 	mutex_lock(&trigger_cmd_mutex);
232 	list_for_each_entry(p, &trigger_commands, list) {
233 		if (strcmp(p->name, command) == 0) {
234 			ret = p->func(p, file, buff, command, next);
235 			goto out_unlock;
236 		}
237 	}
238  out_unlock:
239 	mutex_unlock(&trigger_cmd_mutex);
240 
241 	return ret;
242 }
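/*
 * Examples of strings handled by trigger_process_regex(), following the
 * trigger syntax documented in Documentation/trace/events.rst (filter
 * fields are illustrative):
 *
 *	traceoff
 *	snapshot:1
 *	stacktrace:5 if prev_prio < 10
 *	!traceoff
 *
 * strsep() above peels off the command name; a leading '!' is skipped
 * only for the name lookup and stays in 'buff', so the command's own
 * func() sees it and unregisters the trigger.
 */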
243 
244 static ssize_t event_trigger_regex_write(struct file *file,
245 					 const char __user *ubuf,
246 					 size_t cnt, loff_t *ppos)
247 {
248 	struct trace_event_file *event_file;
249 	ssize_t ret;
250 	char *buf;
251 
252 	if (!cnt)
253 		return 0;
254 
255 	if (cnt >= PAGE_SIZE)
256 		return -EINVAL;
257 
258 	buf = memdup_user_nul(ubuf, cnt);
259 	if (IS_ERR(buf))
260 		return PTR_ERR(buf);
261 
262 	strim(buf);
263 
264 	mutex_lock(&event_mutex);
265 	event_file = event_file_data(file);
266 	if (unlikely(!event_file)) {
267 		mutex_unlock(&event_mutex);
268 		kfree(buf);
269 		return -ENODEV;
270 	}
271 	ret = trigger_process_regex(event_file, buf);
272 	mutex_unlock(&event_mutex);
273 
274 	kfree(buf);
275 	if (ret < 0)
276 		goto out;
277 
278 	*ppos += cnt;
279 	ret = cnt;
280  out:
281 	return ret;
282 }
283 
284 static int event_trigger_regex_release(struct inode *inode, struct file *file)
285 {
286 	mutex_lock(&event_mutex);
287 
288 	if (file->f_mode & FMODE_READ)
289 		seq_release(inode, file);
290 
291 	mutex_unlock(&event_mutex);
292 
293 	return 0;
294 }
295 
296 static ssize_t
297 event_trigger_write(struct file *filp, const char __user *ubuf,
298 		    size_t cnt, loff_t *ppos)
299 {
300 	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
301 }
302 
303 static int
304 event_trigger_open(struct inode *inode, struct file *filp)
305 {
306 	return event_trigger_regex_open(inode, filp);
307 }
308 
309 static int
310 event_trigger_release(struct inode *inode, struct file *file)
311 {
312 	return event_trigger_regex_release(inode, file);
313 }
314 
315 const struct file_operations event_trigger_fops = {
316 	.open = event_trigger_open,
317 	.read = seq_read,
318 	.write = event_trigger_write,
319 	.llseek = tracing_lseek,
320 	.release = event_trigger_release,
321 };
322 
323 /*
324  * Currently we only register event commands from __init, so mark this
325  * __init too.
326  */
327 __init int register_event_command(struct event_command *cmd)
328 {
329 	struct event_command *p;
330 	int ret = 0;
331 
332 	mutex_lock(&trigger_cmd_mutex);
333 	list_for_each_entry(p, &trigger_commands, list) {
334 		if (strcmp(cmd->name, p->name) == 0) {
335 			ret = -EBUSY;
336 			goto out_unlock;
337 		}
338 	}
339 	list_add(&cmd->list, &trigger_commands);
340  out_unlock:
341 	mutex_unlock(&trigger_cmd_mutex);
342 
343 	return ret;
344 }
345 
346 /*
347  * Currently we only unregister event commands from __init, so mark
348  * this __init too.
349  */
350 __init int unregister_event_command(struct event_command *cmd)
351 {
352 	struct event_command *p, *n;
353 	int ret = -ENODEV;
354 
355 	mutex_lock(&trigger_cmd_mutex);
356 	list_for_each_entry_safe(p, n, &trigger_commands, list) {
357 		if (strcmp(cmd->name, p->name) == 0) {
358 			ret = 0;
359 			list_del_init(&p->list);
360 			goto out_unlock;
361 		}
362 	}
363  out_unlock:
364 	mutex_unlock(&trigger_cmd_mutex);
365 
366 	return ret;
367 }
368 
369 /**
370  * event_trigger_print - Generic event_trigger_ops @print implementation
371  * @name: The name of the event trigger
372  * @m: The seq_file being printed to
373  * @data: Trigger-specific data
374  * @filter_str: filter_str to print, if present
375  *
376  * Common implementation for event triggers to print themselves.
377  *
378  * Usually wrapped by a function that simply sets the @name of the
379  * trigger command and then invokes this.
380  *
381  * Return: 0 on success, errno otherwise
382  */
383 static int
384 event_trigger_print(const char *name, struct seq_file *m,
385 		    void *data, char *filter_str)
386 {
387 	long count = (long)data;
388 
389 	seq_puts(m, name);
390 
391 	if (count == -1)
392 		seq_puts(m, ":unlimited");
393 	else
394 		seq_printf(m, ":count=%ld", count);
395 
396 	if (filter_str)
397 		seq_printf(m, " if %s\n", filter_str);
398 	else
399 		seq_putc(m, '\n');
400 
401 	return 0;
402 }
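/*
 * For example, a "traceoff" trigger registered with a count of 5 and a
 * filter prints as:
 *
 *	traceoff:count=5 if prev_prio < 10
 *
 * while one registered without a count prints as "traceoff:unlimited".
 * (The filter field name is only illustrative.)
 */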
403 
404 /**
405  * event_trigger_init - Generic event_trigger_ops @init implementation
406  * @ops: The trigger ops associated with the trigger
407  * @data: Trigger-specific data
408  *
409  * Common implementation of event trigger initialization.
410  *
411  * Usually used directly as the @init method in event trigger
412  * implementations.
413  *
414  * Return: 0 on success, errno otherwise
415  */
416 int event_trigger_init(struct event_trigger_ops *ops,
417 		       struct event_trigger_data *data)
418 {
419 	data->ref++;
420 	return 0;
421 }
422 
423 /**
424  * event_trigger_free - Generic event_trigger_ops @free implementation
425  * @ops: The trigger ops associated with the trigger
426  * @data: Trigger-specific data
427  *
428  * Common implementation of event trigger de-initialization.
429  *
430  * Usually used directly as the @free method in event trigger
431  * implementations.
432  */
433 static void
434 event_trigger_free(struct event_trigger_ops *ops,
435 		   struct event_trigger_data *data)
436 {
437 	if (WARN_ON_ONCE(data->ref <= 0))
438 		return;
439 
440 	data->ref--;
441 	if (!data->ref)
442 		trigger_data_free(data);
443 }
444 
445 int trace_event_trigger_enable_disable(struct trace_event_file *file,
446 				       int trigger_enable)
447 {
448 	int ret = 0;
449 
450 	if (trigger_enable) {
451 		if (atomic_inc_return(&file->tm_ref) > 1)
452 			return ret;
453 		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
454 		ret = trace_event_enable_disable(file, 1, 1);
455 	} else {
456 		if (atomic_dec_return(&file->tm_ref) > 0)
457 			return ret;
458 		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
459 		ret = trace_event_enable_disable(file, 0, 1);
460 	}
461 
462 	return ret;
463 }
464 
465 /**
466  * clear_event_triggers - Clear all triggers associated with a trace array
467  * @tr: The trace array to clear
468  *
469  * For each trigger, the triggering event has its tm_ref decremented
470  * via trace_event_trigger_enable_disable(), and any associated event
471  * (in the case of enable/disable_event triggers) will have its sm_ref
472  * decremented via free()->trace_event_enable_disable().  That
473  * combination effectively reverses the soft-mode/trigger state added
474  * by trigger registration.
475  *
476  * Must be called with event_mutex held.
477  */
478 void
479 clear_event_triggers(struct trace_array *tr)
480 {
481 	struct trace_event_file *file;
482 
483 	list_for_each_entry(file, &tr->events, list) {
484 		struct event_trigger_data *data, *n;
485 		list_for_each_entry_safe(data, n, &file->triggers, list) {
486 			trace_event_trigger_enable_disable(file, 0);
487 			list_del_rcu(&data->list);
488 			if (data->ops->free)
489 				data->ops->free(data->ops, data);
490 		}
491 	}
492 }
493 
494 /**
495  * update_cond_flag - Set or reset the TRIGGER_COND bit
496  * @file: The trace_event_file associated with the event
497  *
498  * If an event has triggers and any of those triggers has a filter or
499  * a post_trigger, trigger invocation needs to be deferred until after
500  * the current event has logged its data, and the event should have
501  * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
502  * cleared.
503  */
504 void update_cond_flag(struct trace_event_file *file)
505 {
506 	struct event_trigger_data *data;
507 	bool set_cond = false;
508 
509 	list_for_each_entry_rcu(data, &file->triggers, list) {
510 		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
511 		    event_command_needs_rec(data->cmd_ops)) {
512 			set_cond = true;
513 			break;
514 		}
515 	}
516 
517 	if (set_cond)
518 		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
519 	else
520 		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
521 }
522 
523 /**
524  * register_trigger - Generic event_command @reg implementation
525  * @glob: The raw string used to register the trigger
526  * @ops: The trigger ops associated with the trigger
527  * @data: Trigger-specific data to associate with the trigger
528  * @file: The trace_event_file associated with the event
529  *
530  * Common implementation for event trigger registration.
531  *
532  * Usually used directly as the @reg method in event command
533  * implementations.
534  *
535  * Return: the number of triggers registered (i.e. 1) on success, 0 or errno otherwise
536  */
537 static int register_trigger(char *glob, struct event_trigger_ops *ops,
538 			    struct event_trigger_data *data,
539 			    struct trace_event_file *file)
540 {
541 	struct event_trigger_data *test;
542 	int ret = 0;
543 
544 	list_for_each_entry_rcu(test, &file->triggers, list) {
545 		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
546 			ret = -EEXIST;
547 			goto out;
548 		}
549 	}
550 
551 	if (data->ops->init) {
552 		ret = data->ops->init(data->ops, data);
553 		if (ret < 0)
554 			goto out;
555 	}
556 
557 	list_add_rcu(&data->list, &file->triggers);
558 	ret++;
559 
560 	update_cond_flag(file);
561 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
562 		list_del_rcu(&data->list);
563 		update_cond_flag(file);
564 		ret--;
565 	}
566 out:
567 	return ret;
568 }
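/*
 * Note on the return convention above: a successful registration returns
 * the number of triggers added (1), while 0 means nothing was registered
 * (e.g. enabling the event failed).  event_trigger_callback() below relies
 * on this and converts a 0 return into an error.
 */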
569 
570 /**
571  * unregister_trigger - Generic event_command @unreg implementation
572  * @glob: The raw string used to register the trigger
573  * @ops: The trigger ops associated with the trigger
574  * @test: Trigger-specific data used to find the trigger to remove
575  * @file: The trace_event_file associated with the event
576  *
577  * Common implementation for event trigger unregistration.
578  *
579  * Usually used directly as the @unreg method in event command
580  * implementations.
581  */
582 void unregister_trigger(char *glob, struct event_trigger_ops *ops,
583 			struct event_trigger_data *test,
584 			struct trace_event_file *file)
585 {
586 	struct event_trigger_data *data;
587 	bool unregistered = false;
588 
589 	list_for_each_entry_rcu(data, &file->triggers, list) {
590 		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
591 			unregistered = true;
592 			list_del_rcu(&data->list);
593 			trace_event_trigger_enable_disable(file, 0);
594 			update_cond_flag(file);
595 			break;
596 		}
597 	}
598 
599 	if (unregistered && data->ops->free)
600 		data->ops->free(data->ops, data);
601 }
602 
603 /**
604  * event_trigger_callback - Generic event_command @func implementation
605  * @cmd_ops: The command ops, used for trigger registration
606  * @file: The trace_event_file associated with the event
607  * @glob: The raw string used to register the trigger
608  * @cmd: The cmd portion of the string used to register the trigger
609  * @param: The params portion of the string used to register the trigger
610  *
611  * Common implementation for event command parsing and trigger
612  * instantiation.
613  *
614  * Usually used directly as the @func method in event command
615  * implementations.
616  *
617  * Return: 0 on success, errno otherwise
618  */
619 static int
620 event_trigger_callback(struct event_command *cmd_ops,
621 		       struct trace_event_file *file,
622 		       char *glob, char *cmd, char *param)
623 {
624 	struct event_trigger_data *trigger_data;
625 	struct event_trigger_ops *trigger_ops;
626 	char *trigger = NULL;
627 	char *number;
628 	int ret;
629 
630 	/* separate the trigger from the filter (t:n [if filter]) */
631 	if (param && isdigit(param[0]))
632 		trigger = strsep(&param, " \t");
633 
634 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
635 
636 	ret = -ENOMEM;
637 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
638 	if (!trigger_data)
639 		goto out;
640 
641 	trigger_data->count = -1;
642 	trigger_data->ops = trigger_ops;
643 	trigger_data->cmd_ops = cmd_ops;
644 	trigger_data->private_data = file;
645 	INIT_LIST_HEAD(&trigger_data->list);
646 	INIT_LIST_HEAD(&trigger_data->named_list);
647 
648 	if (glob[0] == '!') {
649 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
650 		kfree(trigger_data);
651 		ret = 0;
652 		goto out;
653 	}
654 
655 	if (trigger) {
656 		number = strsep(&trigger, ":");
657 
658 		ret = -EINVAL;
659 		if (!strlen(number))
660 			goto out_free;
661 
662 		/*
663 		 * We use the callback data field (which is a pointer)
664 		 * as our counter.
665 		 */
666 		ret = kstrtoul(number, 0, &trigger_data->count);
667 		if (ret)
668 			goto out_free;
669 	}
670 
671 	if (!param) /* if param is non-empty, it's supposed to be a filter */
672 		goto out_reg;
673 
674 	if (!cmd_ops->set_filter)
675 		goto out_reg;
676 
677 	ret = cmd_ops->set_filter(param, trigger_data, file);
678 	if (ret < 0)
679 		goto out_free;
680 
681  out_reg:
682 	/* Up the trigger_data count to make sure reg doesn't free it on failure */
683 	event_trigger_init(trigger_ops, trigger_data);
684 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
685 	/*
686 	 * On success the above returns the number of triggers registered,
687 	 * but it returns zero if nothing was actually registered.
688 	 * Consider registering nothing a failure too.
689 	 */
690 	if (!ret) {
691 		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
692 		ret = -ENOENT;
693 	} else if (ret > 0)
694 		ret = 0;
695 
696 	/* Drop the trigger_data reference, freeing it if no longer used */
697 	event_trigger_free(trigger_ops, trigger_data);
698  out:
699 	return ret;
700 
701  out_free:
702 	if (cmd_ops->set_filter)
703 		cmd_ops->set_filter(NULL, trigger_data, NULL);
704 	kfree(trigger_data);
705 	goto out;
706 }
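/*
 * Parsing walk-through (illustrative): writing
 *
 *	traceon:10 if common_pid != 0
 *
 * to a per-event "trigger" file reaches event_trigger_callback() with
 * glob = cmd = "traceon" and param = "10 if common_pid != 0".  Because
 * param starts with a digit, strsep() peels off trigger = "10", which
 * kstrtoul() stores in trigger_data->count; the remaining param
 * ("if common_pid != 0") is handed to cmd_ops->set_filter().  The filter
 * expression is only an example.
 */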
707 
708 /**
709  * set_trigger_filter - Generic event_command @set_filter implementation
710  * @filter_str: The filter string for the trigger, NULL to remove filter
711  * @trigger_data: Trigger-specific data
712  * @file: The trace_event_file associated with the event
713  *
714  * Common implementation for event command filter parsing and filter
715  * instantiation.
716  *
717  * Usually used directly as the @set_filter method in event command
718  * implementations.
719  *
720  * Also used to remove a filter (if filter_str = NULL).
721  *
722  * Return: 0 on success, errno otherwise
723  */
724 int set_trigger_filter(char *filter_str,
725 		       struct event_trigger_data *trigger_data,
726 		       struct trace_event_file *file)
727 {
728 	struct event_trigger_data *data = trigger_data;
729 	struct event_filter *filter = NULL, *tmp;
730 	int ret = -EINVAL;
731 	char *s;
732 
733 	if (!filter_str) /* clear the current filter */
734 		goto assign;
735 
736 	s = strsep(&filter_str, " \t");
737 
738 	if (!strlen(s) || strcmp(s, "if") != 0)
739 		goto out;
740 
741 	if (!filter_str)
742 		goto out;
743 
744 	/* The filter is for the 'trigger' event, not the triggered event */
745 	ret = create_event_filter(file->event_call, filter_str, false, &filter);
746 	if (ret)
747 		goto out;
748  assign:
749 	tmp = rcu_access_pointer(data->filter);
750 
751 	rcu_assign_pointer(data->filter, filter);
752 
753 	if (tmp) {
754 		/* Make sure the call is done with the filter */
755 		synchronize_sched();
756 		free_event_filter(tmp);
757 	}
758 
759 	kfree(data->filter_str);
760 	data->filter_str = NULL;
761 
762 	if (filter_str) {
763 		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
764 		if (!data->filter_str) {
765 			free_event_filter(rcu_access_pointer(data->filter));
766 			data->filter = NULL;
767 			ret = -ENOMEM;
768 		}
769 	}
770  out:
771 	return ret;
772 }
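/*
 * For example, given filter_str = "if prev_prio < 10", the strsep() above
 * consumes the leading "if" and the rest ("prev_prio < 10") is compiled by
 * create_event_filter() against the triggering event and also saved in
 * data->filter_str for display.  Passing filter_str = NULL instead clears
 * any existing filter.  (The field name is illustrative and must belong to
 * the triggering event.)
 */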
773 
774 static LIST_HEAD(named_triggers);
775 
776 /**
777  * find_named_trigger - Find the common named trigger associated with @name
778  * @name: The name of the set of named triggers to find the common data for
779  *
780  * Named triggers are sets of triggers that share a common set of
781  * trigger data.  The first named trigger registered with a given name
782  * owns the common trigger data that the others subsequently
783  * registered with the same name will reference.  This function
784  * returns the common trigger data associated with that first
785  * registered instance.
786  *
787  * Return: the common trigger data for the given named trigger on
788  * success, NULL otherwise.
789  */
790 struct event_trigger_data *find_named_trigger(const char *name)
791 {
792 	struct event_trigger_data *data;
793 
794 	if (!name)
795 		return NULL;
796 
797 	list_for_each_entry(data, &named_triggers, named_list) {
798 		if (data->named_data)
799 			continue;
800 		if (strcmp(data->name, name) == 0)
801 			return data;
802 	}
803 
804 	return NULL;
805 }
806 
807 /**
808  * is_named_trigger - determine if a given trigger is a named trigger
809  * @test: The trigger data to test
810  *
811  * Return: true if 'test' is a named trigger, false otherwise.
812  */
813 bool is_named_trigger(struct event_trigger_data *test)
814 {
815 	struct event_trigger_data *data;
816 
817 	list_for_each_entry(data, &named_triggers, named_list) {
818 		if (test == data)
819 			return true;
820 	}
821 
822 	return false;
823 }
824 
825 /**
826  * save_named_trigger - save the trigger in the named trigger list
827  * @name: The name of the named trigger set
828  * @data: The trigger data to save
829  *
830  * Return: 0 if successful, negative error otherwise.
831  */
832 int save_named_trigger(const char *name, struct event_trigger_data *data)
833 {
834 	data->name = kstrdup(name, GFP_KERNEL);
835 	if (!data->name)
836 		return -ENOMEM;
837 
838 	list_add(&data->named_list, &named_triggers);
839 
840 	return 0;
841 }
842 
843 /**
844  * del_named_trigger - delete a trigger from the named trigger list
845  * @data: The trigger data to delete
846  */
847 void del_named_trigger(struct event_trigger_data *data)
848 {
849 	kfree(data->name);
850 	data->name = NULL;
851 
852 	list_del(&data->named_list);
853 }
854 
855 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
856 {
857 	struct event_trigger_data *test;
858 
859 	list_for_each_entry(test, &named_triggers, named_list) {
860 		if (strcmp(test->name, data->name) == 0) {
861 			if (pause) {
862 				test->paused_tmp = test->paused;
863 				test->paused = true;
864 			} else {
865 				test->paused = test->paused_tmp;
866 			}
867 		}
868 	}
869 }
870 
871 /**
872  * pause_named_trigger - Pause all named triggers with the same name
873  * @data: The trigger data of a named trigger to pause
874  *
875  * Pauses a named trigger along with all other triggers having the
876  * same name.  Because named triggers share a common set of data,
877  * pausing only one is meaningless, so pausing one named trigger needs
878  * to pause all triggers with the same name.
879  */
880 void pause_named_trigger(struct event_trigger_data *data)
881 {
882 	__pause_named_trigger(data, true);
883 }
884 
885 /**
886  * unpause_named_trigger - Un-pause all named triggers with the same name
887  * @data: The trigger data of a named trigger to unpause
888  *
889  * Un-pauses a named trigger along with all other triggers having the
890  * same name.  Because named triggers share a common set of data,
891  * unpausing only one is meaningless, so unpausing one named trigger
892  * needs to unpause all triggers with the same name.
893  */
894 void unpause_named_trigger(struct event_trigger_data *data)
895 {
896 	__pause_named_trigger(data, false);
897 }
898 
899 /**
900  * set_named_trigger_data - Associate common named trigger data
901  * @data: The trigger data to associate with the common named data
    * @named_data: The common trigger data that @data will point to
902  *
903  * Named triggers are sets of triggers that share a common set of
904  * trigger data.  The first named trigger registered with a given name
905  * owns the common trigger data that the others subsequently
906  * registered with the same name will reference.  This function
907  * associates the common trigger data from the first trigger with the
908  * given trigger.
909  */
910 void set_named_trigger_data(struct event_trigger_data *data,
911 			    struct event_trigger_data *named_data)
912 {
913 	data->named_data = named_data;
914 }
915 
916 struct event_trigger_data *
917 get_named_trigger_data(struct event_trigger_data *data)
918 {
919 	return data->named_data;
920 }
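/*
 * Illustrative note: named triggers are used by hist triggers, e.g. giving
 * triggers on two different events the same 'name=foo' makes them share one
 * histogram.  The first registration owns the common event_trigger_data;
 * later ones look it up with find_named_trigger() and attach to it via
 * set_named_trigger_data().
 */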
921 
922 static void
923 traceon_trigger(struct event_trigger_data *data, void *rec,
924 		struct ring_buffer_event *event)
925 {
926 	if (tracing_is_on())
927 		return;
928 
929 	tracing_on();
930 }
931 
932 static void
933 traceon_count_trigger(struct event_trigger_data *data, void *rec,
934 		      struct ring_buffer_event *event)
935 {
936 	if (tracing_is_on())
937 		return;
938 
939 	if (!data->count)
940 		return;
941 
942 	if (data->count != -1)
943 		(data->count)--;
944 
945 	tracing_on();
946 }
947 
948 static void
949 traceoff_trigger(struct event_trigger_data *data, void *rec,
950 		 struct ring_buffer_event *event)
951 {
952 	if (!tracing_is_on())
953 		return;
954 
955 	tracing_off();
956 }
957 
958 static void
959 traceoff_count_trigger(struct event_trigger_data *data, void *rec,
960 		       struct ring_buffer_event *event)
961 {
962 	if (!tracing_is_on())
963 		return;
964 
965 	if (!data->count)
966 		return;
967 
968 	if (data->count != -1)
969 		(data->count)--;
970 
971 	tracing_off();
972 }
973 
974 static int
975 traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
976 		      struct event_trigger_data *data)
977 {
978 	return event_trigger_print("traceon", m, (void *)data->count,
979 				   data->filter_str);
980 }
981 
982 static int
983 traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
984 		       struct event_trigger_data *data)
985 {
986 	return event_trigger_print("traceoff", m, (void *)data->count,
987 				   data->filter_str);
988 }
989 
990 static struct event_trigger_ops traceon_trigger_ops = {
991 	.func			= traceon_trigger,
992 	.print			= traceon_trigger_print,
993 	.init			= event_trigger_init,
994 	.free			= event_trigger_free,
995 };
996 
997 static struct event_trigger_ops traceon_count_trigger_ops = {
998 	.func			= traceon_count_trigger,
999 	.print			= traceon_trigger_print,
1000 	.init			= event_trigger_init,
1001 	.free			= event_trigger_free,
1002 };
1003 
1004 static struct event_trigger_ops traceoff_trigger_ops = {
1005 	.func			= traceoff_trigger,
1006 	.print			= traceoff_trigger_print,
1007 	.init			= event_trigger_init,
1008 	.free			= event_trigger_free,
1009 };
1010 
1011 static struct event_trigger_ops traceoff_count_trigger_ops = {
1012 	.func			= traceoff_count_trigger,
1013 	.print			= traceoff_trigger_print,
1014 	.init			= event_trigger_init,
1015 	.free			= event_trigger_free,
1016 };
1017 
1018 static struct event_trigger_ops *
1019 onoff_get_trigger_ops(char *cmd, char *param)
1020 {
1021 	struct event_trigger_ops *ops;
1022 
1023 	/* we register both traceon and traceoff to this callback */
1024 	if (strcmp(cmd, "traceon") == 0)
1025 		ops = param ? &traceon_count_trigger_ops :
1026 			&traceon_trigger_ops;
1027 	else
1028 		ops = param ? &traceoff_count_trigger_ops :
1029 			&traceoff_trigger_ops;
1030 
1031 	return ops;
1032 }
1033 
1034 static struct event_command trigger_traceon_cmd = {
1035 	.name			= "traceon",
1036 	.trigger_type		= ETT_TRACE_ONOFF,
1037 	.func			= event_trigger_callback,
1038 	.reg			= register_trigger,
1039 	.unreg			= unregister_trigger,
1040 	.get_trigger_ops	= onoff_get_trigger_ops,
1041 	.set_filter		= set_trigger_filter,
1042 };
1043 
1044 static struct event_command trigger_traceoff_cmd = {
1045 	.name			= "traceoff",
1046 	.trigger_type		= ETT_TRACE_ONOFF,
1047 	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1048 	.func			= event_trigger_callback,
1049 	.reg			= register_trigger,
1050 	.unreg			= unregister_trigger,
1051 	.get_trigger_ops	= onoff_get_trigger_ops,
1052 	.set_filter		= set_trigger_filter,
1053 };
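/*
 * Usage examples for the two commands above (trigger files live under the
 * tracefs events directory, e.g. /sys/kernel/tracing/events/...; filter
 * fields are illustrative):
 *
 *	echo 'traceoff' > events/sched/sched_switch/trigger
 *	echo 'traceon:5 if prev_prio < 10' > events/sched/sched_switch/trigger
 *	echo '!traceoff' > events/sched/sched_switch/trigger
 *
 * Only "traceoff" is marked EVENT_CMD_FL_POST_TRIGGER, so it is deferred
 * until the triggering event has been logged; "traceon" runs as soon as
 * the event fires.
 */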
1054 
1055 #ifdef CONFIG_TRACER_SNAPSHOT
1056 static void
1057 snapshot_trigger(struct event_trigger_data *data, void *rec,
1058 		 struct ring_buffer_event *event)
1059 {
1060 	struct trace_event_file *file = data->private_data;
1061 
1062 	if (file)
1063 		tracing_snapshot_instance(file->tr);
1064 	else
1065 		tracing_snapshot();
1066 }
1067 
1068 static void
1069 snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1070 		       struct ring_buffer_event *event)
1071 {
1072 	if (!data->count)
1073 		return;
1074 
1075 	if (data->count != -1)
1076 		(data->count)--;
1077 
1078 	snapshot_trigger(data, rec, event);
1079 }
1080 
1081 static int
1082 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1083 			  struct event_trigger_data *data,
1084 			  struct trace_event_file *file)
1085 {
1086 	int ret = register_trigger(glob, ops, data, file);
1087 
1088 	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1089 		unregister_trigger(glob, ops, data, file);
1090 		ret = 0;
1091 	}
1092 
1093 	return ret;
1094 }
1095 
1096 static int
1097 snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1098 		       struct event_trigger_data *data)
1099 {
1100 	return event_trigger_print("snapshot", m, (void *)data->count,
1101 				   data->filter_str);
1102 }
1103 
1104 static struct event_trigger_ops snapshot_trigger_ops = {
1105 	.func			= snapshot_trigger,
1106 	.print			= snapshot_trigger_print,
1107 	.init			= event_trigger_init,
1108 	.free			= event_trigger_free,
1109 };
1110 
1111 static struct event_trigger_ops snapshot_count_trigger_ops = {
1112 	.func			= snapshot_count_trigger,
1113 	.print			= snapshot_trigger_print,
1114 	.init			= event_trigger_init,
1115 	.free			= event_trigger_free,
1116 };
1117 
1118 static struct event_trigger_ops *
1119 snapshot_get_trigger_ops(char *cmd, char *param)
1120 {
1121 	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1122 }
1123 
1124 static struct event_command trigger_snapshot_cmd = {
1125 	.name			= "snapshot",
1126 	.trigger_type		= ETT_SNAPSHOT,
1127 	.func			= event_trigger_callback,
1128 	.reg			= register_snapshot_trigger,
1129 	.unreg			= unregister_trigger,
1130 	.get_trigger_ops	= snapshot_get_trigger_ops,
1131 	.set_filter		= set_trigger_filter,
1132 };
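/*
 * Usage example (illustrative filter):
 *
 *	echo 'snapshot:1 if prev_prio < 10' > events/sched/sched_switch/trigger
 *
 * register_snapshot_trigger() above allocates the snapshot buffer for the
 * instance at registration time, so the triggered snapshot itself never
 * has to allocate memory in the tracing path; if that allocation fails,
 * the trigger is unregistered again and the write fails.
 */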
1133 
1134 static __init int register_trigger_snapshot_cmd(void)
1135 {
1136 	int ret;
1137 
1138 	ret = register_event_command(&trigger_snapshot_cmd);
1139 	WARN_ON(ret < 0);
1140 
1141 	return ret;
1142 }
1143 #else
1144 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1145 #endif /* CONFIG_TRACER_SNAPSHOT */
1146 
1147 #ifdef CONFIG_STACKTRACE
1148 #ifdef CONFIG_UNWINDER_ORC
1149 /* Skip 2:
1150  *   event_triggers_post_call()
1151  *   trace_event_raw_event_xxx()
1152  */
1153 # define STACK_SKIP 2
1154 #else
1155 /*
1156  * Skip 4:
1157  *   stacktrace_trigger()
1158  *   event_triggers_post_call()
1159  *   trace_event_buffer_commit()
1160  *   trace_event_raw_event_xxx()
1161  */
1162 #define STACK_SKIP 4
1163 #endif
1164 
1165 static void
1166 stacktrace_trigger(struct event_trigger_data *data, void *rec,
1167 		   struct ring_buffer_event *event)
1168 {
1169 	trace_dump_stack(STACK_SKIP);
1170 }
1171 
1172 static void
1173 stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1174 			 struct ring_buffer_event *event)
1175 {
1176 	if (!data->count)
1177 		return;
1178 
1179 	if (data->count != -1)
1180 		(data->count)--;
1181 
1182 	stacktrace_trigger(data, rec, event);
1183 }
1184 
1185 static int
1186 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1187 			 struct event_trigger_data *data)
1188 {
1189 	return event_trigger_print("stacktrace", m, (void *)data->count,
1190 				   data->filter_str);
1191 }
1192 
1193 static struct event_trigger_ops stacktrace_trigger_ops = {
1194 	.func			= stacktrace_trigger,
1195 	.print			= stacktrace_trigger_print,
1196 	.init			= event_trigger_init,
1197 	.free			= event_trigger_free,
1198 };
1199 
1200 static struct event_trigger_ops stacktrace_count_trigger_ops = {
1201 	.func			= stacktrace_count_trigger,
1202 	.print			= stacktrace_trigger_print,
1203 	.init			= event_trigger_init,
1204 	.free			= event_trigger_free,
1205 };
1206 
1207 static struct event_trigger_ops *
1208 stacktrace_get_trigger_ops(char *cmd, char *param)
1209 {
1210 	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1211 }
1212 
1213 static struct event_command trigger_stacktrace_cmd = {
1214 	.name			= "stacktrace",
1215 	.trigger_type		= ETT_STACKTRACE,
1216 	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1217 	.func			= event_trigger_callback,
1218 	.reg			= register_trigger,
1219 	.unreg			= unregister_trigger,
1220 	.get_trigger_ops	= stacktrace_get_trigger_ops,
1221 	.set_filter		= set_trigger_filter,
1222 };
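/*
 * Usage example (illustrative filter):
 *
 *	echo 'stacktrace:5 if common_pid != 0' > events/<system>/<event>/trigger
 *
 * The command is EVENT_CMD_FL_POST_TRIGGER, so stacktrace_trigger() runs
 * from event_triggers_post_call() after the event is logged; the
 * STACK_SKIP values above drop those internal frames so the dumped stack
 * starts at the traced caller.
 */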
1223 
1224 static __init int register_trigger_stacktrace_cmd(void)
1225 {
1226 	int ret;
1227 
1228 	ret = register_event_command(&trigger_stacktrace_cmd);
1229 	WARN_ON(ret < 0);
1230 
1231 	return ret;
1232 }
1233 #else
1234 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1235 #endif /* CONFIG_STACKTRACE */
1236 
1237 static __init void unregister_trigger_traceon_traceoff_cmds(void)
1238 {
1239 	unregister_event_command(&trigger_traceon_cmd);
1240 	unregister_event_command(&trigger_traceoff_cmd);
1241 }
1242 
1243 static void
1244 event_enable_trigger(struct event_trigger_data *data, void *rec,
1245 		     struct ring_buffer_event *event)
1246 {
1247 	struct enable_trigger_data *enable_data = data->private_data;
1248 
1249 	if (enable_data->enable)
1250 		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1251 	else
1252 		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1253 }
1254 
1255 static void
1256 event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1257 			   struct ring_buffer_event *event)
1258 {
1259 	struct enable_trigger_data *enable_data = data->private_data;
1260 
1261 	if (!data->count)
1262 		return;
1263 
1264 	/* Skip if the event is in a state we want to switch to */
1265 	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1266 		return;
1267 
1268 	if (data->count != -1)
1269 		(data->count)--;
1270 
1271 	event_enable_trigger(data, rec, event);
1272 }
1273 
1274 int event_enable_trigger_print(struct seq_file *m,
1275 			       struct event_trigger_ops *ops,
1276 			       struct event_trigger_data *data)
1277 {
1278 	struct enable_trigger_data *enable_data = data->private_data;
1279 
1280 	seq_printf(m, "%s:%s:%s",
1281 		   enable_data->hist ?
1282 		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1283 		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1284 		   enable_data->file->event_call->class->system,
1285 		   trace_event_name(enable_data->file->event_call));
1286 
1287 	if (data->count == -1)
1288 		seq_puts(m, ":unlimited");
1289 	else
1290 		seq_printf(m, ":count=%ld", data->count);
1291 
1292 	if (data->filter_str)
1293 		seq_printf(m, " if %s\n", data->filter_str);
1294 	else
1295 		seq_putc(m, '\n');
1296 
1297 	return 0;
1298 }
1299 
1300 void event_enable_trigger_free(struct event_trigger_ops *ops,
1301 			       struct event_trigger_data *data)
1302 {
1303 	struct enable_trigger_data *enable_data = data->private_data;
1304 
1305 	if (WARN_ON_ONCE(data->ref <= 0))
1306 		return;
1307 
1308 	data->ref--;
1309 	if (!data->ref) {
1310 		/* Remove the SOFT_MODE flag */
1311 		trace_event_enable_disable(enable_data->file, 0, 1);
1312 		module_put(enable_data->file->event_call->mod);
1313 		trigger_data_free(data);
1314 		kfree(enable_data);
1315 	}
1316 }
1317 
1318 static struct event_trigger_ops event_enable_trigger_ops = {
1319 	.func			= event_enable_trigger,
1320 	.print			= event_enable_trigger_print,
1321 	.init			= event_trigger_init,
1322 	.free			= event_enable_trigger_free,
1323 };
1324 
1325 static struct event_trigger_ops event_enable_count_trigger_ops = {
1326 	.func			= event_enable_count_trigger,
1327 	.print			= event_enable_trigger_print,
1328 	.init			= event_trigger_init,
1329 	.free			= event_enable_trigger_free,
1330 };
1331 
1332 static struct event_trigger_ops event_disable_trigger_ops = {
1333 	.func			= event_enable_trigger,
1334 	.print			= event_enable_trigger_print,
1335 	.init			= event_trigger_init,
1336 	.free			= event_enable_trigger_free,
1337 };
1338 
1339 static struct event_trigger_ops event_disable_count_trigger_ops = {
1340 	.func			= event_enable_count_trigger,
1341 	.print			= event_enable_trigger_print,
1342 	.init			= event_trigger_init,
1343 	.free			= event_enable_trigger_free,
1344 };
1345 
1346 int event_enable_trigger_func(struct event_command *cmd_ops,
1347 			      struct trace_event_file *file,
1348 			      char *glob, char *cmd, char *param)
1349 {
1350 	struct trace_event_file *event_enable_file;
1351 	struct enable_trigger_data *enable_data;
1352 	struct event_trigger_data *trigger_data;
1353 	struct event_trigger_ops *trigger_ops;
1354 	struct trace_array *tr = file->tr;
1355 	const char *system;
1356 	const char *event;
1357 	bool hist = false;
1358 	char *trigger;
1359 	char *number;
1360 	bool enable;
1361 	int ret;
1362 
1363 	if (!param)
1364 		return -EINVAL;
1365 
1366 	/* separate the trigger from the filter (s:e:n [if filter]) */
1367 	trigger = strsep(&param, " \t");
1368 	if (!trigger)
1369 		return -EINVAL;
1370 
1371 	system = strsep(&trigger, ":");
1372 	if (!trigger)
1373 		return -EINVAL;
1374 
1375 	event = strsep(&trigger, ":");
1376 
1377 	ret = -EINVAL;
1378 	event_enable_file = find_event_file(tr, system, event);
1379 	if (!event_enable_file)
1380 		goto out;
1381 
1382 #ifdef CONFIG_HIST_TRIGGERS
1383 	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1384 		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1385 
1386 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1387 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1388 #else
1389 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1390 #endif
1391 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1392 
1393 	ret = -ENOMEM;
1394 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1395 	if (!trigger_data)
1396 		goto out;
1397 
1398 	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1399 	if (!enable_data) {
1400 		kfree(trigger_data);
1401 		goto out;
1402 	}
1403 
1404 	trigger_data->count = -1;
1405 	trigger_data->ops = trigger_ops;
1406 	trigger_data->cmd_ops = cmd_ops;
1407 	INIT_LIST_HEAD(&trigger_data->list);
1408 	RCU_INIT_POINTER(trigger_data->filter, NULL);
1409 
1410 	enable_data->hist = hist;
1411 	enable_data->enable = enable;
1412 	enable_data->file = event_enable_file;
1413 	trigger_data->private_data = enable_data;
1414 
1415 	if (glob[0] == '!') {
1416 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1417 		kfree(trigger_data);
1418 		kfree(enable_data);
1419 		ret = 0;
1420 		goto out;
1421 	}
1422 
1423 	/* Up the trigger_data count to make sure nothing frees it on failure */
1424 	event_trigger_init(trigger_ops, trigger_data);
1425 
1426 	if (trigger) {
1427 		number = strsep(&trigger, ":");
1428 
1429 		ret = -EINVAL;
1430 		if (!strlen(number))
1431 			goto out_free;
1432 
1433 		/*
1434 		 * We use the callback data field (which is a pointer)
1435 		 * as our counter.
1436 		 */
1437 		ret = kstrtoul(number, 0, &trigger_data->count);
1438 		if (ret)
1439 			goto out_free;
1440 	}
1441 
1442 	if (!param) /* if param is non-empty, it's supposed to be a filter */
1443 		goto out_reg;
1444 
1445 	if (!cmd_ops->set_filter)
1446 		goto out_reg;
1447 
1448 	ret = cmd_ops->set_filter(param, trigger_data, file);
1449 	if (ret < 0)
1450 		goto out_free;
1451 
1452  out_reg:
1453 	/* Don't let event modules unload while this trigger references them */
1454 	ret = try_module_get(event_enable_file->event_call->mod);
1455 	if (!ret) {
1456 		ret = -EBUSY;
1457 		goto out_free;
1458 	}
1459 
1460 	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1461 	if (ret < 0)
1462 		goto out_put;
1463 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1464 	/*
1465 	 * On success the above returns the number of triggers registered,
1466 	 * but it returns zero if nothing was actually registered.
1467 	 * Consider registering nothing a failure too.
1468 	 */
1469 	if (!ret) {
1470 		ret = -ENOENT;
1471 		goto out_disable;
1472 	} else if (ret < 0)
1473 		goto out_disable;
1474 	/* Just return zero, not the number of triggers registered */
1475 	ret = 0;
1476 	event_trigger_free(trigger_ops, trigger_data);
1477  out:
1478 	return ret;
1479 
1480  out_disable:
1481 	trace_event_enable_disable(event_enable_file, 0, 1);
1482  out_put:
1483 	module_put(event_enable_file->event_call->mod);
1484  out_free:
1485 	if (cmd_ops->set_filter)
1486 		cmd_ops->set_filter(NULL, trigger_data, NULL);
1487 	event_trigger_free(trigger_ops, trigger_data);
1488 	kfree(enable_data);
1489 	goto out;
1490 }
1491 
1492 int event_enable_register_trigger(char *glob,
1493 				  struct event_trigger_ops *ops,
1494 				  struct event_trigger_data *data,
1495 				  struct trace_event_file *file)
1496 {
1497 	struct enable_trigger_data *enable_data = data->private_data;
1498 	struct enable_trigger_data *test_enable_data;
1499 	struct event_trigger_data *test;
1500 	int ret = 0;
1501 
1502 	list_for_each_entry_rcu(test, &file->triggers, list) {
1503 		test_enable_data = test->private_data;
1504 		if (test_enable_data &&
1505 		    (test->cmd_ops->trigger_type ==
1506 		     data->cmd_ops->trigger_type) &&
1507 		    (test_enable_data->file == enable_data->file)) {
1508 			ret = -EEXIST;
1509 			goto out;
1510 		}
1511 	}
1512 
1513 	if (data->ops->init) {
1514 		ret = data->ops->init(data->ops, data);
1515 		if (ret < 0)
1516 			goto out;
1517 	}
1518 
1519 	list_add_rcu(&data->list, &file->triggers);
1520 	ret++;
1521 
1522 	update_cond_flag(file);
1523 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1524 		list_del_rcu(&data->list);
1525 		update_cond_flag(file);
1526 		ret--;
1527 	}
1528 out:
1529 	return ret;
1530 }
1531 
1532 void event_enable_unregister_trigger(char *glob,
1533 				     struct event_trigger_ops *ops,
1534 				     struct event_trigger_data *test,
1535 				     struct trace_event_file *file)
1536 {
1537 	struct enable_trigger_data *test_enable_data = test->private_data;
1538 	struct enable_trigger_data *enable_data;
1539 	struct event_trigger_data *data;
1540 	bool unregistered = false;
1541 
1542 	list_for_each_entry_rcu(data, &file->triggers, list) {
1543 		enable_data = data->private_data;
1544 		if (enable_data &&
1545 		    (data->cmd_ops->trigger_type ==
1546 		     test->cmd_ops->trigger_type) &&
1547 		    (enable_data->file == test_enable_data->file)) {
1548 			unregistered = true;
1549 			list_del_rcu(&data->list);
1550 			trace_event_trigger_enable_disable(file, 0);
1551 			update_cond_flag(file);
1552 			break;
1553 		}
1554 	}
1555 
1556 	if (unregistered && data->ops->free)
1557 		data->ops->free(data->ops, data);
1558 }
1559 
1560 static struct event_trigger_ops *
1561 event_enable_get_trigger_ops(char *cmd, char *param)
1562 {
1563 	struct event_trigger_ops *ops;
1564 	bool enable;
1565 
1566 #ifdef CONFIG_HIST_TRIGGERS
1567 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1568 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1569 #else
1570 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1571 #endif
1572 	if (enable)
1573 		ops = param ? &event_enable_count_trigger_ops :
1574 			&event_enable_trigger_ops;
1575 	else
1576 		ops = param ? &event_disable_count_trigger_ops :
1577 			&event_disable_trigger_ops;
1578 
1579 	return ops;
1580 }
1581 
1582 static struct event_command trigger_enable_cmd = {
1583 	.name			= ENABLE_EVENT_STR,
1584 	.trigger_type		= ETT_EVENT_ENABLE,
1585 	.func			= event_enable_trigger_func,
1586 	.reg			= event_enable_register_trigger,
1587 	.unreg			= event_enable_unregister_trigger,
1588 	.get_trigger_ops	= event_enable_get_trigger_ops,
1589 	.set_filter		= set_trigger_filter,
1590 };
1591 
1592 static struct event_command trigger_disable_cmd = {
1593 	.name			= DISABLE_EVENT_STR,
1594 	.trigger_type		= ETT_EVENT_ENABLE,
1595 	.func			= event_enable_trigger_func,
1596 	.reg			= event_enable_register_trigger,
1597 	.unreg			= event_enable_unregister_trigger,
1598 	.get_trigger_ops	= event_enable_get_trigger_ops,
1599 	.set_filter		= set_trigger_filter,
1600 };
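/*
 * Usage examples for the two commands above (target event names and filter
 * fields are illustrative):
 *
 *	echo 'enable_event:sched:sched_wakeup:2' > events/sched/sched_switch/trigger
 *	echo 'disable_event:sched:sched_wakeup if common_pid != 0' > events/sched/sched_switch/trigger
 *
 * The target event is held in "soft" mode while the trigger exists, so
 * firing the trigger only flips its SOFT_DISABLED bit instead of
 * registering or unregistering the tracepoint each time.
 */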
1601 
1602 static __init void unregister_trigger_enable_disable_cmds(void)
1603 {
1604 	unregister_event_command(&trigger_enable_cmd);
1605 	unregister_event_command(&trigger_disable_cmd);
1606 }
1607 
1608 static __init int register_trigger_enable_disable_cmds(void)
1609 {
1610 	int ret;
1611 
1612 	ret = register_event_command(&trigger_enable_cmd);
1613 	if (WARN_ON(ret < 0))
1614 		return ret;
1615 	ret = register_event_command(&trigger_disable_cmd);
1616 	if (WARN_ON(ret < 0))
1617 		unregister_trigger_enable_disable_cmds();
1618 
1619 	return ret;
1620 }
1621 
1622 static __init int register_trigger_traceon_traceoff_cmds(void)
1623 {
1624 	int ret;
1625 
1626 	ret = register_event_command(&trigger_traceon_cmd);
1627 	if (WARN_ON(ret < 0))
1628 		return ret;
1629 	ret = register_event_command(&trigger_traceoff_cmd);
1630 	if (WARN_ON(ret < 0))
1631 		unregister_trigger_traceon_traceoff_cmds();
1632 
1633 	return ret;
1634 }
1635 
1636 __init int register_trigger_cmds(void)
1637 {
1638 	register_trigger_traceon_traceoff_cmds();
1639 	register_trigger_snapshot_cmd();
1640 	register_trigger_stacktrace_cmd();
1641 	register_trigger_enable_disable_cmds();
1642 	register_trigger_hist_enable_disable_cmds();
1643 	register_trigger_hist_cmd();
1644 
1645 	return 0;
1646 }
1647