xref: /linux/kernel/trace/trace_events_trigger.c (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_trigger - trace event triggers
4  *
5  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/security.h>
9 #include <linux/module.h>
10 #include <linux/ctype.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/rculist.h>
14 
15 #include "trace.h"
16 
17 static LIST_HEAD(trigger_commands);
18 static DEFINE_MUTEX(trigger_cmd_mutex);
19 
20 void trigger_data_free(struct event_trigger_data *data)
21 {
22 	if (data->cmd_ops->set_filter)
23 		data->cmd_ops->set_filter(NULL, data, NULL);
24 
25 	/* make sure current triggers exit before free */
26 	tracepoint_synchronize_unregister();
27 
28 	kfree(data);
29 }
30 
31 /**
32  * event_triggers_call - Call triggers associated with a trace event
33  * @file: The trace_event_file associated with the event
34  * @buffer: The ring buffer that the event is being written to
35  * @rec: The trace entry for the event, NULL for unconditional invocation
36  * @event: The event meta data in the ring buffer
37  *
38  * For each trigger associated with an event, invoke the trigger
39  * function registered with the associated trigger command.  If rec is
40  * non-NULL, it means that the trigger requires further processing and
41  * shouldn't be unconditionally invoked.  If rec is non-NULL and the
42  * trigger has a filter associated with it, rec will be checked against
43  * the filter and, if the record matches, the trigger will be invoked.
44  * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
45  * unless and until the current event is written, the trigger
46  * function isn't invoked but the bit associated with the deferred
47  * trigger is set in the return value.
48  *
52  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
53  *
54  * Return: an enum event_trigger_type value containing a set bit for
55  * any trigger that should be deferred, ETT_NONE if nothing to defer.
56  */
57 enum event_trigger_type
58 event_triggers_call(struct trace_event_file *file,
59 		    struct trace_buffer *buffer, void *rec,
60 		    struct ring_buffer_event *event)
61 {
62 	struct event_trigger_data *data;
63 	enum event_trigger_type tt = ETT_NONE;
64 	struct event_filter *filter;
65 
66 	if (list_empty(&file->triggers))
67 		return tt;
68 
69 	list_for_each_entry_rcu(data, &file->triggers, list) {
70 		if (data->paused)
71 			continue;
72 		if (!rec) {
73 			data->ops->trigger(data, buffer, rec, event);
74 			continue;
75 		}
76 		filter = rcu_dereference_sched(data->filter);
77 		if (filter && !filter_match_preds(filter, rec))
78 			continue;
79 		if (event_command_post_trigger(data->cmd_ops)) {
80 			tt |= data->cmd_ops->trigger_type;
81 			continue;
82 		}
83 		data->ops->trigger(data, buffer, rec, event);
84 	}
85 	return tt;
86 }
87 EXPORT_SYMBOL_GPL(event_triggers_call);
88 
89 bool __trace_trigger_soft_disabled(struct trace_event_file *file)
90 {
91 	unsigned long eflags = file->flags;
92 
93 	if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
94 		event_triggers_call(file, NULL, NULL, NULL);
95 	if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
96 		return true;
97 	if (eflags & EVENT_FILE_FL_PID_FILTER)
98 		return trace_event_ignore_this_pid(file);
99 	return false;
100 }
101 EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
102 
103 /**
104  * event_triggers_post_call - Call 'post_triggers' for a trace event
105  * @file: The trace_event_file associated with the event
106  * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
107  *
108  * For each trigger associated with an event, invoke the trigger
109  * function registered with the associated trigger command, if the
110  * corresponding bit is set in the tt enum passed into this function.
111  * See @event_triggers_call for details on how those bits are set.
112  *
113  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
114  */
115 void
116 event_triggers_post_call(struct trace_event_file *file,
117 			 enum event_trigger_type tt)
118 {
119 	struct event_trigger_data *data;
120 
121 	list_for_each_entry_rcu(data, &file->triggers, list) {
122 		if (data->paused)
123 			continue;
124 		if (data->cmd_ops->trigger_type & tt)
125 			data->ops->trigger(data, NULL, NULL, NULL);
126 	}
127 }
128 EXPORT_SYMBOL_GPL(event_triggers_post_call);
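/*
 * A rough sketch of how the two entry points above pair up around an
 * event write.  The real callers live in trace.h (e.g.
 * event_trigger_unlock_commit()); the variable names below are
 * illustrative only:
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (unlikely(file->flags & EVENT_FILE_FL_TRIGGER_COND))
 *		tt = event_triggers_call(file, buffer, entry, event);
 *	// ... commit the event to the ring buffer ...
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */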
129 
130 #define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
131 
132 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
133 {
134 	struct trace_event_file *event_file = event_file_data(m->private);
135 
136 	if (t == SHOW_AVAILABLE_TRIGGERS) {
137 		(*pos)++;
138 		return NULL;
139 	}
140 	return seq_list_next(t, &event_file->triggers, pos);
141 }
142 
143 static bool check_user_trigger(struct trace_event_file *file)
144 {
145 	struct event_trigger_data *data;
146 
147 	list_for_each_entry_rcu(data, &file->triggers, list,
148 				lockdep_is_held(&event_mutex)) {
149 		if (data->flags & EVENT_TRIGGER_FL_PROBE)
150 			continue;
151 		return true;
152 	}
153 	return false;
154 }
155 
156 static void *trigger_start(struct seq_file *m, loff_t *pos)
157 {
158 	struct trace_event_file *event_file;
159 
160 	/* ->stop() is called even if ->start() fails */
161 	mutex_lock(&event_mutex);
162 	event_file = event_file_data(m->private);
163 	if (unlikely(!event_file))
164 		return ERR_PTR(-ENODEV);
165 
166 	if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
167 		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
168 
169 	return seq_list_start(&event_file->triggers, *pos);
170 }
171 
172 static void trigger_stop(struct seq_file *m, void *t)
173 {
174 	mutex_unlock(&event_mutex);
175 }
176 
177 static int trigger_show(struct seq_file *m, void *v)
178 {
179 	struct event_trigger_data *data;
180 	struct event_command *p;
181 
182 	if (v == SHOW_AVAILABLE_TRIGGERS) {
183 		seq_puts(m, "# Available triggers:\n");
184 		seq_putc(m, '#');
185 		mutex_lock(&trigger_cmd_mutex);
186 		list_for_each_entry_reverse(p, &trigger_commands, list)
187 			seq_printf(m, " %s", p->name);
188 		seq_putc(m, '\n');
189 		mutex_unlock(&trigger_cmd_mutex);
190 		return 0;
191 	}
192 
193 	data = list_entry(v, struct event_trigger_data, list);
194 	data->ops->print(m, data);
195 
196 	return 0;
197 }
198 
199 static const struct seq_operations event_triggers_seq_ops = {
200 	.start = trigger_start,
201 	.next = trigger_next,
202 	.stop = trigger_stop,
203 	.show = trigger_show,
204 };
205 
206 static int event_trigger_regex_open(struct inode *inode, struct file *file)
207 {
208 	int ret;
209 
210 	ret = security_locked_down(LOCKDOWN_TRACEFS);
211 	if (ret)
212 		return ret;
213 
214 	mutex_lock(&event_mutex);
215 
216 	if (unlikely(!event_file_data(file))) {
217 		mutex_unlock(&event_mutex);
218 		return -ENODEV;
219 	}
220 
221 	if ((file->f_mode & FMODE_WRITE) &&
222 	    (file->f_flags & O_TRUNC)) {
223 		struct trace_event_file *event_file;
224 		struct event_command *p;
225 
226 		event_file = event_file_data(file);
227 
228 		list_for_each_entry(p, &trigger_commands, list) {
229 			if (p->unreg_all)
230 				p->unreg_all(event_file);
231 		}
232 	}
233 
234 	if (file->f_mode & FMODE_READ) {
235 		ret = seq_open(file, &event_triggers_seq_ops);
236 		if (!ret) {
237 			struct seq_file *m = file->private_data;
238 			m->private = file;
239 		}
240 	}
241 
242 	mutex_unlock(&event_mutex);
243 
244 	return ret;
245 }
246 
247 int trigger_process_regex(struct trace_event_file *file, char *buff)
248 {
249 	char *command, *next;
250 	struct event_command *p;
251 	int ret = -EINVAL;
252 
253 	next = buff = skip_spaces(buff);
254 	command = strsep(&next, ": \t");
255 	if (next) {
256 		next = skip_spaces(next);
257 		if (!*next)
258 			next = NULL;
259 	}
260 	command = (command[0] != '!') ? command : command + 1;
261 
262 	mutex_lock(&trigger_cmd_mutex);
263 	list_for_each_entry(p, &trigger_commands, list) {
264 		if (strcmp(p->name, command) == 0) {
265 			ret = p->parse(p, file, buff, command, next);
266 			goto out_unlock;
267 		}
268 	}
269  out_unlock:
270 	mutex_unlock(&trigger_cmd_mutex);
271 
272 	return ret;
273 }
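/*
 * To illustrate the splitting above, assuming these writes to a
 * 'trigger' file:
 *
 *   echo 'traceon:5 if pid == 0' > trigger
 *     - command is 'traceon' (buff/glob is terminated at the ':')
 *     - next is '5 if pid == 0', passed to parse() as param_and_filter
 *
 *   echo '!traceon' > trigger
 *     - command is 'traceon' (the '!' is skipped for the name lookup)
 *     - buff, passed to parse() as glob, keeps its leading '!'
 *     - next is NULL
 */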
274 
275 static ssize_t event_trigger_regex_write(struct file *file,
276 					 const char __user *ubuf,
277 					 size_t cnt, loff_t *ppos)
278 {
279 	struct trace_event_file *event_file;
280 	ssize_t ret;
281 	char *buf;
282 
283 	if (!cnt)
284 		return 0;
285 
286 	if (cnt >= PAGE_SIZE)
287 		return -EINVAL;
288 
289 	buf = memdup_user_nul(ubuf, cnt);
290 	if (IS_ERR(buf))
291 		return PTR_ERR(buf);
292 
293 	strim(buf);
294 
295 	mutex_lock(&event_mutex);
296 	event_file = event_file_data(file);
297 	if (unlikely(!event_file)) {
298 		mutex_unlock(&event_mutex);
299 		kfree(buf);
300 		return -ENODEV;
301 	}
302 	ret = trigger_process_regex(event_file, buf);
303 	mutex_unlock(&event_mutex);
304 
305 	kfree(buf);
306 	if (ret < 0)
307 		goto out;
308 
309 	*ppos += cnt;
310 	ret = cnt;
311  out:
312 	return ret;
313 }
314 
315 static int event_trigger_regex_release(struct inode *inode, struct file *file)
316 {
317 	mutex_lock(&event_mutex);
318 
319 	if (file->f_mode & FMODE_READ)
320 		seq_release(inode, file);
321 
322 	mutex_unlock(&event_mutex);
323 
324 	return 0;
325 }
326 
327 static ssize_t
328 event_trigger_write(struct file *filp, const char __user *ubuf,
329 		    size_t cnt, loff_t *ppos)
330 {
331 	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
332 }
333 
334 static int
335 event_trigger_open(struct inode *inode, struct file *filp)
336 {
337 	/* Checks for tracefs lockdown */
338 	return event_trigger_regex_open(inode, filp);
339 }
340 
341 static int
342 event_trigger_release(struct inode *inode, struct file *file)
343 {
344 	return event_trigger_regex_release(inode, file);
345 }
346 
347 const struct file_operations event_trigger_fops = {
348 	.open = event_trigger_open,
349 	.read = seq_read,
350 	.write = event_trigger_write,
351 	.llseek = tracing_lseek,
352 	.release = event_trigger_release,
353 };
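/*
 * event_trigger_fops backs each event's 'trigger' file in tracefs.  As
 * an illustration (path assumes tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo 'traceoff:1 if prev_prio < 50' > \
 *	/sys/kernel/tracing/events/sched/sched_switch/trigger
 *   cat /sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * Writes are handled by event_trigger_regex_write() ->
 * trigger_process_regex(); reads list the current (or available)
 * triggers via the seq_file operations above.
 */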
354 
355 /*
356  * Currently we only register event commands from __init, so mark this
357  * __init too.
358  */
359 __init int register_event_command(struct event_command *cmd)
360 {
361 	struct event_command *p;
362 	int ret = 0;
363 
364 	mutex_lock(&trigger_cmd_mutex);
365 	list_for_each_entry(p, &trigger_commands, list) {
366 		if (strcmp(cmd->name, p->name) == 0) {
367 			ret = -EBUSY;
368 			goto out_unlock;
369 		}
370 	}
371 	list_add(&cmd->list, &trigger_commands);
372  out_unlock:
373 	mutex_unlock(&trigger_cmd_mutex);
374 
375 	return ret;
376 }
377 
378 /*
379  * Currently we only unregister event commands from __init, so mark
380  * this __init too.
381  */
382 __init int unregister_event_command(struct event_command *cmd)
383 {
384 	struct event_command *p, *n;
385 	int ret = -ENODEV;
386 
387 	mutex_lock(&trigger_cmd_mutex);
388 	list_for_each_entry_safe(p, n, &trigger_commands, list) {
389 		if (strcmp(cmd->name, p->name) == 0) {
390 			ret = 0;
391 			list_del_init(&p->list);
392 			goto out_unlock;
393 		}
394 	}
395  out_unlock:
396 	mutex_unlock(&trigger_cmd_mutex);
397 
398 	return ret;
399 }
400 
401 /**
402  * event_trigger_print - Generic event_trigger_ops @print implementation
403  * @name: The name of the event trigger
404  * @m: The seq_file being printed to
405  * @data: Trigger-specific data
406  * @filter_str: filter_str to print, if present
407  *
408  * Common implementation for event triggers to print themselves.
409  *
410  * Usually wrapped by a function that simply sets the @name of the
411  * trigger command and then invokes this.
412  *
413  * Return: 0 on success, errno otherwise
414  */
415 static int
416 event_trigger_print(const char *name, struct seq_file *m,
417 		    void *data, char *filter_str)
418 {
419 	long count = (long)data;
420 
421 	seq_puts(m, name);
422 
423 	if (count == -1)
424 		seq_puts(m, ":unlimited");
425 	else
426 		seq_printf(m, ":count=%ld", count);
427 
428 	if (filter_str)
429 		seq_printf(m, " if %s\n", filter_str);
430 	else
431 		seq_putc(m, '\n');
432 
433 	return 0;
434 }
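/*
 * For example, given the implementation above, a traceon trigger with
 * count 5 and a filter prints as:
 *
 *   traceon:count=5 if pid == 0
 *
 * while one registered without a count prints as:
 *
 *   traceon:unlimited
 */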
435 
436 /**
437  * event_trigger_init - Generic event_trigger_ops @init implementation
438  * @data: Trigger-specific data
439  *
440  * Common implementation of event trigger initialization.
441  *
442  * Usually used directly as the @init method in event trigger
443  * implementations.
444  *
445  * Return: 0 on success, errno otherwise
446  */
447 int event_trigger_init(struct event_trigger_data *data)
448 {
449 	data->ref++;
450 	return 0;
451 }
452 
453 /**
454  * event_trigger_free - Generic event_trigger_ops @free implementation
455  * @data: Trigger-specific data
456  *
457  * Common implementation of event trigger de-initialization.
458  *
459  * Usually used directly as the @free method in event trigger
460  * implementations.
461  */
462 static void
463 event_trigger_free(struct event_trigger_data *data)
464 {
465 	if (WARN_ON_ONCE(data->ref <= 0))
466 		return;
467 
468 	data->ref--;
469 	if (!data->ref)
470 		trigger_data_free(data);
471 }
472 
473 int trace_event_trigger_enable_disable(struct trace_event_file *file,
474 				       int trigger_enable)
475 {
476 	int ret = 0;
477 
478 	if (trigger_enable) {
479 		if (atomic_inc_return(&file->tm_ref) > 1)
480 			return ret;
481 		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
482 		ret = trace_event_enable_disable(file, 1, 1);
483 	} else {
484 		if (atomic_dec_return(&file->tm_ref) > 0)
485 			return ret;
486 		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
487 		ret = trace_event_enable_disable(file, 0, 1);
488 	}
489 
490 	return ret;
491 }
492 
493 /**
494  * clear_event_triggers - Clear all triggers associated with a trace array
495  * @tr: The trace array to clear
496  *
497  * For each trigger, the triggering event has its tm_ref decremented
498  * via trace_event_trigger_enable_disable(), and any associated event
499  * (in the case of enable/disable_event triggers) will have its sm_ref
500  * decremented via free()->trace_event_enable_disable().  That
501  * combination effectively reverses the soft-mode/trigger state added
502  * by trigger registration.
503  *
504  * Must be called with event_mutex held.
505  */
506 void
507 clear_event_triggers(struct trace_array *tr)
508 {
509 	struct trace_event_file *file;
510 
511 	list_for_each_entry(file, &tr->events, list) {
512 		struct event_trigger_data *data, *n;
513 		list_for_each_entry_safe(data, n, &file->triggers, list) {
514 			trace_event_trigger_enable_disable(file, 0);
515 			list_del_rcu(&data->list);
516 			if (data->ops->free)
517 				data->ops->free(data);
518 		}
519 	}
520 }
521 
522 /**
523  * update_cond_flag - Set or reset the TRIGGER_COND bit
524  * @file: The trace_event_file associated with the event
525  *
526  * If an event has triggers and any of those triggers has a filter or
527  * a post_trigger, trigger invocation needs to be deferred until after
528  * the current event has logged its data, and the event should have
529  * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
530  * cleared.
531  */
532 void update_cond_flag(struct trace_event_file *file)
533 {
534 	struct event_trigger_data *data;
535 	bool set_cond = false;
536 
537 	lockdep_assert_held(&event_mutex);
538 
539 	list_for_each_entry(data, &file->triggers, list) {
540 		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
541 		    event_command_needs_rec(data->cmd_ops)) {
542 			set_cond = true;
543 			break;
544 		}
545 	}
546 
547 	if (set_cond)
548 		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
549 	else
550 		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
551 }
552 
553 /**
554  * register_trigger - Generic event_command @reg implementation
555  * @glob: The raw string used to register the trigger
556  * @data: Trigger-specific data to associate with the trigger
557  * @file: The trace_event_file associated with the event
558  *
559  * Common implementation for event trigger registration.
560  *
561  * Usually used directly as the @reg method in event command
562  * implementations.
563  *
564  * Return: 0 on success, errno otherwise
565  */
566 static int register_trigger(char *glob,
567 			    struct event_trigger_data *data,
568 			    struct trace_event_file *file)
569 {
570 	struct event_trigger_data *test;
571 	int ret = 0;
572 
573 	lockdep_assert_held(&event_mutex);
574 
575 	list_for_each_entry(test, &file->triggers, list) {
576 		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
577 			ret = -EEXIST;
578 			goto out;
579 		}
580 	}
581 
582 	if (data->ops->init) {
583 		ret = data->ops->init(data);
584 		if (ret < 0)
585 			goto out;
586 	}
587 
588 	list_add_rcu(&data->list, &file->triggers);
589 
590 	update_cond_flag(file);
591 	ret = trace_event_trigger_enable_disable(file, 1);
592 	if (ret < 0) {
593 		list_del_rcu(&data->list);
594 		update_cond_flag(file);
595 	}
596 out:
597 	return ret;
598 }
599 
600 /**
601  * unregister_trigger - Generic event_command @unreg implementation
602  * @glob: The raw string used to register the trigger
603  * @test: Trigger-specific data used to find the trigger to remove
604  * @file: The trace_event_file associated with the event
605  *
606  * Common implementation for event trigger unregistration.
607  *
608  * Usually used directly as the @unreg method in event command
609  * implementations.
610  */
611 static void unregister_trigger(char *glob,
612 			       struct event_trigger_data *test,
613 			       struct trace_event_file *file)
614 {
615 	struct event_trigger_data *data = NULL, *iter;
616 
617 	lockdep_assert_held(&event_mutex);
618 
619 	list_for_each_entry(iter, &file->triggers, list) {
620 		if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
621 			data = iter;
622 			list_del_rcu(&data->list);
623 			trace_event_trigger_enable_disable(file, 0);
624 			update_cond_flag(file);
625 			break;
626 		}
627 	}
628 
629 	if (data && data->ops->free)
630 		data->ops->free(data);
631 }
632 
633 /*
634  * Event trigger parsing helper functions.
635  *
636  * These functions help make it easier to write an event trigger
637  * parsing function i.e. the struct event_command.parse() callback
638  * function responsible for parsing and registering a trigger command
639  * written to the 'trigger' file.
640  *
641  * A trigger command (or just 'trigger' for short) takes the form:
642  *   [trigger] [if filter]
643  *
644  * The struct event_command.parse() callback (and other struct
645  * event_command functions) refer to several components of a trigger
646  * command.  Those same components are referenced by the event trigger
647  * parsing helper functions defined below.  These components are:
648  *
649  *   cmd               - the trigger command name
650  *   glob              - the trigger command name optionally prefaced with '!'
651  *   param_and_filter  - text following cmd and ':'
652  *   param             - text following cmd and ':' and stripped of filter
653  *   filter            - the optional filter text following (and including) 'if'
654  *
655  * To illustrate the use of these components, here are some concrete
656  * examples. For the following triggers:
657  *
658  *   echo 'traceon:5 if pid == 0' > trigger
659  *     - 'traceon' is both cmd and glob
660  *     - '5 if pid == 0' is the param_and_filter
661  *     - '5' is the param
662  *     - 'if pid == 0' is the filter
663  *
664  *   echo 'enable_event:sys:event:n' > trigger
665  *     - 'enable_event' is both cmd and glob
666  *     - 'sys:event:n' is the param_and_filter
667  *     - 'sys:event:n' is the param
668  *     - there is no filter
669  *
670  *   echo 'hist:keys=pid if prio > 50' > trigger
671  *     - 'hist' is both cmd and glob
672  *     - 'keys=pid if prio > 50' is the param_and_filter
673  *     - 'keys=pid' is the param
674  *     - 'if prio > 50' is the filter
675  *
676  *   echo '!enable_event:sys:event:n' > trigger
677  *     - 'enable_event' is the cmd
678  *     - '!enable_event' is the glob
679  *     - 'sys:event:n' is the param_and_filter
680  *     - 'sys:event:n' is the param
681  *     - there is no filter
682  *
683  *   echo 'traceoff' > trigger
684  *     - 'traceoff' is both cmd and glob
685  *     - there is no param_and_filter
686  *     - there is no param
687  *     - there is no filter
688  *
689  * There are a few different categories of event trigger covered by
690  * these helpers:
691  *
692  *  - triggers that don't require a parameter e.g. traceon
693  *  - triggers that do require a parameter e.g. enable_event and hist
694  *  - triggers that, though they may not require a param, may support an
695  *    optional 'n' param (n = number of times the trigger should fire)
696  *    e.g.: traceon:5 or enable_event:sys:event:n
697  *  - triggers that do not support an 'n' param e.g. hist
698  *
699  * These functions can be used or ignored as necessary - it all
700  * depends on the complexity of the trigger, and the granularity of
701  * the functions supported reflects the fact that some implementations
702  * may need to customize certain aspects of their implementations and
703  * won't need certain functions.  For instance, the hist trigger
704  * implementation doesn't use event_trigger_separate_filter() because
705  * it has special requirements for handling the filter.
706  */
707 
708 /**
709  * event_trigger_check_remove - check whether an event trigger specifies remove
710  * @glob: The trigger command string, with optional remove(!) operator
711  *
712  * The event trigger callback implementations pass in 'glob' as a
713  * parameter.  This is the command name either with or without a
714  * remove(!)  operator.  This function simply parses the glob and
715  * determines whether the command corresponds to a trigger removal or
716  * a trigger addition.
717  *
718  * Return: true if this is a remove command, false otherwise
719  */
720 bool event_trigger_check_remove(const char *glob)
721 {
722 	return (glob && glob[0] == '!') ? true : false;
723 }
724 
725 /**
726  * event_trigger_empty_param - check whether the param is empty
727  * @param: The trigger param string
728  *
729  * The event trigger callback implementations pass in 'param' as a
730  * parameter.  This is the portion of the trigger string that follows
731  * the command name.  This function can be called by a
732  * callback implementation for any command that requires a param; a
733  * callback that doesn't require a param can ignore it.
734  *
735  * Return: true if this is an empty param, false otherwise
736  */
737 bool event_trigger_empty_param(const char *param)
738 {
739 	return !param;
740 }
741 
742 /**
743  * event_trigger_separate_filter - separate an event trigger from a filter
744  * @param_and_filter: String containing trigger and possibly filter
745  * @param: outparam, will be filled with a pointer to the trigger
746  * @filter: outparam, will be filled with a pointer to the filter
747  * @param_required: Specifies whether or not the param string is required
748  *
749  * Given a param string of the form '[trigger] [if filter]', this
750  * function separates the filter from the trigger and returns the
751  * trigger in @param and the filter in @filter.  Either the @param
752  * or the @filter may be set to NULL by this function - if not set to
753  * NULL, they will contain strings corresponding to the trigger and
754  * filter.
755  *
756  * There are two cases that need to be handled with respect to the
757  * passed-in param: either the param is required, or it is not
758  * required.  If @param_required is set, and there's no param, it will
759  * return -EINVAL.  If @param_required is not set and there's a param
760  * that starts with a number, that corresponds to the case of a
761  * trigger with :n (n = number of times the trigger should fire) and
762  * the parsing continues normally; otherwise the function just returns
763  * and assumes param just contains a filter and there's nothing else
764  * to do.
765  *
766  * Return: 0 on success, errno otherwise
767  */
768 int event_trigger_separate_filter(char *param_and_filter, char **param,
769 				  char **filter, bool param_required)
770 {
771 	int ret = 0;
772 
773 	*param = *filter = NULL;
774 
775 	if (!param_and_filter) {
776 		if (param_required)
777 			ret = -EINVAL;
778 		goto out;
779 	}
780 
781 	/*
782 	 * Here we check for an optional param. The only legal
783 	 * optional param is :n, and if that's the case, continue
784 	 * below. Otherwise we assume what's left is a filter and
785 	 * return it as the filter string for the caller to deal with.
786 	 */
787 	if (!param_required && param_and_filter && !isdigit(param_and_filter[0])) {
788 		*filter = param_and_filter;
789 		goto out;
790 	}
791 
792 	/*
793 	 * Separate the param from the filter (param [if filter]).
794 	 * Here we have either an optional :n param or a required
795 	 * param and an optional filter.
796 	 */
797 	*param = strsep(&param_and_filter, " \t");
798 
799 	/*
800 	 * Here we have a filter, though it may be empty.
801 	 */
802 	if (param_and_filter) {
803 		*filter = skip_spaces(param_and_filter);
804 		if (!**filter)
805 			*filter = NULL;
806 	}
807 out:
808 	return ret;
809 }
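/*
 * For illustration, with @param_required false the separation above
 * yields:
 *
 *   '5 if pid == 0'  ->  *param = '5',   *filter = 'if pid == 0'
 *   'if pid == 0'    ->  *param = NULL,  *filter = 'if pid == 0'
 *   NULL             ->  *param = NULL,  *filter = NULL
 *
 * and with @param_required true:
 *
 *   'sys:event if comm == "bash"'  ->  *param = 'sys:event',
 *                                      *filter = 'if comm == "bash"'
 *   NULL                           ->  -EINVAL
 */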
810 
811 /**
812  * event_trigger_alloc - allocate and init event_trigger_data for a trigger
813  * @cmd_ops: The event_command operations for the trigger
814  * @cmd: The cmd string
815  * @param: The param string
816  * @private_data: User data to associate with the event trigger
817  *
818  * Allocate an event_trigger_data instance and initialize it.  The
819  * @cmd_ops are used along with the @cmd and @param to get the
820  * trigger_ops to assign to the event_trigger_data.  @private_data can
821  * also be passed in and associated with the event_trigger_data.
822  *
823  * Use event_trigger_free() to free an event_trigger_data object.
824  *
825  * Return: The trigger_data object on success, NULL otherwise
826  */
827 struct event_trigger_data *event_trigger_alloc(struct event_command *cmd_ops,
828 					       char *cmd,
829 					       char *param,
830 					       void *private_data)
831 {
832 	struct event_trigger_data *trigger_data;
833 	struct event_trigger_ops *trigger_ops;
834 
835 	trigger_ops = cmd_ops->get_trigger_ops(cmd, param);
836 
837 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
838 	if (!trigger_data)
839 		return NULL;
840 
841 	trigger_data->count = -1;
842 	trigger_data->ops = trigger_ops;
843 	trigger_data->cmd_ops = cmd_ops;
844 	trigger_data->private_data = private_data;
845 
846 	INIT_LIST_HEAD(&trigger_data->list);
847 	INIT_LIST_HEAD(&trigger_data->named_list);
848 	RCU_INIT_POINTER(trigger_data->filter, NULL);
849 
850 	return trigger_data;
851 }
852 
853 /**
854  * event_trigger_parse_num - parse and return the number param for a trigger
855  * @param: The param string
856  * @trigger_data: The trigger_data for the trigger
857  *
858  * Parse the :n (n = number of times the trigger should fire) param
859  * and set the count variable in the trigger_data to the parsed count.
860  *
861  * Return: 0 on success, errno otherwise
862  */
863 int event_trigger_parse_num(char *param,
864 			    struct event_trigger_data *trigger_data)
865 {
866 	char *number;
867 	int ret = 0;
868 
869 	if (param) {
870 		number = strsep(&param, ":");
871 
872 		if (!strlen(number))
873 			return -EINVAL;
874 
875 		/*
876 		 * We use the callback data field (which is a pointer)
877 		 * as our counter.
878 		 */
879 		ret = kstrtoul(number, 0, &trigger_data->count);
880 	}
881 
882 	return ret;
883 }
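/*
 * For example, for 'traceon:5' the param seen here is '5' and
 * trigger_data->count becomes 5; for a plain 'traceon' the param is
 * NULL and count keeps the -1 ("unlimited") default set at allocation
 * time.
 */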
884 
885 /**
886  * event_trigger_set_filter - set an event trigger's filter
887  * @cmd_ops: The event_command operations for the trigger
888  * @file: The event file for the trigger's event
889  * @param: The string containing the filter
890  * @trigger_data: The trigger_data for the trigger
891  *
892  * Set the filter for the trigger.  If the filter is NULL, just return
893  * without error.
894  *
895  * Return: 0 on success, errno otherwise
896  */
897 int event_trigger_set_filter(struct event_command *cmd_ops,
898 			     struct trace_event_file *file,
899 			     char *param,
900 			     struct event_trigger_data *trigger_data)
901 {
902 	if (param && cmd_ops->set_filter)
903 		return cmd_ops->set_filter(param, trigger_data, file);
904 
905 	return 0;
906 }
907 
908 /**
909  * event_trigger_reset_filter - reset an event trigger's filter
910  * @cmd_ops: The event_command operations for the trigger
911  * @trigger_data: The trigger_data for the trigger
912  *
913  * Reset the filter for the trigger to no filter.
914  */
915 void event_trigger_reset_filter(struct event_command *cmd_ops,
916 				struct event_trigger_data *trigger_data)
917 {
918 	if (cmd_ops->set_filter)
919 		cmd_ops->set_filter(NULL, trigger_data, NULL);
920 }
921 
922 /**
923  * event_trigger_register - register an event trigger
924  * @cmd_ops: The event_command operations for the trigger
925  * @file: The event file for the trigger's event
926  * @glob: The trigger command string, with optional remove(!) operator
927  * @trigger_data: The trigger_data for the trigger
928  *
929  * Register an event trigger.  The @cmd_ops are used to call the
930  * cmd_ops->reg() function which actually does the registration.
931  *
932  * Return: 0 on success, errno otherwise
933  */
934 int event_trigger_register(struct event_command *cmd_ops,
935 			   struct trace_event_file *file,
936 			   char *glob,
937 			   struct event_trigger_data *trigger_data)
938 {
939 	return cmd_ops->reg(glob, trigger_data, file);
940 }
941 
942 /**
943  * event_trigger_unregister - unregister an event trigger
944  * @cmd_ops: The event_command operations for the trigger
945  * @file: The event file for the trigger's event
946  * @glob: The trigger command string, with optional remove(!) operator
947  * @trigger_data: The trigger_data for the trigger
948  *
949  * Unregister an event trigger.  The @cmd_ops are used to call the
950  * cmd_ops->unreg() function which actually does the unregistration.
951  */
952 void event_trigger_unregister(struct event_command *cmd_ops,
953 			      struct trace_event_file *file,
954 			      char *glob,
955 			      struct event_trigger_data *trigger_data)
956 {
957 	cmd_ops->unreg(glob, trigger_data, file);
958 }
959 
960 /*
961  * End event trigger parsing helper functions.
962  */
963 
964 /**
965  * event_trigger_parse - Generic event_command @parse implementation
966  * @cmd_ops: The command ops, used for trigger registration
967  * @file: The trace_event_file associated with the event
968  * @glob: The raw string used to register the trigger
969  * @cmd: The cmd portion of the string used to register the trigger
970  * @param_and_filter: The param and filter portion of the string used to register the trigger
971  *
972  * Common implementation for event command parsing and trigger
973  * instantiation.
974  *
975  * Usually used directly as the @parse method in event command
976  * implementations.
977  *
978  * Return: 0 on success, errno otherwise
979  */
980 static int
981 event_trigger_parse(struct event_command *cmd_ops,
982 		    struct trace_event_file *file,
983 		    char *glob, char *cmd, char *param_and_filter)
984 {
985 	struct event_trigger_data *trigger_data;
986 	char *param, *filter;
987 	bool remove;
988 	int ret;
989 
990 	remove = event_trigger_check_remove(glob);
991 
992 	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, false);
993 	if (ret)
994 		return ret;
995 
996 	ret = -ENOMEM;
997 	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
998 	if (!trigger_data)
999 		goto out;
1000 
1001 	if (remove) {
1002 		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1003 		kfree(trigger_data);
1004 		ret = 0;
1005 		goto out;
1006 	}
1007 
1008 	ret = event_trigger_parse_num(param, trigger_data);
1009 	if (ret)
1010 		goto out_free;
1011 
1012 	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1013 	if (ret < 0)
1014 		goto out_free;
1015 
1016 	/* Up the trigger_data count to make sure reg doesn't free it on failure */
1017 	event_trigger_init(trigger_data);
1018 
1019 	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1020 	if (ret)
1021 		goto out_free;
1022 
1023 	/* Down the counter of trigger_data or free it if not used anymore */
1024 	event_trigger_free(trigger_data);
1025  out:
1026 	return ret;
1027 
1028  out_free:
1029 	event_trigger_reset_filter(cmd_ops, trigger_data);
1030 	kfree(trigger_data);
1031 	goto out;
1032 }
1033 
1034 /**
1035  * set_trigger_filter - Generic event_command @set_filter implementation
1036  * @filter_str: The filter string for the trigger, NULL to remove filter
1037  * @trigger_data: Trigger-specific data
1038  * @file: The trace_event_file associated with the event
1039  *
1040  * Common implementation for event command filter parsing and filter
1041  * instantiation.
1042  *
1043  * Usually used directly as the @set_filter method in event command
1044  * implementations.
1045  *
1046  * Also used to remove a filter (if filter_str = NULL).
1047  *
1048  * Return: 0 on success, errno otherwise
1049  */
1050 int set_trigger_filter(char *filter_str,
1051 		       struct event_trigger_data *trigger_data,
1052 		       struct trace_event_file *file)
1053 {
1054 	struct event_trigger_data *data = trigger_data;
1055 	struct event_filter *filter = NULL, *tmp;
1056 	int ret = -EINVAL;
1057 	char *s;
1058 
1059 	if (!filter_str) /* clear the current filter */
1060 		goto assign;
1061 
1062 	s = strsep(&filter_str, " \t");
1063 
1064 	if (!strlen(s) || strcmp(s, "if") != 0)
1065 		goto out;
1066 
1067 	if (!filter_str)
1068 		goto out;
1069 
1070 	/* The filter is for the 'trigger' event, not the triggered event */
1071 	ret = create_event_filter(file->tr, file->event_call,
1072 				  filter_str, true, &filter);
1073 
1074 	/* The filter string was only kept for error reporting; drop it now */
1075 	if (filter) {
1076 		kfree(filter->filter_string);
1077 		filter->filter_string = NULL;
1078 	}
1079 
1080 	/*
1081 	 * If create_event_filter() fails, filter still needs to be freed;
1082 	 * the calling code will do that via data->filter.
1083 	 */
1084  assign:
1085 	tmp = rcu_access_pointer(data->filter);
1086 
1087 	rcu_assign_pointer(data->filter, filter);
1088 
1089 	if (tmp) {
1090 		/*
1091 		 * Make sure the call is done with the filter.
1092 		 * It is possible that a filter could fail at boot up,
1093 		 * and then this path will be called. Avoid the synchronization
1094 		 * in that case.
1095 		 */
1096 		if (system_state != SYSTEM_BOOTING)
1097 			tracepoint_synchronize_unregister();
1098 		free_event_filter(tmp);
1099 	}
1100 
1101 	kfree(data->filter_str);
1102 	data->filter_str = NULL;
1103 
1104 	if (filter_str) {
1105 		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
1106 		if (!data->filter_str) {
1107 			free_event_filter(rcu_access_pointer(data->filter));
1108 			data->filter = NULL;
1109 			ret = -ENOMEM;
1110 		}
1111 	}
1112  out:
1113 	return ret;
1114 }
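/*
 * For illustration, when a trigger is registered as
 * 'traceon:5 if pid == 0', filter_str arrives here as 'if pid == 0';
 * the strsep() above consumes the leading 'if', create_event_filter()
 * is handed just 'pid == 0', and that same string is what gets saved
 * in data->filter_str for later printing.
 */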
1115 
1116 static LIST_HEAD(named_triggers);
1117 
1118 /**
1119  * find_named_trigger - Find the common named trigger associated with @name
1120  * @name: The name of the set of named triggers to find the common data for
1121  *
1122  * Named triggers are sets of triggers that share a common set of
1123  * trigger data.  The first named trigger registered with a given name
1124  * owns the common trigger data that the others subsequently
1125  * registered with the same name will reference.  This function
1126  * returns the common trigger data associated with that first
1127  * registered instance.
1128  *
1129  * Return: the common trigger data for the given named trigger on
1130  * success, NULL otherwise.
1131  */
1132 struct event_trigger_data *find_named_trigger(const char *name)
1133 {
1134 	struct event_trigger_data *data;
1135 
1136 	if (!name)
1137 		return NULL;
1138 
1139 	list_for_each_entry(data, &named_triggers, named_list) {
1140 		if (data->named_data)
1141 			continue;
1142 		if (strcmp(data->name, name) == 0)
1143 			return data;
1144 	}
1145 
1146 	return NULL;
1147 }
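/*
 * Named triggers are used by hist triggers; as an illustration
 * (trigger strings shown are examples only):
 *
 *   echo 'hist:name=foo:keys=pid' > events/sched/sched_waking/trigger
 *   echo 'hist:name=foo:keys=pid' > events/sched/sched_switch/trigger
 *
 * The second registration finds the first's common trigger data via
 * find_named_trigger("foo") and attaches to it with
 * set_named_trigger_data(), so both events update the same histogram.
 */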
1148 
1149 /**
1150  * is_named_trigger - determine if a given trigger is a named trigger
1151  * @test: The trigger data to test
1152  *
1153  * Return: true if 'test' is a named trigger, false otherwise.
1154  */
1155 bool is_named_trigger(struct event_trigger_data *test)
1156 {
1157 	struct event_trigger_data *data;
1158 
1159 	list_for_each_entry(data, &named_triggers, named_list) {
1160 		if (test == data)
1161 			return true;
1162 	}
1163 
1164 	return false;
1165 }
1166 
1167 /**
1168  * save_named_trigger - save the trigger in the named trigger list
1169  * @name: The name of the named trigger set
1170  * @data: The trigger data to save
1171  *
1172  * Return: 0 if successful, negative error otherwise.
1173  */
1174 int save_named_trigger(const char *name, struct event_trigger_data *data)
1175 {
1176 	data->name = kstrdup(name, GFP_KERNEL);
1177 	if (!data->name)
1178 		return -ENOMEM;
1179 
1180 	list_add(&data->named_list, &named_triggers);
1181 
1182 	return 0;
1183 }
1184 
1185 /**
1186  * del_named_trigger - delete a trigger from the named trigger list
1187  * @data: The trigger data to delete
1188  */
1189 void del_named_trigger(struct event_trigger_data *data)
1190 {
1191 	kfree(data->name);
1192 	data->name = NULL;
1193 
1194 	list_del(&data->named_list);
1195 }
1196 
1197 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
1198 {
1199 	struct event_trigger_data *test;
1200 
1201 	list_for_each_entry(test, &named_triggers, named_list) {
1202 		if (strcmp(test->name, data->name) == 0) {
1203 			if (pause) {
1204 				test->paused_tmp = test->paused;
1205 				test->paused = true;
1206 			} else {
1207 				test->paused = test->paused_tmp;
1208 			}
1209 		}
1210 	}
1211 }
1212 
1213 /**
1214  * pause_named_trigger - Pause all named triggers with the same name
1215  * @data: The trigger data of a named trigger to pause
1216  *
1217  * Pauses a named trigger along with all other triggers having the
1218  * same name.  Because named triggers share a common set of data,
1219  * pausing only one is meaningless, so pausing one named trigger needs
1220  * to pause all triggers with the same name.
1221  */
1222 void pause_named_trigger(struct event_trigger_data *data)
1223 {
1224 	__pause_named_trigger(data, true);
1225 }
1226 
1227 /**
1228  * unpause_named_trigger - Un-pause all named triggers with the same name
1229  * @data: The trigger data of a named trigger to unpause
1230  *
1231  * Un-pauses a named trigger along with all other triggers having the
1232  * same name.  Because named triggers share a common set of data,
1233  * unpausing only one is meaningless, so unpausing one named trigger
1234  * needs to unpause all triggers with the same name.
1235  */
1236 void unpause_named_trigger(struct event_trigger_data *data)
1237 {
1238 	__pause_named_trigger(data, false);
1239 }
1240 
1241 /**
1242  * set_named_trigger_data - Associate common named trigger data
1243  * @data: The trigger data to associate
1244  * @named_data: The common named trigger to be associated
1245  *
1246  * Named triggers are sets of triggers that share a common set of
1247  * trigger data.  The first named trigger registered with a given name
1248  * owns the common trigger data that the others subsequently
1249  * registered with the same name will reference.  This function
1250  * associates the common trigger data from the first trigger with the
1251  * given trigger.
1252  */
1253 void set_named_trigger_data(struct event_trigger_data *data,
1254 			    struct event_trigger_data *named_data)
1255 {
1256 	data->named_data = named_data;
1257 }
1258 
1259 struct event_trigger_data *
1260 get_named_trigger_data(struct event_trigger_data *data)
1261 {
1262 	return data->named_data;
1263 }
1264 
1265 static void
1266 traceon_trigger(struct event_trigger_data *data,
1267 		struct trace_buffer *buffer, void *rec,
1268 		struct ring_buffer_event *event)
1269 {
1270 	struct trace_event_file *file = data->private_data;
1271 
1272 	if (file) {
1273 		if (tracer_tracing_is_on(file->tr))
1274 			return;
1275 
1276 		tracer_tracing_on(file->tr);
1277 		return;
1278 	}
1279 
1280 	if (tracing_is_on())
1281 		return;
1282 
1283 	tracing_on();
1284 }
1285 
1286 static void
1287 traceon_count_trigger(struct event_trigger_data *data,
1288 		      struct trace_buffer *buffer, void *rec,
1289 		      struct ring_buffer_event *event)
1290 {
1291 	struct trace_event_file *file = data->private_data;
1292 
1293 	if (file) {
1294 		if (tracer_tracing_is_on(file->tr))
1295 			return;
1296 	} else {
1297 		if (tracing_is_on())
1298 			return;
1299 	}
1300 
1301 	if (!data->count)
1302 		return;
1303 
1304 	if (data->count != -1)
1305 		(data->count)--;
1306 
1307 	if (file)
1308 		tracer_tracing_on(file->tr);
1309 	else
1310 		tracing_on();
1311 }
1312 
1313 static void
1314 traceoff_trigger(struct event_trigger_data *data,
1315 		 struct trace_buffer *buffer, void *rec,
1316 		 struct ring_buffer_event *event)
1317 {
1318 	struct trace_event_file *file = data->private_data;
1319 
1320 	if (file) {
1321 		if (!tracer_tracing_is_on(file->tr))
1322 			return;
1323 
1324 		tracer_tracing_off(file->tr);
1325 		return;
1326 	}
1327 
1328 	if (!tracing_is_on())
1329 		return;
1330 
1331 	tracing_off();
1332 }
1333 
1334 static void
1335 traceoff_count_trigger(struct event_trigger_data *data,
1336 		       struct trace_buffer *buffer, void *rec,
1337 		       struct ring_buffer_event *event)
1338 {
1339 	struct trace_event_file *file = data->private_data;
1340 
1341 	if (file) {
1342 		if (!tracer_tracing_is_on(file->tr))
1343 			return;
1344 	} else {
1345 		if (!tracing_is_on())
1346 			return;
1347 	}
1348 
1349 	if (!data->count)
1350 		return;
1351 
1352 	if (data->count != -1)
1353 		(data->count)--;
1354 
1355 	if (file)
1356 		tracer_tracing_off(file->tr);
1357 	else
1358 		tracing_off();
1359 }
1360 
1361 static int
1362 traceon_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1363 {
1364 	return event_trigger_print("traceon", m, (void *)data->count,
1365 				   data->filter_str);
1366 }
1367 
1368 static int
1369 traceoff_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1370 {
1371 	return event_trigger_print("traceoff", m, (void *)data->count,
1372 				   data->filter_str);
1373 }
1374 
1375 static struct event_trigger_ops traceon_trigger_ops = {
1376 	.trigger		= traceon_trigger,
1377 	.print			= traceon_trigger_print,
1378 	.init			= event_trigger_init,
1379 	.free			= event_trigger_free,
1380 };
1381 
1382 static struct event_trigger_ops traceon_count_trigger_ops = {
1383 	.trigger		= traceon_count_trigger,
1384 	.print			= traceon_trigger_print,
1385 	.init			= event_trigger_init,
1386 	.free			= event_trigger_free,
1387 };
1388 
1389 static struct event_trigger_ops traceoff_trigger_ops = {
1390 	.trigger		= traceoff_trigger,
1391 	.print			= traceoff_trigger_print,
1392 	.init			= event_trigger_init,
1393 	.free			= event_trigger_free,
1394 };
1395 
1396 static struct event_trigger_ops traceoff_count_trigger_ops = {
1397 	.trigger		= traceoff_count_trigger,
1398 	.print			= traceoff_trigger_print,
1399 	.init			= event_trigger_init,
1400 	.free			= event_trigger_free,
1401 };
1402 
1403 static struct event_trigger_ops *
1404 onoff_get_trigger_ops(char *cmd, char *param)
1405 {
1406 	struct event_trigger_ops *ops;
1407 
1408 	/* we register both traceon and traceoff to this callback */
1409 	if (strcmp(cmd, "traceon") == 0)
1410 		ops = param ? &traceon_count_trigger_ops :
1411 			&traceon_trigger_ops;
1412 	else
1413 		ops = param ? &traceoff_count_trigger_ops :
1414 			&traceoff_trigger_ops;
1415 
1416 	return ops;
1417 }
1418 
1419 static struct event_command trigger_traceon_cmd = {
1420 	.name			= "traceon",
1421 	.trigger_type		= ETT_TRACE_ONOFF,
1422 	.parse			= event_trigger_parse,
1423 	.reg			= register_trigger,
1424 	.unreg			= unregister_trigger,
1425 	.get_trigger_ops	= onoff_get_trigger_ops,
1426 	.set_filter		= set_trigger_filter,
1427 };
1428 
1429 static struct event_command trigger_traceoff_cmd = {
1430 	.name			= "traceoff",
1431 	.trigger_type		= ETT_TRACE_ONOFF,
1432 	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1433 	.parse			= event_trigger_parse,
1434 	.reg			= register_trigger,
1435 	.unreg			= unregister_trigger,
1436 	.get_trigger_ops	= onoff_get_trigger_ops,
1437 	.set_filter		= set_trigger_filter,
1438 };
1439 
1440 #ifdef CONFIG_TRACER_SNAPSHOT
1441 static void
1442 snapshot_trigger(struct event_trigger_data *data,
1443 		 struct trace_buffer *buffer, void *rec,
1444 		 struct ring_buffer_event *event)
1445 {
1446 	struct trace_event_file *file = data->private_data;
1447 
1448 	if (file)
1449 		tracing_snapshot_instance(file->tr);
1450 	else
1451 		tracing_snapshot();
1452 }
1453 
1454 static void
1455 snapshot_count_trigger(struct event_trigger_data *data,
1456 		       struct trace_buffer *buffer, void *rec,
1457 		       struct ring_buffer_event *event)
1458 {
1459 	if (!data->count)
1460 		return;
1461 
1462 	if (data->count != -1)
1463 		(data->count)--;
1464 
1465 	snapshot_trigger(data, buffer, rec, event);
1466 }
1467 
1468 static int
1469 register_snapshot_trigger(char *glob,
1470 			  struct event_trigger_data *data,
1471 			  struct trace_event_file *file)
1472 {
1473 	if (tracing_alloc_snapshot_instance(file->tr) != 0)
1474 		return 0;
1475 
1476 	return register_trigger(glob, data, file);
1477 }
1478 
1479 static int
1480 snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1481 {
1482 	return event_trigger_print("snapshot", m, (void *)data->count,
1483 				   data->filter_str);
1484 }
1485 
1486 static struct event_trigger_ops snapshot_trigger_ops = {
1487 	.trigger		= snapshot_trigger,
1488 	.print			= snapshot_trigger_print,
1489 	.init			= event_trigger_init,
1490 	.free			= event_trigger_free,
1491 };
1492 
1493 static struct event_trigger_ops snapshot_count_trigger_ops = {
1494 	.trigger		= snapshot_count_trigger,
1495 	.print			= snapshot_trigger_print,
1496 	.init			= event_trigger_init,
1497 	.free			= event_trigger_free,
1498 };
1499 
1500 static struct event_trigger_ops *
1501 snapshot_get_trigger_ops(char *cmd, char *param)
1502 {
1503 	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1504 }
1505 
1506 static struct event_command trigger_snapshot_cmd = {
1507 	.name			= "snapshot",
1508 	.trigger_type		= ETT_SNAPSHOT,
1509 	.parse			= event_trigger_parse,
1510 	.reg			= register_snapshot_trigger,
1511 	.unreg			= unregister_trigger,
1512 	.get_trigger_ops	= snapshot_get_trigger_ops,
1513 	.set_filter		= set_trigger_filter,
1514 };
1515 
1516 static __init int register_trigger_snapshot_cmd(void)
1517 {
1518 	int ret;
1519 
1520 	ret = register_event_command(&trigger_snapshot_cmd);
1521 	WARN_ON(ret < 0);
1522 
1523 	return ret;
1524 }
1525 #else
1526 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1527 #endif /* CONFIG_TRACER_SNAPSHOT */
1528 
1529 #ifdef CONFIG_STACKTRACE
1530 #ifdef CONFIG_UNWINDER_ORC
1531 /* Skip 2:
1532  *   event_triggers_post_call()
1533  *   trace_event_raw_event_xxx()
1534  */
1535 # define STACK_SKIP 2
1536 #else
1537 /*
1538  * Skip 4:
1539  *   stacktrace_trigger()
1540  *   event_triggers_post_call()
1541  *   trace_event_buffer_commit()
1542  *   trace_event_raw_event_xxx()
1543  */
1544 #define STACK_SKIP 4
1545 #endif
1546 
1547 static void
1548 stacktrace_trigger(struct event_trigger_data *data,
1549 		   struct trace_buffer *buffer,  void *rec,
1550 		   struct ring_buffer_event *event)
1551 {
1552 	struct trace_event_file *file = data->private_data;
1553 
1554 	if (file)
1555 		__trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
1556 	else
1557 		trace_dump_stack(STACK_SKIP);
1558 }
1559 
1560 static void
1561 stacktrace_count_trigger(struct event_trigger_data *data,
1562 			 struct trace_buffer *buffer, void *rec,
1563 			 struct ring_buffer_event *event)
1564 {
1565 	if (!data->count)
1566 		return;
1567 
1568 	if (data->count != -1)
1569 		(data->count)--;
1570 
1571 	stacktrace_trigger(data, buffer, rec, event);
1572 }
1573 
1574 static int
1575 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1576 {
1577 	return event_trigger_print("stacktrace", m, (void *)data->count,
1578 				   data->filter_str);
1579 }
1580 
1581 static struct event_trigger_ops stacktrace_trigger_ops = {
1582 	.trigger		= stacktrace_trigger,
1583 	.print			= stacktrace_trigger_print,
1584 	.init			= event_trigger_init,
1585 	.free			= event_trigger_free,
1586 };
1587 
1588 static struct event_trigger_ops stacktrace_count_trigger_ops = {
1589 	.trigger		= stacktrace_count_trigger,
1590 	.print			= stacktrace_trigger_print,
1591 	.init			= event_trigger_init,
1592 	.free			= event_trigger_free,
1593 };
1594 
1595 static struct event_trigger_ops *
1596 stacktrace_get_trigger_ops(char *cmd, char *param)
1597 {
1598 	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1599 }
1600 
1601 static struct event_command trigger_stacktrace_cmd = {
1602 	.name			= "stacktrace",
1603 	.trigger_type		= ETT_STACKTRACE,
1604 	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1605 	.parse			= event_trigger_parse,
1606 	.reg			= register_trigger,
1607 	.unreg			= unregister_trigger,
1608 	.get_trigger_ops	= stacktrace_get_trigger_ops,
1609 	.set_filter		= set_trigger_filter,
1610 };
1611 
1612 static __init int register_trigger_stacktrace_cmd(void)
1613 {
1614 	int ret;
1615 
1616 	ret = register_event_command(&trigger_stacktrace_cmd);
1617 	WARN_ON(ret < 0);
1618 
1619 	return ret;
1620 }
1621 #else
1622 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1623 #endif /* CONFIG_STACKTRACE */
1624 
1625 static __init void unregister_trigger_traceon_traceoff_cmds(void)
1626 {
1627 	unregister_event_command(&trigger_traceon_cmd);
1628 	unregister_event_command(&trigger_traceoff_cmd);
1629 }
1630 
1631 static void
1632 event_enable_trigger(struct event_trigger_data *data,
1633 		     struct trace_buffer *buffer,  void *rec,
1634 		     struct ring_buffer_event *event)
1635 {
1636 	struct enable_trigger_data *enable_data = data->private_data;
1637 
1638 	if (enable_data->enable)
1639 		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1640 	else
1641 		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1642 }
1643 
1644 static void
1645 event_enable_count_trigger(struct event_trigger_data *data,
1646 			   struct trace_buffer *buffer,  void *rec,
1647 			   struct ring_buffer_event *event)
1648 {
1649 	struct enable_trigger_data *enable_data = data->private_data;
1650 
1651 	if (!data->count)
1652 		return;
1653 
1654 	/* Skip if the event is in a state we want to switch to */
1655 	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1656 		return;
1657 
1658 	if (data->count != -1)
1659 		(data->count)--;
1660 
1661 	event_enable_trigger(data, buffer, rec, event);
1662 }
1663 
1664 int event_enable_trigger_print(struct seq_file *m,
1665 			       struct event_trigger_data *data)
1666 {
1667 	struct enable_trigger_data *enable_data = data->private_data;
1668 
1669 	seq_printf(m, "%s:%s:%s",
1670 		   enable_data->hist ?
1671 		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1672 		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1673 		   enable_data->file->event_call->class->system,
1674 		   trace_event_name(enable_data->file->event_call));
1675 
1676 	if (data->count == -1)
1677 		seq_puts(m, ":unlimited");
1678 	else
1679 		seq_printf(m, ":count=%ld", data->count);
1680 
1681 	if (data->filter_str)
1682 		seq_printf(m, " if %s\n", data->filter_str);
1683 	else
1684 		seq_putc(m, '\n');
1685 
1686 	return 0;
1687 }
1688 
1689 void event_enable_trigger_free(struct event_trigger_data *data)
1690 {
1691 	struct enable_trigger_data *enable_data = data->private_data;
1692 
1693 	if (WARN_ON_ONCE(data->ref <= 0))
1694 		return;
1695 
1696 	data->ref--;
1697 	if (!data->ref) {
1698 		/* Remove the SOFT_MODE flag */
1699 		trace_event_enable_disable(enable_data->file, 0, 1);
1700 		trace_event_put_ref(enable_data->file->event_call);
1701 		trigger_data_free(data);
1702 		kfree(enable_data);
1703 	}
1704 }
1705 
1706 static struct event_trigger_ops event_enable_trigger_ops = {
1707 	.trigger		= event_enable_trigger,
1708 	.print			= event_enable_trigger_print,
1709 	.init			= event_trigger_init,
1710 	.free			= event_enable_trigger_free,
1711 };
1712 
1713 static struct event_trigger_ops event_enable_count_trigger_ops = {
1714 	.trigger		= event_enable_count_trigger,
1715 	.print			= event_enable_trigger_print,
1716 	.init			= event_trigger_init,
1717 	.free			= event_enable_trigger_free,
1718 };
1719 
1720 static struct event_trigger_ops event_disable_trigger_ops = {
1721 	.trigger		= event_enable_trigger,
1722 	.print			= event_enable_trigger_print,
1723 	.init			= event_trigger_init,
1724 	.free			= event_enable_trigger_free,
1725 };
1726 
1727 static struct event_trigger_ops event_disable_count_trigger_ops = {
1728 	.trigger		= event_enable_count_trigger,
1729 	.print			= event_enable_trigger_print,
1730 	.init			= event_trigger_init,
1731 	.free			= event_enable_trigger_free,
1732 };
1733 
1734 int event_enable_trigger_parse(struct event_command *cmd_ops,
1735 			       struct trace_event_file *file,
1736 			       char *glob, char *cmd, char *param_and_filter)
1737 {
1738 	struct trace_event_file *event_enable_file;
1739 	struct enable_trigger_data *enable_data;
1740 	struct event_trigger_data *trigger_data;
1741 	struct trace_array *tr = file->tr;
1742 	char *param, *filter;
1743 	bool enable, remove;
1744 	const char *system;
1745 	const char *event;
1746 	bool hist = false;
1747 	int ret;
1748 
1749 	remove = event_trigger_check_remove(glob);
1750 
1751 	if (event_trigger_empty_param(param_and_filter))
1752 		return -EINVAL;
1753 
1754 	ret = event_trigger_separate_filter(param_and_filter, &param, &filter, true);
1755 	if (ret)
1756 		return ret;
1757 
1758 	system = strsep(&param, ":");
1759 	if (!param)
1760 		return -EINVAL;
1761 
1762 	event = strsep(&param, ":");
1763 
1764 	ret = -EINVAL;
1765 	event_enable_file = find_event_file(tr, system, event);
1766 	if (!event_enable_file)
1767 		goto out;
1768 
1769 #ifdef CONFIG_HIST_TRIGGERS
1770 	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1771 		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1772 
1773 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1774 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1775 #else
1776 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1777 #endif
1778 	ret = -ENOMEM;
1779 
1780 	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1781 	if (!enable_data)
1782 		goto out;
1783 
1784 	enable_data->hist = hist;
1785 	enable_data->enable = enable;
1786 	enable_data->file = event_enable_file;
1787 
1788 	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data);
1789 	if (!trigger_data) {
1790 		kfree(enable_data);
1791 		goto out;
1792 	}
1793 
1794 	if (remove) {
1795 		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1796 		kfree(trigger_data);
1797 		kfree(enable_data);
1798 		ret = 0;
1799 		goto out;
1800 	}
1801 
1802 	/* Up the trigger_data count to make sure nothing frees it on failure */
1803 	event_trigger_init(trigger_data);
1804 
1805 	ret = event_trigger_parse_num(param, trigger_data);
1806 	if (ret)
1807 		goto out_free;
1808 
1809 	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1810 	if (ret < 0)
1811 		goto out_free;
1812 
1813 	/* Don't let event modules unload while probe registered */
1814 	ret = trace_event_try_get_ref(event_enable_file->event_call);
1815 	if (!ret) {
1816 		ret = -EBUSY;
1817 		goto out_free;
1818 	}
1819 
1820 	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1821 	if (ret < 0)
1822 		goto out_put;
1823 
1824 	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1825 	if (ret)
1826 		goto out_disable;
1827 
1828 	event_trigger_free(trigger_data);
1829  out:
1830 	return ret;
1831  out_disable:
1832 	trace_event_enable_disable(event_enable_file, 0, 1);
1833  out_put:
1834 	trace_event_put_ref(event_enable_file->event_call);
1835  out_free:
1836 	event_trigger_reset_filter(cmd_ops, trigger_data);
1837 	event_trigger_free(trigger_data);
1838 	kfree(enable_data);
1839 
1840 	goto out;
1841 }
1842 
1843 int event_enable_register_trigger(char *glob,
1844 				  struct event_trigger_data *data,
1845 				  struct trace_event_file *file)
1846 {
1847 	struct enable_trigger_data *enable_data = data->private_data;
1848 	struct enable_trigger_data *test_enable_data;
1849 	struct event_trigger_data *test;
1850 	int ret = 0;
1851 
1852 	lockdep_assert_held(&event_mutex);
1853 
1854 	list_for_each_entry(test, &file->triggers, list) {
1855 		test_enable_data = test->private_data;
1856 		if (test_enable_data &&
1857 		    (test->cmd_ops->trigger_type ==
1858 		     data->cmd_ops->trigger_type) &&
1859 		    (test_enable_data->file == enable_data->file)) {
1860 			ret = -EEXIST;
1861 			goto out;
1862 		}
1863 	}
1864 
1865 	if (data->ops->init) {
1866 		ret = data->ops->init(data);
1867 		if (ret < 0)
1868 			goto out;
1869 	}
1870 
1871 	list_add_rcu(&data->list, &file->triggers);
1872 
1873 	update_cond_flag(file);
1874 	ret = trace_event_trigger_enable_disable(file, 1);
1875 	if (ret < 0) {
1876 		list_del_rcu(&data->list);
1877 		update_cond_flag(file);
1878 	}
1879 out:
1880 	return ret;
1881 }
1882 
1883 void event_enable_unregister_trigger(char *glob,
1884 				     struct event_trigger_data *test,
1885 				     struct trace_event_file *file)
1886 {
1887 	struct enable_trigger_data *test_enable_data = test->private_data;
1888 	struct event_trigger_data *data = NULL, *iter;
1889 	struct enable_trigger_data *enable_data;
1890 
1891 	lockdep_assert_held(&event_mutex);
1892 
1893 	list_for_each_entry(iter, &file->triggers, list) {
1894 		enable_data = iter->private_data;
1895 		if (enable_data &&
1896 		    (iter->cmd_ops->trigger_type ==
1897 		     test->cmd_ops->trigger_type) &&
1898 		    (enable_data->file == test_enable_data->file)) {
1899 			data = iter;
1900 			list_del_rcu(&data->list);
1901 			trace_event_trigger_enable_disable(file, 0);
1902 			update_cond_flag(file);
1903 			break;
1904 		}
1905 	}
1906 
1907 	if (data && data->ops->free)
1908 		data->ops->free(data);
1909 }
1910 
1911 static struct event_trigger_ops *
1912 event_enable_get_trigger_ops(char *cmd, char *param)
1913 {
1914 	struct event_trigger_ops *ops;
1915 	bool enable;
1916 
1917 #ifdef CONFIG_HIST_TRIGGERS
1918 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1919 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1920 #else
1921 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1922 #endif
1923 	if (enable)
1924 		ops = param ? &event_enable_count_trigger_ops :
1925 			&event_enable_trigger_ops;
1926 	else
1927 		ops = param ? &event_disable_count_trigger_ops :
1928 			&event_disable_trigger_ops;
1929 
1930 	return ops;
1931 }
1932 
1933 static struct event_command trigger_enable_cmd = {
1934 	.name			= ENABLE_EVENT_STR,
1935 	.trigger_type		= ETT_EVENT_ENABLE,
1936 	.parse			= event_enable_trigger_parse,
1937 	.reg			= event_enable_register_trigger,
1938 	.unreg			= event_enable_unregister_trigger,
1939 	.get_trigger_ops	= event_enable_get_trigger_ops,
1940 	.set_filter		= set_trigger_filter,
1941 };
1942 
1943 static struct event_command trigger_disable_cmd = {
1944 	.name			= DISABLE_EVENT_STR,
1945 	.trigger_type		= ETT_EVENT_ENABLE,
1946 	.parse			= event_enable_trigger_parse,
1947 	.reg			= event_enable_register_trigger,
1948 	.unreg			= event_enable_unregister_trigger,
1949 	.get_trigger_ops	= event_enable_get_trigger_ops,
1950 	.set_filter		= set_trigger_filter,
1951 };
1952 
1953 static __init void unregister_trigger_enable_disable_cmds(void)
1954 {
1955 	unregister_event_command(&trigger_enable_cmd);
1956 	unregister_event_command(&trigger_disable_cmd);
1957 }
1958 
1959 static __init int register_trigger_enable_disable_cmds(void)
1960 {
1961 	int ret;
1962 
1963 	ret = register_event_command(&trigger_enable_cmd);
1964 	if (WARN_ON(ret < 0))
1965 		return ret;
1966 	ret = register_event_command(&trigger_disable_cmd);
1967 	if (WARN_ON(ret < 0))
1968 		unregister_trigger_enable_disable_cmds();
1969 
1970 	return ret;
1971 }
1972 
1973 static __init int register_trigger_traceon_traceoff_cmds(void)
1974 {
1975 	int ret;
1976 
1977 	ret = register_event_command(&trigger_traceon_cmd);
1978 	if (WARN_ON(ret < 0))
1979 		return ret;
1980 	ret = register_event_command(&trigger_traceoff_cmd);
1981 	if (WARN_ON(ret < 0))
1982 		unregister_trigger_traceon_traceoff_cmds();
1983 
1984 	return ret;
1985 }
1986 
1987 __init int register_trigger_cmds(void)
1988 {
1989 	register_trigger_traceon_traceoff_cmds();
1990 	register_trigger_snapshot_cmd();
1991 	register_trigger_stacktrace_cmd();
1992 	register_trigger_enable_disable_cmds();
1993 	register_trigger_hist_enable_disable_cmds();
1994 	register_trigger_hist_cmd();
1995 
1996 	return 0;
1997 }
1998