xref: /linux/kernel/trace/trace_events_hist.c (revision c5951e7c8ee5cb04b8b41c32bf567b90117a2124)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_hist - trace event hist triggers
4  *
5  * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 
21 #include "tracing_map.h"
22 #include "trace.h"
23 #include "trace_dynevent.h"
24 
25 #define SYNTH_SYSTEM		"synthetic"
26 #define SYNTH_FIELDS_MAX	32
27 
28 #define STR_VAR_LEN_MAX		32 /* must be multiple of sizeof(u64) */
29 
30 #define ERRORS								\
31 	C(NONE,			"No error"),				\
32 	C(DUPLICATE_VAR,	"Variable already defined"),		\
33 	C(VAR_NOT_UNIQUE,	"Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
34 	C(TOO_MANY_VARS,	"Too many variables defined"),		\
35 	C(MALFORMED_ASSIGNMENT,	"Malformed assignment"),		\
36 	C(NAMED_MISMATCH,	"Named hist trigger doesn't match existing named trigger (includes variables)"), \
37 	C(TRIGGER_EEXIST,	"Hist trigger already exists"),		\
38 	C(TRIGGER_ENOENT_CLEAR,	"Can't clear or continue a nonexistent hist trigger"), \
39 	C(SET_CLOCK_FAIL,	"Couldn't set trace_clock"),		\
40 	C(BAD_FIELD_MODIFIER,	"Invalid field modifier"),		\
41 	C(TOO_MANY_SUBEXPR,	"Too many subexpressions (3 max)"),	\
42 	C(TIMESTAMP_MISMATCH,	"Timestamp units in expression don't match"), \
43 	C(TOO_MANY_FIELD_VARS,	"Too many field variables defined"),	\
44 	C(EVENT_FILE_NOT_FOUND,	"Event file not found"),		\
45 	C(HIST_NOT_FOUND,	"Matching event histogram not found"),	\
46 	C(HIST_CREATE_FAIL,	"Couldn't create histogram for field"),	\
47 	C(SYNTH_VAR_NOT_FOUND,	"Couldn't find synthetic variable"),	\
48 	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"),	\
49 	C(SYNTH_TYPE_MISMATCH,	"Param type doesn't match synthetic event field type"), \
50 	C(SYNTH_COUNT_MISMATCH,	"Param count doesn't match synthetic event field count"), \
51 	C(FIELD_VAR_PARSE_FAIL,	"Couldn't parse field variable"),	\
52 	C(VAR_CREATE_FIND_FAIL,	"Couldn't create or find variable"),	\
53 	C(ONX_NOT_VAR,		"For onmax(x) or onchange(x), x must be a variable"), \
54 	C(ONX_VAR_NOT_FOUND,	"Couldn't find onmax or onchange variable"), \
55 	C(ONX_VAR_CREATE_FAIL,	"Couldn't create onmax or onchange variable"), \
56 	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"),	\
57 	C(TOO_MANY_PARAMS,	"Too many action params"),		\
58 	C(PARAM_NOT_FOUND,	"Couldn't find param"),			\
59 	C(INVALID_PARAM,	"Invalid action param"),		\
60 	C(ACTION_NOT_FOUND,	"No action found"),			\
61 	C(NO_SAVE_PARAMS,	"No params found for save()"),		\
62 	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
63 	C(ACTION_MISMATCH,	"Handler doesn't support action"),	\
64 	C(NO_CLOSING_PAREN,	"No closing paren found"),		\
65 	C(SUBSYS_NOT_FOUND,	"Missing subsystem"),			\
66 	C(INVALID_SUBSYS_EVENT,	"Invalid subsystem or event name"),	\
67 	C(INVALID_REF_KEY,	"Using variable references in keys not supported"), \
68 	C(VAR_NOT_FOUND,	"Couldn't find variable"),		\
69 	C(FIELD_NOT_FOUND,	"Couldn't find field"),
70 
71 #undef C
72 #define C(a, b)		HIST_ERR_##a
73 
74 enum { ERRORS };
75 
76 #undef C
77 #define C(a, b)		b
78 
79 static const char *err_text[] = { ERRORS };
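/*
 * The C() x-macro above is expanded twice: first with C(a, b) defined
 * as HIST_ERR_##a to generate the anonymous enum, then with C(a, b)
 * defined as b to generate the parallel err_text[] table.  Roughly
 * (an illustrative sketch, not generated output):
 *
 *	enum { HIST_ERR_NONE, HIST_ERR_DUPLICATE_VAR, ... };
 *	static const char *err_text[] = { "No error",
 *					  "Variable already defined", ... };
 *
 * so err_text[HIST_ERR_x] is the message reported for HIST_ERR_x.
 */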
80 
81 struct hist_field;
82 
83 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
84 				struct tracing_map_elt *elt,
85 				struct ring_buffer_event *rbe,
86 				void *event);
87 
88 #define HIST_FIELD_OPERANDS_MAX	2
89 #define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
90 #define HIST_ACTIONS_MAX	8
91 
92 enum field_op_id {
93 	FIELD_OP_NONE,
94 	FIELD_OP_PLUS,
95 	FIELD_OP_MINUS,
96 	FIELD_OP_UNARY_MINUS,
97 };
98 
99 /*
100  * A hist_var (histogram variable) contains variable information for
101  * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
102  * flag set.  A hist_var has a variable name, e.g. ts0, and is
103  * associated with a given histogram trigger, as specified by
104  * hist_data.  The hist_var idx is the unique index assigned to the
105  * variable by the hist trigger's tracing_map.  The idx is what is
106  * used to set a variable's value and, by a variable reference, to
107  * retrieve it.
108  */
109 struct hist_var {
110 	char				*name;
111 	struct hist_trigger_data	*hist_data;
112 	unsigned int			idx;
113 };
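/*
 * As an illustration (the event, variable name and path below are
 * examples only, assuming the standard tracefs mount point), a command
 * such as:
 *
 *	# echo 'hist:keys=pid:ts0=common_timestamp.usecs' \
 *		>> /sys/kernel/tracing/events/sched/sched_waking/trigger
 *
 * defines a variable named ts0 on the sched_waking hist trigger.  Its
 * value is saved per-entry in that trigger's tracing_map and can be
 * referenced later as $ts0 from another trigger's expression.
 */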
114 
115 struct hist_field {
116 	struct ftrace_event_field	*field;
117 	unsigned long			flags;
118 	hist_field_fn_t			fn;
119 	unsigned int			ref;
120 	unsigned int			size;
121 	unsigned int			offset;
122 	unsigned int			is_signed;
123 	const char			*type;
124 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
125 	struct hist_trigger_data	*hist_data;
126 
127 	/*
128 	 * Variable fields contain variable-specific info in var.
129 	 */
130 	struct hist_var			var;
131 	enum field_op_id		operator;
132 	char				*system;
133 	char				*event_name;
134 
135 	/*
136 	 * The name field is used for EXPR and VAR_REF fields.  VAR
137 	 * fields contain the variable name in var.name.
138 	 */
139 	char				*name;
140 
141 	/*
142 	 * When a histogram trigger is hit, if it has any references
143 	 * to variables, the values of those variables are collected
144 	 * into a var_ref_vals array by resolve_var_refs().  The
145 	 * current value of each variable is read from the tracing_map
146 	 * using the hist field's hist_var.idx and entered into the
147 	 * var_ref_vals array at index var_ref_idx, i.e. var_ref_vals[var_ref_idx].
148 	 */
149 	unsigned int			var_ref_idx;
150 	bool				read_once;
151 };
152 
153 static u64 hist_field_none(struct hist_field *field,
154 			   struct tracing_map_elt *elt,
155 			   struct ring_buffer_event *rbe,
156 			   void *event)
157 {
158 	return 0;
159 }
160 
161 static u64 hist_field_counter(struct hist_field *field,
162 			      struct tracing_map_elt *elt,
163 			      struct ring_buffer_event *rbe,
164 			      void *event)
165 {
166 	return 1;
167 }
168 
169 static u64 hist_field_string(struct hist_field *hist_field,
170 			     struct tracing_map_elt *elt,
171 			     struct ring_buffer_event *rbe,
172 			     void *event)
173 {
174 	char *addr = (char *)(event + hist_field->field->offset);
175 
176 	return (u64)(unsigned long)addr;
177 }
178 
179 static u64 hist_field_dynstring(struct hist_field *hist_field,
180 				struct tracing_map_elt *elt,
181 				struct ring_buffer_event *rbe,
182 				void *event)
183 {
184 	u32 str_item = *(u32 *)(event + hist_field->field->offset);
185 	int str_loc = str_item & 0xffff;
186 	char *addr = (char *)(event + str_loc);
187 
188 	return (u64)(unsigned long)addr;
189 }
190 
191 static u64 hist_field_pstring(struct hist_field *hist_field,
192 			      struct tracing_map_elt *elt,
193 			      struct ring_buffer_event *rbe,
194 			      void *event)
195 {
196 	char **addr = (char **)(event + hist_field->field->offset);
197 
198 	return (u64)(unsigned long)*addr;
199 }
200 
201 static u64 hist_field_log2(struct hist_field *hist_field,
202 			   struct tracing_map_elt *elt,
203 			   struct ring_buffer_event *rbe,
204 			   void *event)
205 {
206 	struct hist_field *operand = hist_field->operands[0];
207 
208 	u64 val = operand->fn(operand, elt, rbe, event);
209 
210 	return (u64) ilog2(roundup_pow_of_two(val));
211 }
212 
213 static u64 hist_field_plus(struct hist_field *hist_field,
214 			   struct tracing_map_elt *elt,
215 			   struct ring_buffer_event *rbe,
216 			   void *event)
217 {
218 	struct hist_field *operand1 = hist_field->operands[0];
219 	struct hist_field *operand2 = hist_field->operands[1];
220 
221 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
222 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
223 
224 	return val1 + val2;
225 }
226 
227 static u64 hist_field_minus(struct hist_field *hist_field,
228 			    struct tracing_map_elt *elt,
229 			    struct ring_buffer_event *rbe,
230 			    void *event)
231 {
232 	struct hist_field *operand1 = hist_field->operands[0];
233 	struct hist_field *operand2 = hist_field->operands[1];
234 
235 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
236 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
237 
238 	return val1 - val2;
239 }
240 
241 static u64 hist_field_unary_minus(struct hist_field *hist_field,
242 				  struct tracing_map_elt *elt,
243 				  struct ring_buffer_event *rbe,
244 				  void *event)
245 {
246 	struct hist_field *operand = hist_field->operands[0];
247 
248 	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
249 	u64 val = (u64)-sval;
250 
251 	return val;
252 }
253 
254 #define DEFINE_HIST_FIELD_FN(type)					\
255 	static u64 hist_field_##type(struct hist_field *hist_field,	\
256 				     struct tracing_map_elt *elt,	\
257 				     struct ring_buffer_event *rbe,	\
258 				     void *event)			\
259 {									\
260 	type *addr = (type *)(event + hist_field->field->offset);	\
261 									\
262 	return (u64)(unsigned long)*addr;				\
263 }
264 
265 DEFINE_HIST_FIELD_FN(s64);
266 DEFINE_HIST_FIELD_FN(u64);
267 DEFINE_HIST_FIELD_FN(s32);
268 DEFINE_HIST_FIELD_FN(u32);
269 DEFINE_HIST_FIELD_FN(s16);
270 DEFINE_HIST_FIELD_FN(u16);
271 DEFINE_HIST_FIELD_FN(s8);
272 DEFINE_HIST_FIELD_FN(u8);
273 
274 #define for_each_hist_field(i, hist_data)	\
275 	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
276 
277 #define for_each_hist_val_field(i, hist_data)	\
278 	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
279 
280 #define for_each_hist_key_field(i, hist_data)	\
281 	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
282 
283 #define HIST_STACKTRACE_DEPTH	16
284 #define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
285 #define HIST_STACKTRACE_SKIP	5
286 
287 #define HITCOUNT_IDX		0
288 #define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
289 
290 enum hist_field_flags {
291 	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
292 	HIST_FIELD_FL_KEY		= 1 << 1,
293 	HIST_FIELD_FL_STRING		= 1 << 2,
294 	HIST_FIELD_FL_HEX		= 1 << 3,
295 	HIST_FIELD_FL_SYM		= 1 << 4,
296 	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
297 	HIST_FIELD_FL_EXECNAME		= 1 << 6,
298 	HIST_FIELD_FL_SYSCALL		= 1 << 7,
299 	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
300 	HIST_FIELD_FL_LOG2		= 1 << 9,
301 	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
302 	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
303 	HIST_FIELD_FL_VAR		= 1 << 12,
304 	HIST_FIELD_FL_EXPR		= 1 << 13,
305 	HIST_FIELD_FL_VAR_REF		= 1 << 14,
306 	HIST_FIELD_FL_CPU		= 1 << 15,
307 	HIST_FIELD_FL_ALIAS		= 1 << 16,
308 };
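/*
 * Several of these flags correspond to field modifiers accepted in a
 * hist trigger command.  For example (an illustrative command; the
 * event, fields and path are examples only):
 *
 *	# echo 'hist:keys=call_site.sym,bytes_req.log2:vals=bytes_alloc' \
 *		>> /sys/kernel/tracing/events/kmem/kmalloc/trigger
 *
 * sets HIST_FIELD_FL_SYM on the call_site key and HIST_FIELD_FL_LOG2
 * on the bytes_req key.
 */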
309 
310 struct var_defs {
311 	unsigned int	n_vars;
312 	char		*name[TRACING_MAP_VARS_MAX];
313 	char		*expr[TRACING_MAP_VARS_MAX];
314 };
315 
316 struct hist_trigger_attrs {
317 	char		*keys_str;
318 	char		*vals_str;
319 	char		*sort_key_str;
320 	char		*name;
321 	char		*clock;
322 	bool		pause;
323 	bool		cont;
324 	bool		clear;
325 	bool		ts_in_usecs;
326 	unsigned int	map_bits;
327 
328 	char		*assignment_str[TRACING_MAP_VARS_MAX];
329 	unsigned int	n_assignments;
330 
331 	char		*action_str[HIST_ACTIONS_MAX];
332 	unsigned int	n_actions;
333 
334 	struct var_defs	var_defs;
335 };
336 
337 struct field_var {
338 	struct hist_field	*var;
339 	struct hist_field	*val;
340 };
341 
342 struct field_var_hist {
343 	struct hist_trigger_data	*hist_data;
344 	char				*cmd;
345 };
346 
347 struct hist_trigger_data {
348 	struct hist_field               *fields[HIST_FIELDS_MAX];
349 	unsigned int			n_vals;
350 	unsigned int			n_keys;
351 	unsigned int			n_fields;
352 	unsigned int			n_vars;
353 	unsigned int			key_size;
354 	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
355 	unsigned int			n_sort_keys;
356 	struct trace_event_file		*event_file;
357 	struct hist_trigger_attrs	*attrs;
358 	struct tracing_map		*map;
359 	bool				enable_timestamps;
360 	bool				remove;
361 	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
362 	unsigned int			n_var_refs;
363 
364 	struct action_data		*actions[HIST_ACTIONS_MAX];
365 	unsigned int			n_actions;
366 
367 	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
368 	unsigned int			n_field_vars;
369 	unsigned int			n_field_var_str;
370 	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
371 	unsigned int			n_field_var_hists;
372 
373 	struct field_var		*save_vars[SYNTH_FIELDS_MAX];
374 	unsigned int			n_save_vars;
375 	unsigned int			n_save_var_str;
376 };
377 
378 static int synth_event_create(int argc, const char **argv);
379 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
380 static int synth_event_release(struct dyn_event *ev);
381 static bool synth_event_is_busy(struct dyn_event *ev);
382 static bool synth_event_match(const char *system, const char *event,
383 			int argc, const char **argv, struct dyn_event *ev);
384 
385 static struct dyn_event_operations synth_event_ops = {
386 	.create = synth_event_create,
387 	.show = synth_event_show,
388 	.is_busy = synth_event_is_busy,
389 	.free = synth_event_release,
390 	.match = synth_event_match,
391 };
392 
393 struct synth_field {
394 	char *type;
395 	char *name;
396 	size_t size;
397 	bool is_signed;
398 	bool is_string;
399 };
400 
401 struct synth_event {
402 	struct dyn_event			devent;
403 	int					ref;
404 	char					*name;
405 	struct synth_field			**fields;
406 	unsigned int				n_fields;
407 	unsigned int				n_u64;
408 	struct trace_event_class		class;
409 	struct trace_event_call			call;
410 	struct tracepoint			*tp;
411 };
412 
413 static bool is_synth_event(struct dyn_event *ev)
414 {
415 	return ev->ops == &synth_event_ops;
416 }
417 
418 static struct synth_event *to_synth_event(struct dyn_event *ev)
419 {
420 	return container_of(ev, struct synth_event, devent);
421 }
422 
423 static bool synth_event_is_busy(struct dyn_event *ev)
424 {
425 	struct synth_event *event = to_synth_event(ev);
426 
427 	return event->ref != 0;
428 }
429 
430 static bool synth_event_match(const char *system, const char *event,
431 			int argc, const char **argv, struct dyn_event *ev)
432 {
433 	struct synth_event *sev = to_synth_event(ev);
434 
435 	return strcmp(sev->name, event) == 0 &&
436 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
437 }
438 
439 struct action_data;
440 
441 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
442 			     struct tracing_map_elt *elt, void *rec,
443 			     struct ring_buffer_event *rbe, void *key,
444 			     struct action_data *data, u64 *var_ref_vals);
445 
446 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
447 
448 enum handler_id {
449 	HANDLER_ONMATCH = 1,
450 	HANDLER_ONMAX,
451 	HANDLER_ONCHANGE,
452 };
453 
454 enum action_id {
455 	ACTION_SAVE = 1,
456 	ACTION_TRACE,
457 	ACTION_SNAPSHOT,
458 };
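/*
 * A handler is always paired with an action in the trigger command.
 * Two typical combinations, loosely following the histogram
 * documentation (the event and variable names are examples only):
 *
 *	onmax($wakeup_lat).save(next_comm,next_pid)
 *		-> HANDLER_ONMAX + ACTION_SAVE
 *	onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid)
 *		-> HANDLER_ONMATCH + ACTION_TRACE
 *
 * where wakeup_latency is a previously defined synthetic event and
 * $wakeup_lat is a hist trigger variable.
 */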
459 
460 struct action_data {
461 	enum handler_id		handler;
462 	enum action_id		action;
463 	char			*action_name;
464 	action_fn_t		fn;
465 
466 	unsigned int		n_params;
467 	char			*params[SYNTH_FIELDS_MAX];
468 
469 	/*
470 	 * When a histogram trigger is hit, the values of any
471 	 * references to variables, including variables being passed
472 	 * as parameters to synthetic events, are collected into a
473 	 * var_ref_vals array.  This var_ref_idx is the index of the
474 	 * first param in the array to be passed to the synthetic
475 	 * event invocation.
476 	 */
477 	unsigned int		var_ref_idx;
478 	struct synth_event	*synth_event;
479 	bool			use_trace_keyword;
480 	char			*synth_event_name;
481 
482 	union {
483 		struct {
484 			char			*event;
485 			char			*event_system;
486 		} match_data;
487 
488 		struct {
489 			/*
490 			 * var_str contains the $-unstripped variable
491 			 * name referenced by var_ref, and is used when
492 			 * printing the action.  Because var_ref
493 			 * creation is deferred to create_actions(),
494 			 * we need a per-action way to save it until
495 			 * then, thus var_str.
496 			 */
497 			char			*var_str;
498 
499 			/*
500 			 * var_ref refers to the variable being
501 			 * tracked, e.g. onmax($var).
502 			 */
503 			struct hist_field	*var_ref;
504 
505 			/*
506 			 * track_var contains the 'invisible' tracking
507 			 * variable created to keep the current tracked
508 			 * value, e.g. the max value seen so far.
509 			 */
510 			struct hist_field	*track_var;
511 
512 			check_track_val_fn_t	check_val;
513 			action_fn_t		save_data;
514 		} track_data;
515 	};
516 };
517 
518 struct track_data {
519 	u64				track_val;
520 	bool				updated;
521 
522 	unsigned int			key_len;
523 	void				*key;
524 	struct tracing_map_elt		elt;
525 
526 	struct action_data		*action_data;
527 	struct hist_trigger_data	*hist_data;
528 };
529 
530 struct hist_elt_data {
531 	char *comm;
532 	u64 *var_ref_vals;
533 	char *field_var_str[SYNTH_FIELDS_MAX];
534 };
535 
536 struct snapshot_context {
537 	struct tracing_map_elt	*elt;
538 	void			*key;
539 };
540 
541 static void track_data_free(struct track_data *track_data)
542 {
543 	struct hist_elt_data *elt_data;
544 
545 	if (!track_data)
546 		return;
547 
548 	kfree(track_data->key);
549 
550 	elt_data = track_data->elt.private_data;
551 	if (elt_data) {
552 		kfree(elt_data->comm);
553 		kfree(elt_data);
554 	}
555 
556 	kfree(track_data);
557 }
558 
559 static struct track_data *track_data_alloc(unsigned int key_len,
560 					   struct action_data *action_data,
561 					   struct hist_trigger_data *hist_data)
562 {
563 	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
564 	struct hist_elt_data *elt_data;
565 
566 	if (!data)
567 		return ERR_PTR(-ENOMEM);
568 
569 	data->key = kzalloc(key_len, GFP_KERNEL);
570 	if (!data->key) {
571 		track_data_free(data);
572 		return ERR_PTR(-ENOMEM);
573 	}
574 
575 	data->key_len = key_len;
576 	data->action_data = action_data;
577 	data->hist_data = hist_data;
578 
579 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
580 	if (!elt_data) {
581 		track_data_free(data);
582 		return ERR_PTR(-ENOMEM);
583 	}
584 	data->elt.private_data = elt_data;
585 
586 	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
587 	if (!elt_data->comm) {
588 		track_data_free(data);
589 		return ERR_PTR(-ENOMEM);
590 	}
591 
592 	return data;
593 }
594 
595 static char last_cmd[MAX_FILTER_STR_VAL];
596 static char last_cmd_loc[MAX_FILTER_STR_VAL];
597 
598 static int errpos(char *str)
599 {
600 	return err_pos(last_cmd, str);
601 }
602 
603 static void last_cmd_set(struct trace_event_file *file, char *str)
604 {
605 	const char *system = NULL, *name = NULL;
606 	struct trace_event_call *call;
607 
608 	if (!str)
609 		return;
610 
611 	strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
612 
613 	if (file) {
614 		call = file->event_call;
615 
616 		system = call->class->system;
617 		if (system) {
618 			name = trace_event_name(call);
619 			if (!name)
620 				system = NULL;
621 		}
622 	}
623 
624 	if (system)
625 		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
626 }
627 
628 static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
629 {
630 	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
631 			err_type, err_pos);
632 }
633 
634 static void hist_err_clear(void)
635 {
636 	last_cmd[0] = '\0';
637 	last_cmd_loc[0] = '\0';
638 }
639 
640 struct synth_trace_event {
641 	struct trace_entry	ent;
642 	u64			fields[];
643 };
644 
645 static int synth_event_define_fields(struct trace_event_call *call)
646 {
647 	struct synth_trace_event trace;
648 	int offset = offsetof(typeof(trace), fields);
649 	struct synth_event *event = call->data;
650 	unsigned int i, size, n_u64;
651 	char *name, *type;
652 	bool is_signed;
653 	int ret = 0;
654 
655 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
656 		size = event->fields[i]->size;
657 		is_signed = event->fields[i]->is_signed;
658 		type = event->fields[i]->type;
659 		name = event->fields[i]->name;
660 		ret = trace_define_field(call, type, name, offset, size,
661 					 is_signed, FILTER_OTHER);
662 		if (ret)
663 			break;
664 
665 		if (event->fields[i]->is_string) {
666 			offset += STR_VAR_LEN_MAX;
667 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
668 		} else {
669 			offset += sizeof(u64);
670 			n_u64++;
671 		}
672 	}
673 
674 	event->n_u64 = n_u64;
675 
676 	return ret;
677 }
678 
679 static bool synth_field_signed(char *type)
680 {
681 	if (str_has_prefix(type, "u"))
682 		return false;
683 	if (strcmp(type, "gfp_t") == 0)
684 		return false;
685 
686 	return true;
687 }
688 
689 static int synth_field_is_string(char *type)
690 {
691 	if (strstr(type, "char[") != NULL)
692 		return true;
693 
694 	return false;
695 }
696 
697 static int synth_field_string_size(char *type)
698 {
699 	char buf[4], *end, *start;
700 	unsigned int len;
701 	int size, err;
702 
703 	start = strstr(type, "char[");
704 	if (start == NULL)
705 		return -EINVAL;
706 	start += sizeof("char[") - 1;
707 
708 	end = strchr(type, ']');
709 	if (!end || end < start)
710 		return -EINVAL;
711 
712 	len = end - start;
713 	if (len > 3)
714 		return -EINVAL;
715 
716 	strncpy(buf, start, len);
717 	buf[len] = '\0';
718 
719 	err = kstrtouint(buf, 0, &size);
720 	if (err)
721 		return err;
722 
723 	if (size > STR_VAR_LEN_MAX)
724 		return -EINVAL;
725 
726 	return size;
727 }
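/*
 * Worked example: for the type string "char[16]", start points just
 * past "char[", end points at ']', len is 2, buf becomes "16" and the
 * function returns 16 (16 being within STR_VAR_LEN_MAX).
 */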
728 
729 static int synth_field_size(char *type)
730 {
731 	int size = 0;
732 
733 	if (strcmp(type, "s64") == 0)
734 		size = sizeof(s64);
735 	else if (strcmp(type, "u64") == 0)
736 		size = sizeof(u64);
737 	else if (strcmp(type, "s32") == 0)
738 		size = sizeof(s32);
739 	else if (strcmp(type, "u32") == 0)
740 		size = sizeof(u32);
741 	else if (strcmp(type, "s16") == 0)
742 		size = sizeof(s16);
743 	else if (strcmp(type, "u16") == 0)
744 		size = sizeof(u16);
745 	else if (strcmp(type, "s8") == 0)
746 		size = sizeof(s8);
747 	else if (strcmp(type, "u8") == 0)
748 		size = sizeof(u8);
749 	else if (strcmp(type, "char") == 0)
750 		size = sizeof(char);
751 	else if (strcmp(type, "unsigned char") == 0)
752 		size = sizeof(unsigned char);
753 	else if (strcmp(type, "int") == 0)
754 		size = sizeof(int);
755 	else if (strcmp(type, "unsigned int") == 0)
756 		size = sizeof(unsigned int);
757 	else if (strcmp(type, "long") == 0)
758 		size = sizeof(long);
759 	else if (strcmp(type, "unsigned long") == 0)
760 		size = sizeof(unsigned long);
761 	else if (strcmp(type, "pid_t") == 0)
762 		size = sizeof(pid_t);
763 	else if (strcmp(type, "gfp_t") == 0)
764 		size = sizeof(gfp_t);
765 	else if (synth_field_is_string(type))
766 		size = synth_field_string_size(type);
767 
768 	return size;
769 }
770 
771 static const char *synth_field_fmt(char *type)
772 {
773 	const char *fmt = "%llu";
774 
775 	if (strcmp(type, "s64") == 0)
776 		fmt = "%lld";
777 	else if (strcmp(type, "u64") == 0)
778 		fmt = "%llu";
779 	else if (strcmp(type, "s32") == 0)
780 		fmt = "%d";
781 	else if (strcmp(type, "u32") == 0)
782 		fmt = "%u";
783 	else if (strcmp(type, "s16") == 0)
784 		fmt = "%d";
785 	else if (strcmp(type, "u16") == 0)
786 		fmt = "%u";
787 	else if (strcmp(type, "s8") == 0)
788 		fmt = "%d";
789 	else if (strcmp(type, "u8") == 0)
790 		fmt = "%u";
791 	else if (strcmp(type, "char") == 0)
792 		fmt = "%d";
793 	else if (strcmp(type, "unsigned char") == 0)
794 		fmt = "%u";
795 	else if (strcmp(type, "int") == 0)
796 		fmt = "%d";
797 	else if (strcmp(type, "unsigned int") == 0)
798 		fmt = "%u";
799 	else if (strcmp(type, "long") == 0)
800 		fmt = "%ld";
801 	else if (strcmp(type, "unsigned long") == 0)
802 		fmt = "%lu";
803 	else if (strcmp(type, "pid_t") == 0)
804 		fmt = "%d";
805 	else if (strcmp(type, "gfp_t") == 0)
806 		fmt = "%x";
807 	else if (synth_field_is_string(type))
808 		fmt = "%s";
809 
810 	return fmt;
811 }
812 
813 static enum print_line_t print_synth_event(struct trace_iterator *iter,
814 					   int flags,
815 					   struct trace_event *event)
816 {
817 	struct trace_array *tr = iter->tr;
818 	struct trace_seq *s = &iter->seq;
819 	struct synth_trace_event *entry;
820 	struct synth_event *se;
821 	unsigned int i, n_u64;
822 	char print_fmt[32];
823 	const char *fmt;
824 
825 	entry = (struct synth_trace_event *)iter->ent;
826 	se = container_of(event, struct synth_event, call.event);
827 
828 	trace_seq_printf(s, "%s: ", se->name);
829 
830 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
831 		if (trace_seq_has_overflowed(s))
832 			goto end;
833 
834 		fmt = synth_field_fmt(se->fields[i]->type);
835 
836 		/* parameter types */
837 		if (tr->trace_flags & TRACE_ITER_VERBOSE)
838 			trace_seq_printf(s, "%s ", fmt);
839 
840 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
841 
842 		/* parameter values */
843 		if (se->fields[i]->is_string) {
844 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
845 					 (char *)&entry->fields[n_u64],
846 					 i == se->n_fields - 1 ? "" : " ");
847 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
848 		} else {
849 			struct trace_print_flags __flags[] = {
850 			    __def_gfpflag_names, {-1, NULL} };
851 
852 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
853 					 entry->fields[n_u64],
854 					 i == se->n_fields - 1 ? "" : " ");
855 
856 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
857 				trace_seq_puts(s, " (");
858 				trace_print_flags_seq(s, "|",
859 						      entry->fields[n_u64],
860 						      __flags);
861 				trace_seq_putc(s, ')');
862 			}
863 			n_u64++;
864 		}
865 	}
866 end:
867 	trace_seq_putc(s, '\n');
868 
869 	return trace_handle_return(s);
870 }
871 
872 static struct trace_event_functions synth_event_funcs = {
873 	.trace		= print_synth_event
874 };
875 
876 static notrace void trace_event_raw_event_synth(void *__data,
877 						u64 *var_ref_vals,
878 						unsigned int var_ref_idx)
879 {
880 	struct trace_event_file *trace_file = __data;
881 	struct synth_trace_event *entry;
882 	struct trace_event_buffer fbuffer;
883 	struct ring_buffer *buffer;
884 	struct synth_event *event;
885 	unsigned int i, n_u64;
886 	int fields_size = 0;
887 
888 	event = trace_file->event_call->data;
889 
890 	if (trace_trigger_soft_disabled(trace_file))
891 		return;
892 
893 	fields_size = event->n_u64 * sizeof(u64);
894 
895 	/*
896 	 * Avoid ring buffer recursion detection, as this event
897 	 * is being performed within another event.
898 	 */
899 	buffer = trace_file->tr->trace_buffer.buffer;
900 	ring_buffer_nest_start(buffer);
901 
902 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
903 					   sizeof(*entry) + fields_size);
904 	if (!entry)
905 		goto out;
906 
907 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
908 		if (event->fields[i]->is_string) {
909 			char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
910 			char *str_field = (char *)&entry->fields[n_u64];
911 
912 			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
913 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
914 		} else {
915 			struct synth_field *field = event->fields[i];
916 			u64 val = var_ref_vals[var_ref_idx + i];
917 
918 			switch (field->size) {
919 			case 1:
920 				*(u8 *)&entry->fields[n_u64] = (u8)val;
921 				break;
922 
923 			case 2:
924 				*(u16 *)&entry->fields[n_u64] = (u16)val;
925 				break;
926 
927 			case 4:
928 				*(u32 *)&entry->fields[n_u64] = (u32)val;
929 				break;
930 
931 			default:
932 				entry->fields[n_u64] = val;
933 				break;
934 			}
935 			n_u64++;
936 		}
937 	}
938 
939 	trace_event_buffer_commit(&fbuffer);
940 out:
941 	ring_buffer_nest_end(buffer);
942 }
943 
944 static void free_synth_event_print_fmt(struct trace_event_call *call)
945 {
946 	if (call) {
947 		kfree(call->print_fmt);
948 		call->print_fmt = NULL;
949 	}
950 }
951 
952 static int __set_synth_event_print_fmt(struct synth_event *event,
953 				       char *buf, int len)
954 {
955 	const char *fmt;
956 	int pos = 0;
957 	int i;
958 
959 	/* When len=0, we just calculate the needed length */
960 #define LEN_OR_ZERO (len ? len - pos : 0)
961 
962 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
963 	for (i = 0; i < event->n_fields; i++) {
964 		fmt = synth_field_fmt(event->fields[i]->type);
965 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
966 				event->fields[i]->name, fmt,
967 				i == event->n_fields - 1 ? "" : ", ");
968 	}
969 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
970 
971 	for (i = 0; i < event->n_fields; i++) {
972 		pos += snprintf(buf + pos, LEN_OR_ZERO,
973 				", REC->%s", event->fields[i]->name);
974 	}
975 
976 #undef LEN_OR_ZERO
977 
978 	/* return the length of print_fmt */
979 	return pos;
980 }
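/*
 * For instance, for a synthetic event defined with the fields
 * "u64 lat; pid_t pid", the print_fmt built above would be (a sketch
 * derived from synth_field_fmt()):
 *
 *	"lat=%llu, pid=%d", REC->lat, REC->pid
 */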
981 
982 static int set_synth_event_print_fmt(struct trace_event_call *call)
983 {
984 	struct synth_event *event = call->data;
985 	char *print_fmt;
986 	int len;
987 
988 	/* First: called with 0 length to calculate the needed length */
989 	len = __set_synth_event_print_fmt(event, NULL, 0);
990 
991 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
992 	if (!print_fmt)
993 		return -ENOMEM;
994 
995 	/* Second: actually write the @print_fmt */
996 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
997 	call->print_fmt = print_fmt;
998 
999 	return 0;
1000 }
1001 
1002 static void free_synth_field(struct synth_field *field)
1003 {
1004 	kfree(field->type);
1005 	kfree(field->name);
1006 	kfree(field);
1007 }
1008 
1009 static struct synth_field *parse_synth_field(int argc, const char **argv,
1010 					     int *consumed)
1011 {
1012 	struct synth_field *field;
1013 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
1014 	int len, ret = 0;
1015 
1016 	if (field_type[0] == ';')
1017 		field_type++;
1018 
1019 	if (!strcmp(field_type, "unsigned")) {
1020 		if (argc < 3)
1021 			return ERR_PTR(-EINVAL);
1022 		prefix = "unsigned ";
1023 		field_type = argv[1];
1024 		field_name = argv[2];
1025 		*consumed = 3;
1026 	} else {
1027 		field_name = argv[1];
1028 		*consumed = 2;
1029 	}
1030 
1031 	field = kzalloc(sizeof(*field), GFP_KERNEL);
1032 	if (!field)
1033 		return ERR_PTR(-ENOMEM);
1034 
1035 	len = strlen(field_name);
1036 	array = strchr(field_name, '[');
1037 	if (array)
1038 		len -= strlen(array);
1039 	else if (field_name[len - 1] == ';')
1040 		len--;
1041 
1042 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
1043 	if (!field->name) {
1044 		ret = -ENOMEM;
1045 		goto free;
1046 	}
1047 
1048 	if (field_type[0] == ';')
1049 		field_type++;
1050 	len = strlen(field_type) + 1;
1051 	if (array)
1052 		len += strlen(array);
1053 	if (prefix)
1054 		len += strlen(prefix);
1055 
1056 	field->type = kzalloc(len, GFP_KERNEL);
1057 	if (!field->type) {
1058 		ret = -ENOMEM;
1059 		goto free;
1060 	}
1061 	if (prefix)
1062 		strcat(field->type, prefix);
1063 	strcat(field->type, field_type);
1064 	if (array) {
1065 		strcat(field->type, array);
1066 		if (field->type[len - 1] == ';')
1067 			field->type[len - 1] = '\0';
1068 	}
1069 
1070 	field->size = synth_field_size(field->type);
1071 	if (field->size <= 0) {
1072 		ret = -EINVAL;
1073 		goto free;
1074 	}
1075 
1076 	if (synth_field_is_string(field->type))
1077 		field->is_string = true;
1078 
1079 	field->is_signed = synth_field_signed(field->type);
1080 
1081  out:
1082 	return field;
1083  free:
1084 	free_synth_field(field);
1085 	field = ERR_PTR(ret);
1086 	goto out;
1087 }
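/*
 * A couple of illustrative parses (not exhaustive):
 *
 *	argv = { "u64", "lat;" }		-> type "u64", name "lat", *consumed = 2
 *	argv = { "unsigned", "long", "len" }	-> type "unsigned long", name "len", *consumed = 3
 */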
1088 
1089 static void free_synth_tracepoint(struct tracepoint *tp)
1090 {
1091 	if (!tp)
1092 		return;
1093 
1094 	kfree(tp->name);
1095 	kfree(tp);
1096 }
1097 
1098 static struct tracepoint *alloc_synth_tracepoint(char *name)
1099 {
1100 	struct tracepoint *tp;
1101 
1102 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
1103 	if (!tp)
1104 		return ERR_PTR(-ENOMEM);
1105 
1106 	tp->name = kstrdup(name, GFP_KERNEL);
1107 	if (!tp->name) {
1108 		kfree(tp);
1109 		return ERR_PTR(-ENOMEM);
1110 	}
1111 
1112 	return tp;
1113 }
1114 
1115 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
1116 				    unsigned int var_ref_idx);
1117 
1118 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
1119 			       unsigned int var_ref_idx)
1120 {
1121 	struct tracepoint *tp = event->tp;
1122 
1123 	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
1124 		struct tracepoint_func *probe_func_ptr;
1125 		synth_probe_func_t probe_func;
1126 		void *__data;
1127 
1128 		if (!(cpu_online(raw_smp_processor_id())))
1129 			return;
1130 
1131 		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
1132 		if (probe_func_ptr) {
1133 			do {
1134 				probe_func = probe_func_ptr->func;
1135 				__data = probe_func_ptr->data;
1136 				probe_func(__data, var_ref_vals, var_ref_idx);
1137 			} while ((++probe_func_ptr)->func);
1138 		}
1139 	}
1140 }
1141 
1142 static struct synth_event *find_synth_event(const char *name)
1143 {
1144 	struct dyn_event *pos;
1145 	struct synth_event *event;
1146 
1147 	for_each_dyn_event(pos) {
1148 		if (!is_synth_event(pos))
1149 			continue;
1150 		event = to_synth_event(pos);
1151 		if (strcmp(event->name, name) == 0)
1152 			return event;
1153 	}
1154 
1155 	return NULL;
1156 }
1157 
1158 static struct trace_event_fields synth_event_fields_array[] = {
1159 	{ .type = TRACE_FUNCTION_TYPE,
1160 	  .define_fields = synth_event_define_fields },
1161 	{}
1162 };
1163 
1164 static int register_synth_event(struct synth_event *event)
1165 {
1166 	struct trace_event_call *call = &event->call;
1167 	int ret = 0;
1168 
1169 	event->call.class = &event->class;
1170 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
1171 	if (!event->class.system) {
1172 		ret = -ENOMEM;
1173 		goto out;
1174 	}
1175 
1176 	event->tp = alloc_synth_tracepoint(event->name);
1177 	if (IS_ERR(event->tp)) {
1178 		ret = PTR_ERR(event->tp);
1179 		event->tp = NULL;
1180 		goto out;
1181 	}
1182 
1183 	INIT_LIST_HEAD(&call->class->fields);
1184 	call->event.funcs = &synth_event_funcs;
1185 	call->class->fields_array = synth_event_fields_array;
1186 
1187 	ret = register_trace_event(&call->event);
1188 	if (!ret) {
1189 		ret = -ENODEV;
1190 		goto out;
1191 	}
1192 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
1193 	call->class->reg = trace_event_reg;
1194 	call->class->probe = trace_event_raw_event_synth;
1195 	call->data = event;
1196 	call->tp = event->tp;
1197 
1198 	ret = trace_add_event_call(call);
1199 	if (ret) {
1200 		pr_warn("Failed to register synthetic event: %s\n",
1201 			trace_event_name(call));
1202 		goto err;
1203 	}
1204 
1205 	ret = set_synth_event_print_fmt(call);
1206 	if (ret < 0) {
1207 		trace_remove_event_call(call);
1208 		goto err;
1209 	}
1210  out:
1211 	return ret;
1212  err:
1213 	unregister_trace_event(&call->event);
1214 	goto out;
1215 }
1216 
1217 static int unregister_synth_event(struct synth_event *event)
1218 {
1219 	struct trace_event_call *call = &event->call;
1220 	int ret;
1221 
1222 	ret = trace_remove_event_call(call);
1223 
1224 	return ret;
1225 }
1226 
1227 static void free_synth_event(struct synth_event *event)
1228 {
1229 	unsigned int i;
1230 
1231 	if (!event)
1232 		return;
1233 
1234 	for (i = 0; i < event->n_fields; i++)
1235 		free_synth_field(event->fields[i]);
1236 
1237 	kfree(event->fields);
1238 	kfree(event->name);
1239 	kfree(event->class.system);
1240 	free_synth_tracepoint(event->tp);
1241 	free_synth_event_print_fmt(&event->call);
1242 	kfree(event);
1243 }
1244 
1245 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
1246 					     struct synth_field **fields)
1247 {
1248 	struct synth_event *event;
1249 	unsigned int i;
1250 
1251 	event = kzalloc(sizeof(*event), GFP_KERNEL);
1252 	if (!event) {
1253 		event = ERR_PTR(-ENOMEM);
1254 		goto out;
1255 	}
1256 
1257 	event->name = kstrdup(name, GFP_KERNEL);
1258 	if (!event->name) {
1259 		kfree(event);
1260 		event = ERR_PTR(-ENOMEM);
1261 		goto out;
1262 	}
1263 
1264 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
1265 	if (!event->fields) {
1266 		free_synth_event(event);
1267 		event = ERR_PTR(-ENOMEM);
1268 		goto out;
1269 	}
1270 
1271 	dyn_event_init(&event->devent, &synth_event_ops);
1272 
1273 	for (i = 0; i < n_fields; i++)
1274 		event->fields[i] = fields[i];
1275 
1276 	event->n_fields = n_fields;
1277  out:
1278 	return event;
1279 }
1280 
1281 static void action_trace(struct hist_trigger_data *hist_data,
1282 			 struct tracing_map_elt *elt, void *rec,
1283 			 struct ring_buffer_event *rbe, void *key,
1284 			 struct action_data *data, u64 *var_ref_vals)
1285 {
1286 	struct synth_event *event = data->synth_event;
1287 
1288 	trace_synth(event, var_ref_vals, data->var_ref_idx);
1289 }
1290 
1291 struct hist_var_data {
1292 	struct list_head list;
1293 	struct hist_trigger_data *hist_data;
1294 };
1295 
1296 static int __create_synth_event(int argc, const char *name, const char **argv)
1297 {
1298 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1299 	struct synth_event *event = NULL;
1300 	int i, consumed = 0, n_fields = 0, ret = 0;
1301 
1302 	/*
1303 	 * Argument syntax:
1304 	 *  - Add synthetic event: <event_name> field[;field] ...
1305 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1306 	 *      where 'field' = type field_name
1307 	 */
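	/*
	 * For example (paths are illustrative, assuming the standard
	 * tracefs mount point):
	 *
	 *	# echo 'wakeup_latency u64 lat; pid_t pid; char comm[16]' \
	 *		>> /sys/kernel/tracing/synthetic_events
	 *	# echo '!wakeup_latency' >> /sys/kernel/tracing/synthetic_events
	 */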
1308 
1309 	if (name[0] == '\0' || argc < 1)
1310 		return -EINVAL;
1311 
1312 	mutex_lock(&event_mutex);
1313 
1314 	event = find_synth_event(name);
1315 	if (event) {
1316 		ret = -EEXIST;
1317 		goto out;
1318 	}
1319 
1320 	for (i = 0; i < argc - 1; i++) {
1321 		if (strcmp(argv[i], ";") == 0)
1322 			continue;
1323 		if (n_fields == SYNTH_FIELDS_MAX) {
1324 			ret = -EINVAL;
1325 			goto err;
1326 		}
1327 
1328 		field = parse_synth_field(argc - i, &argv[i], &consumed);
1329 		if (IS_ERR(field)) {
1330 			ret = PTR_ERR(field);
1331 			goto err;
1332 		}
1333 		fields[n_fields++] = field;
1334 		i += consumed - 1;
1335 	}
1336 
1337 	if (i < argc && strcmp(argv[i], ";") != 0) {
1338 		ret = -EINVAL;
1339 		goto err;
1340 	}
1341 
1342 	event = alloc_synth_event(name, n_fields, fields);
1343 	if (IS_ERR(event)) {
1344 		ret = PTR_ERR(event);
1345 		event = NULL;
1346 		goto err;
1347 	}
1348 	ret = register_synth_event(event);
1349 	if (!ret)
1350 		dyn_event_add(&event->devent);
1351 	else
1352 		free_synth_event(event);
1353  out:
1354 	mutex_unlock(&event_mutex);
1355 
1356 	return ret;
1357  err:
1358 	for (i = 0; i < n_fields; i++)
1359 		free_synth_field(fields[i]);
1360 
1361 	goto out;
1362 }
1363 
1364 static int create_or_delete_synth_event(int argc, char **argv)
1365 {
1366 	const char *name = argv[0];
1367 	struct synth_event *event = NULL;
1368 	int ret;
1369 
1370 	/* trace_run_command() ensures argc != 0 */
1371 	if (name[0] == '!') {
1372 		mutex_lock(&event_mutex);
1373 		event = find_synth_event(name + 1);
1374 		if (event) {
1375 			if (event->ref)
1376 				ret = -EBUSY;
1377 			else {
1378 				ret = unregister_synth_event(event);
1379 				if (!ret) {
1380 					dyn_event_remove(&event->devent);
1381 					free_synth_event(event);
1382 				}
1383 			}
1384 		} else
1385 			ret = -ENOENT;
1386 		mutex_unlock(&event_mutex);
1387 		return ret;
1388 	}
1389 
1390 	ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
1391 	return ret == -ECANCELED ? -EINVAL : ret;
1392 }
1393 
1394 static int synth_event_create(int argc, const char **argv)
1395 {
1396 	const char *name = argv[0];
1397 	int len;
1398 
1399 	if (name[0] != 's' || name[1] != ':')
1400 		return -ECANCELED;
1401 	name += 2;
1402 
1403 	/* This interface accepts a group name prefix */
1404 	if (strchr(name, '/')) {
1405 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
1406 		if (len == 0)
1407 			return -EINVAL;
1408 		name += len;
1409 	}
1410 	return __create_synth_event(argc - 1, name, argv + 1);
1411 }
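/*
 * A sketch of the equivalent dynamic_events usage handled above, using
 * the "s:" prefix and an optional "synthetic/" group (example only):
 *
 *	# echo 's:wakeup_latency u64 lat; pid_t pid' \
 *		>> /sys/kernel/tracing/dynamic_events
 */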
1412 
1413 static int synth_event_release(struct dyn_event *ev)
1414 {
1415 	struct synth_event *event = to_synth_event(ev);
1416 	int ret;
1417 
1418 	if (event->ref)
1419 		return -EBUSY;
1420 
1421 	ret = unregister_synth_event(event);
1422 	if (ret)
1423 		return ret;
1424 
1425 	dyn_event_remove(ev);
1426 	free_synth_event(event);
1427 	return 0;
1428 }
1429 
1430 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
1431 {
1432 	struct synth_field *field;
1433 	unsigned int i;
1434 
1435 	seq_printf(m, "%s\t", event->name);
1436 
1437 	for (i = 0; i < event->n_fields; i++) {
1438 		field = event->fields[i];
1439 
1440 		/* field type and name */
1441 		seq_printf(m, "%s %s%s", field->type, field->name,
1442 			   i == event->n_fields - 1 ? "" : "; ");
1443 	}
1444 
1445 	seq_putc(m, '\n');
1446 
1447 	return 0;
1448 }
1449 
1450 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
1451 {
1452 	struct synth_event *event = to_synth_event(ev);
1453 
1454 	seq_printf(m, "s:%s/", event->class.system);
1455 
1456 	return __synth_event_show(m, event);
1457 }
1458 
1459 static int synth_events_seq_show(struct seq_file *m, void *v)
1460 {
1461 	struct dyn_event *ev = v;
1462 
1463 	if (!is_synth_event(ev))
1464 		return 0;
1465 
1466 	return __synth_event_show(m, to_synth_event(ev));
1467 }
1468 
1469 static const struct seq_operations synth_events_seq_op = {
1470 	.start	= dyn_event_seq_start,
1471 	.next	= dyn_event_seq_next,
1472 	.stop	= dyn_event_seq_stop,
1473 	.show	= synth_events_seq_show,
1474 };
1475 
1476 static int synth_events_open(struct inode *inode, struct file *file)
1477 {
1478 	int ret;
1479 
1480 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1481 	if (ret)
1482 		return ret;
1483 
1484 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1485 		ret = dyn_events_release_all(&synth_event_ops);
1486 		if (ret < 0)
1487 			return ret;
1488 	}
1489 
1490 	return seq_open(file, &synth_events_seq_op);
1491 }
1492 
1493 static ssize_t synth_events_write(struct file *file,
1494 				  const char __user *buffer,
1495 				  size_t count, loff_t *ppos)
1496 {
1497 	return trace_parse_run_command(file, buffer, count, ppos,
1498 				       create_or_delete_synth_event);
1499 }
1500 
1501 static const struct file_operations synth_events_fops = {
1502 	.open           = synth_events_open,
1503 	.write		= synth_events_write,
1504 	.read           = seq_read,
1505 	.llseek         = seq_lseek,
1506 	.release        = seq_release,
1507 };
1508 
1509 static u64 hist_field_timestamp(struct hist_field *hist_field,
1510 				struct tracing_map_elt *elt,
1511 				struct ring_buffer_event *rbe,
1512 				void *event)
1513 {
1514 	struct hist_trigger_data *hist_data = hist_field->hist_data;
1515 	struct trace_array *tr = hist_data->event_file->tr;
1516 
1517 	u64 ts = ring_buffer_event_time_stamp(rbe);
1518 
1519 	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
1520 		ts = ns2usecs(ts);
1521 
1522 	return ts;
1523 }
1524 
1525 static u64 hist_field_cpu(struct hist_field *hist_field,
1526 			  struct tracing_map_elt *elt,
1527 			  struct ring_buffer_event *rbe,
1528 			  void *event)
1529 {
1530 	int cpu = smp_processor_id();
1531 
1532 	return cpu;
1533 }
1534 
1535 /**
1536  * check_field_for_var_ref - Check if a VAR_REF field references a variable
1537  * @hist_field: The VAR_REF field to check
1538  * @var_data: The hist trigger that owns the variable
1539  * @var_idx: The trigger variable identifier
1540  *
1541  * Check the given VAR_REF field to see whether or not it references
1542  * the given variable associated with the given trigger.
1543  *
1544  * Return: The VAR_REF field if it does reference the variable, NULL if not
1545  */
1546 static struct hist_field *
1547 check_field_for_var_ref(struct hist_field *hist_field,
1548 			struct hist_trigger_data *var_data,
1549 			unsigned int var_idx)
1550 {
1551 	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
1552 
1553 	if (hist_field && hist_field->var.idx == var_idx &&
1554 	    hist_field->var.hist_data == var_data)
1555 		return hist_field;
1556 
1557 	return NULL;
1558 }
1559 
1560 /**
1561  * find_var_ref - Check if a trigger has a reference to a trigger variable
1562  * @hist_data: The hist trigger that might have a reference to the variable
1563  * @var_data: The hist trigger that owns the variable
1564  * @var_idx: The trigger variable identifier
1565  *
1566  * Check the list of var_refs[] on the first hist trigger to see
1567  * whether any of them are references to the variable on the second
1568  * trigger.
1569  *
1570  * Return: The VAR_REF field referencing the variable if so, NULL if not
1571  */
1572 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
1573 				       struct hist_trigger_data *var_data,
1574 				       unsigned int var_idx)
1575 {
1576 	struct hist_field *hist_field;
1577 	unsigned int i;
1578 
1579 	for (i = 0; i < hist_data->n_var_refs; i++) {
1580 		hist_field = hist_data->var_refs[i];
1581 		if (check_field_for_var_ref(hist_field, var_data, var_idx))
1582 			return hist_field;
1583 	}
1584 
1585 	return NULL;
1586 }
1587 
1588 /**
1589  * find_any_var_ref - Check if there is a reference to a given trigger variable
1590  * @hist_data: The hist trigger
1591  * @var_idx: The trigger variable identifier
1592  *
1593  * Check to see whether the given variable is currently referenced by
1594  * any other trigger.
1595  *
1596  * The trigger the variable is defined on is explicitly excluded - the
1597  * assumption being that a self-reference doesn't prevent a trigger
1598  * from being removed.
1599  *
1600  * Return: The VAR_REF field referencing the variable if so, NULL if not
1601  */
1602 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
1603 					   unsigned int var_idx)
1604 {
1605 	struct trace_array *tr = hist_data->event_file->tr;
1606 	struct hist_field *found = NULL;
1607 	struct hist_var_data *var_data;
1608 
1609 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1610 		if (var_data->hist_data == hist_data)
1611 			continue;
1612 		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
1613 		if (found)
1614 			break;
1615 	}
1616 
1617 	return found;
1618 }
1619 
1620 /**
1621  * check_var_refs - Check if there is a reference to any of trigger's variables
1622  * @hist_data: The hist trigger
1623  *
1624  * A trigger can define one or more variables.  If any one of them is
1625  * currently referenced by any other trigger, this function will
1626  * determine that.
1627 
1628  *
1629  * - if there are any references to a trigger's variables, it cannot.
1630  *
1631  * Return: True if there is a reference to any of trigger's variables
1632  */
1633 static bool check_var_refs(struct hist_trigger_data *hist_data)
1634 {
1635 	struct hist_field *field;
1636 	bool found = false;
1637 	int i;
1638 
1639 	for_each_hist_field(i, hist_data) {
1640 		field = hist_data->fields[i];
1641 		if (field && field->flags & HIST_FIELD_FL_VAR) {
1642 			if (find_any_var_ref(hist_data, field->var.idx)) {
1643 				found = true;
1644 				break;
1645 			}
1646 		}
1647 	}
1648 
1649 	return found;
1650 }
1651 
1652 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
1653 {
1654 	struct trace_array *tr = hist_data->event_file->tr;
1655 	struct hist_var_data *var_data, *found = NULL;
1656 
1657 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1658 		if (var_data->hist_data == hist_data) {
1659 			found = var_data;
1660 			break;
1661 		}
1662 	}
1663 
1664 	return found;
1665 }
1666 
1667 static bool field_has_hist_vars(struct hist_field *hist_field,
1668 				unsigned int level)
1669 {
1670 	int i;
1671 
1672 	if (level > 3)
1673 		return false;
1674 
1675 	if (!hist_field)
1676 		return false;
1677 
1678 	if (hist_field->flags & HIST_FIELD_FL_VAR ||
1679 	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
1680 		return true;
1681 
1682 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1683 		struct hist_field *operand;
1684 
1685 		operand = hist_field->operands[i];
1686 		if (field_has_hist_vars(operand, level + 1))
1687 			return true;
1688 	}
1689 
1690 	return false;
1691 }
1692 
1693 static bool has_hist_vars(struct hist_trigger_data *hist_data)
1694 {
1695 	struct hist_field *hist_field;
1696 	int i;
1697 
1698 	for_each_hist_field(i, hist_data) {
1699 		hist_field = hist_data->fields[i];
1700 		if (field_has_hist_vars(hist_field, 0))
1701 			return true;
1702 	}
1703 
1704 	return false;
1705 }
1706 
1707 static int save_hist_vars(struct hist_trigger_data *hist_data)
1708 {
1709 	struct trace_array *tr = hist_data->event_file->tr;
1710 	struct hist_var_data *var_data;
1711 
1712 	var_data = find_hist_vars(hist_data);
1713 	if (var_data)
1714 		return 0;
1715 
1716 	if (tracing_check_open_get_tr(tr))
1717 		return -ENODEV;
1718 
1719 	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
1720 	if (!var_data) {
1721 		trace_array_put(tr);
1722 		return -ENOMEM;
1723 	}
1724 
1725 	var_data->hist_data = hist_data;
1726 	list_add(&var_data->list, &tr->hist_vars);
1727 
1728 	return 0;
1729 }
1730 
1731 static void remove_hist_vars(struct hist_trigger_data *hist_data)
1732 {
1733 	struct trace_array *tr = hist_data->event_file->tr;
1734 	struct hist_var_data *var_data;
1735 
1736 	var_data = find_hist_vars(hist_data);
1737 	if (!var_data)
1738 		return;
1739 
1740 	if (WARN_ON(check_var_refs(hist_data)))
1741 		return;
1742 
1743 	list_del(&var_data->list);
1744 
1745 	kfree(var_data);
1746 
1747 	trace_array_put(tr);
1748 }
1749 
1750 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
1751 					 const char *var_name)
1752 {
1753 	struct hist_field *hist_field, *found = NULL;
1754 	int i;
1755 
1756 	for_each_hist_field(i, hist_data) {
1757 		hist_field = hist_data->fields[i];
1758 		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
1759 		    strcmp(hist_field->var.name, var_name) == 0) {
1760 			found = hist_field;
1761 			break;
1762 		}
1763 	}
1764 
1765 	return found;
1766 }
1767 
1768 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
1769 				   struct trace_event_file *file,
1770 				   const char *var_name)
1771 {
1772 	struct hist_trigger_data *test_data;
1773 	struct event_trigger_data *test;
1774 	struct hist_field *hist_field;
1775 
1776 	lockdep_assert_held(&event_mutex);
1777 
1778 	hist_field = find_var_field(hist_data, var_name);
1779 	if (hist_field)
1780 		return hist_field;
1781 
1782 	list_for_each_entry(test, &file->triggers, list) {
1783 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1784 			test_data = test->private_data;
1785 			hist_field = find_var_field(test_data, var_name);
1786 			if (hist_field)
1787 				return hist_field;
1788 		}
1789 	}
1790 
1791 	return NULL;
1792 }
1793 
1794 static struct trace_event_file *find_var_file(struct trace_array *tr,
1795 					      char *system,
1796 					      char *event_name,
1797 					      char *var_name)
1798 {
1799 	struct hist_trigger_data *var_hist_data;
1800 	struct hist_var_data *var_data;
1801 	struct trace_event_file *file, *found = NULL;
1802 
1803 	if (system)
1804 		return find_event_file(tr, system, event_name);
1805 
1806 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1807 		var_hist_data = var_data->hist_data;
1808 		file = var_hist_data->event_file;
1809 		if (file == found)
1810 			continue;
1811 
1812 		if (find_var_field(var_hist_data, var_name)) {
1813 			if (found) {
1814 				hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
1815 				return NULL;
1816 			}
1817 
1818 			found = file;
1819 		}
1820 	}
1821 
1822 	return found;
1823 }
1824 
1825 static struct hist_field *find_file_var(struct trace_event_file *file,
1826 					const char *var_name)
1827 {
1828 	struct hist_trigger_data *test_data;
1829 	struct event_trigger_data *test;
1830 	struct hist_field *hist_field;
1831 
1832 	lockdep_assert_held(&event_mutex);
1833 
1834 	list_for_each_entry(test, &file->triggers, list) {
1835 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1836 			test_data = test->private_data;
1837 			hist_field = find_var_field(test_data, var_name);
1838 			if (hist_field)
1839 				return hist_field;
1840 		}
1841 	}
1842 
1843 	return NULL;
1844 }
1845 
1846 static struct hist_field *
1847 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1848 {
1849 	struct trace_array *tr = hist_data->event_file->tr;
1850 	struct hist_field *hist_field, *found = NULL;
1851 	struct trace_event_file *file;
1852 	unsigned int i;
1853 
1854 	for (i = 0; i < hist_data->n_actions; i++) {
1855 		struct action_data *data = hist_data->actions[i];
1856 
1857 		if (data->handler == HANDLER_ONMATCH) {
1858 			char *system = data->match_data.event_system;
1859 			char *event_name = data->match_data.event;
1860 
1861 			file = find_var_file(tr, system, event_name, var_name);
1862 			if (!file)
1863 				continue;
1864 			hist_field = find_file_var(file, var_name);
1865 			if (hist_field) {
1866 				if (found) {
1867 					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
1868 						 errpos(var_name));
1869 					return ERR_PTR(-EINVAL);
1870 				}
1871 
1872 				found = hist_field;
1873 			}
1874 		}
1875 	}
1876 	return found;
1877 }
1878 
1879 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
1880 					 char *system,
1881 					 char *event_name,
1882 					 char *var_name)
1883 {
1884 	struct trace_array *tr = hist_data->event_file->tr;
1885 	struct hist_field *hist_field = NULL;
1886 	struct trace_event_file *file;
1887 
1888 	if (!system || !event_name) {
1889 		hist_field = find_match_var(hist_data, var_name);
1890 		if (IS_ERR(hist_field))
1891 			return NULL;
1892 		if (hist_field)
1893 			return hist_field;
1894 	}
1895 
1896 	file = find_var_file(tr, system, event_name, var_name);
1897 	if (!file)
1898 		return NULL;
1899 
1900 	hist_field = find_file_var(file, var_name);
1901 
1902 	return hist_field;
1903 }
1904 
1905 static u64 hist_field_var_ref(struct hist_field *hist_field,
1906 			      struct tracing_map_elt *elt,
1907 			      struct ring_buffer_event *rbe,
1908 			      void *event)
1909 {
1910 	struct hist_elt_data *elt_data;
1911 	u64 var_val = 0;
1912 
1913 	if (WARN_ON_ONCE(!elt))
1914 		return var_val;
1915 
1916 	elt_data = elt->private_data;
1917 	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1918 
1919 	return var_val;
1920 }
1921 
1922 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
1923 			     u64 *var_ref_vals, bool self)
1924 {
1925 	struct hist_trigger_data *var_data;
1926 	struct tracing_map_elt *var_elt;
1927 	struct hist_field *hist_field;
1928 	unsigned int i, var_idx;
1929 	bool resolved = true;
1930 	u64 var_val = 0;
1931 
1932 	for (i = 0; i < hist_data->n_var_refs; i++) {
1933 		hist_field = hist_data->var_refs[i];
1934 		var_idx = hist_field->var.idx;
1935 		var_data = hist_field->var.hist_data;
1936 
1937 		if (var_data == NULL) {
1938 			resolved = false;
1939 			break;
1940 		}
1941 
1942 		if ((self && var_data != hist_data) ||
1943 		    (!self && var_data == hist_data))
1944 			continue;
1945 
1946 		var_elt = tracing_map_lookup(var_data->map, key);
1947 		if (!var_elt) {
1948 			resolved = false;
1949 			break;
1950 		}
1951 
1952 		if (!tracing_map_var_set(var_elt, var_idx)) {
1953 			resolved = false;
1954 			break;
1955 		}
1956 
1957 		if (self || !hist_field->read_once)
1958 			var_val = tracing_map_read_var(var_elt, var_idx);
1959 		else
1960 			var_val = tracing_map_read_var_once(var_elt, var_idx);
1961 
1962 		var_ref_vals[i] = var_val;
1963 	}
1964 
1965 	return resolved;
1966 }
1967 
1968 static const char *hist_field_name(struct hist_field *field,
1969 				   unsigned int level)
1970 {
1971 	const char *field_name = "";
1972 
1973 	if (level > 1)
1974 		return field_name;
1975 
1976 	if (field->field)
1977 		field_name = field->field->name;
1978 	else if (field->flags & HIST_FIELD_FL_LOG2 ||
1979 		 field->flags & HIST_FIELD_FL_ALIAS)
1980 		field_name = hist_field_name(field->operands[0], ++level);
1981 	else if (field->flags & HIST_FIELD_FL_CPU)
1982 		field_name = "cpu";
1983 	else if (field->flags & HIST_FIELD_FL_EXPR ||
1984 		 field->flags & HIST_FIELD_FL_VAR_REF) {
1985 		if (field->system) {
1986 			static char full_name[MAX_FILTER_STR_VAL];
1987 
1988 			strcat(full_name, field->system);
1989 			strcat(full_name, ".");
1990 			strcat(full_name, field->event_name);
1991 			strcat(full_name, ".");
1992 			strcat(full_name, field->name);
1993 			field_name = full_name;
1994 		} else
1995 			field_name = field->name;
1996 	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
1997 		field_name = "common_timestamp";
1998 
1999 	if (field_name == NULL)
2000 		field_name = "";
2001 
2002 	return field_name;
2003 }
2004 
2005 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
2006 {
2007 	hist_field_fn_t fn = NULL;
2008 
2009 	switch (field_size) {
2010 	case 8:
2011 		if (field_is_signed)
2012 			fn = hist_field_s64;
2013 		else
2014 			fn = hist_field_u64;
2015 		break;
2016 	case 4:
2017 		if (field_is_signed)
2018 			fn = hist_field_s32;
2019 		else
2020 			fn = hist_field_u32;
2021 		break;
2022 	case 2:
2023 		if (field_is_signed)
2024 			fn = hist_field_s16;
2025 		else
2026 			fn = hist_field_u16;
2027 		break;
2028 	case 1:
2029 		if (field_is_signed)
2030 			fn = hist_field_s8;
2031 		else
2032 			fn = hist_field_u8;
2033 		break;
2034 	}
2035 
2036 	return fn;
2037 }
2038 
2039 static int parse_map_size(char *str)
2040 {
2041 	unsigned long size, map_bits;
2042 	int ret;
2043 
2044 	strsep(&str, "=");
2045 	if (!str) {
2046 		ret = -EINVAL;
2047 		goto out;
2048 	}
2049 
2050 	ret = kstrtoul(str, 0, &size);
2051 	if (ret)
2052 		goto out;
2053 
2054 	map_bits = ilog2(roundup_pow_of_two(size));
2055 	if (map_bits < TRACING_MAP_BITS_MIN ||
2056 	    map_bits > TRACING_MAP_BITS_MAX)
2057 		ret = -EINVAL;
2058 	else
2059 		ret = map_bits;
2060  out:
2061 	return ret;
2062 }
2063 
2064 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
2065 {
2066 	unsigned int i;
2067 
2068 	if (!attrs)
2069 		return;
2070 
2071 	for (i = 0; i < attrs->n_assignments; i++)
2072 		kfree(attrs->assignment_str[i]);
2073 
2074 	for (i = 0; i < attrs->n_actions; i++)
2075 		kfree(attrs->action_str[i]);
2076 
2077 	kfree(attrs->name);
2078 	kfree(attrs->sort_key_str);
2079 	kfree(attrs->keys_str);
2080 	kfree(attrs->vals_str);
2081 	kfree(attrs->clock);
2082 	kfree(attrs);
2083 }
2084 
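/*
 * parse_action() - stash an action clause for later parsing.  Only the
 * raw string is saved here; it is parsed when the actions are created.
 * Typical forms (see Documentation/trace/histogram.rst; the event,
 * variable and field names below are only examples) look roughly like:
 *
 *   onmatch(sched.sched_switch).wakeup_latency($wakeup_lat,next_pid)
 *   onmax($wakeup_lat).save(comm,prio)
 *   onchange($cpu_util).snapshot()
 */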
2085 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
2086 {
2087 	int ret = -EINVAL;
2088 
2089 	if (attrs->n_actions >= HIST_ACTIONS_MAX)
2090 		return ret;
2091 
2092 	if ((str_has_prefix(str, "onmatch(")) ||
2093 	    (str_has_prefix(str, "onmax(")) ||
2094 	    (str_has_prefix(str, "onchange("))) {
2095 		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
2096 		if (!attrs->action_str[attrs->n_actions]) {
2097 			ret = -ENOMEM;
2098 			return ret;
2099 		}
2100 		attrs->n_actions++;
2101 		ret = 0;
2102 	}
2103 	return ret;
2104 }
2105 
2106 static int parse_assignment(struct trace_array *tr,
2107 			    char *str, struct hist_trigger_attrs *attrs)
2108 {
2109 	int ret = 0;
2110 
2111 	if ((str_has_prefix(str, "key=")) ||
2112 	    (str_has_prefix(str, "keys="))) {
2113 		attrs->keys_str = kstrdup(str, GFP_KERNEL);
2114 		if (!attrs->keys_str) {
2115 			ret = -ENOMEM;
2116 			goto out;
2117 		}
2118 	} else if ((str_has_prefix(str, "val=")) ||
2119 		   (str_has_prefix(str, "vals=")) ||
2120 		   (str_has_prefix(str, "values="))) {
2121 		attrs->vals_str = kstrdup(str, GFP_KERNEL);
2122 		if (!attrs->vals_str) {
2123 			ret = -ENOMEM;
2124 			goto out;
2125 		}
2126 	} else if (str_has_prefix(str, "sort=")) {
2127 		attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
2128 		if (!attrs->sort_key_str) {
2129 			ret = -ENOMEM;
2130 			goto out;
2131 		}
2132 	} else if (str_has_prefix(str, "name=")) {
2133 		attrs->name = kstrdup(str, GFP_KERNEL);
2134 		if (!attrs->name) {
2135 			ret = -ENOMEM;
2136 			goto out;
2137 		}
2138 	} else if (str_has_prefix(str, "clock=")) {
2139 		strsep(&str, "=");
2140 		if (!str) {
2141 			ret = -EINVAL;
2142 			goto out;
2143 		}
2144 
2145 		str = strstrip(str);
2146 		attrs->clock = kstrdup(str, GFP_KERNEL);
2147 		if (!attrs->clock) {
2148 			ret = -ENOMEM;
2149 			goto out;
2150 		}
2151 	} else if (str_has_prefix(str, "size=")) {
2152 		int map_bits = parse_map_size(str);
2153 
2154 		if (map_bits < 0) {
2155 			ret = map_bits;
2156 			goto out;
2157 		}
2158 		attrs->map_bits = map_bits;
2159 	} else {
2160 		char *assignment;
2161 
2162 		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
2163 			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
2164 			ret = -EINVAL;
2165 			goto out;
2166 		}
2167 
2168 		assignment = kstrdup(str, GFP_KERNEL);
2169 		if (!assignment) {
2170 			ret = -ENOMEM;
2171 			goto out;
2172 		}
2173 
2174 		attrs->assignment_str[attrs->n_assignments++] = assignment;
2175 	}
2176  out:
2177 	return ret;
2178 }
2179 
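/*
 * parse_hist_trigger_attrs() - split the attribute portion of a hist
 * trigger on ':' and dispatch each piece: anything containing '=' goes
 * to parse_assignment(), the pause/cont(inue)/clear keywords set the
 * corresponding flags, and everything else is tried as an action via
 * parse_action().  For example (key and variable names are only
 * illustrations), an attribute string such as
 *
 *   keys=pid:ts0=common_timestamp.usecs:sort=hitcount:size=2048
 *
 * yields keys_str, one assignment_str entry, sort_key_str and map_bits.
 */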
2180 static struct hist_trigger_attrs *
2181 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
2182 {
2183 	struct hist_trigger_attrs *attrs;
2184 	int ret = 0;
2185 
2186 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
2187 	if (!attrs)
2188 		return ERR_PTR(-ENOMEM);
2189 
2190 	while (trigger_str) {
2191 		char *str = strsep(&trigger_str, ":");
2192 
2193 		if (strchr(str, '=')) {
2194 			ret = parse_assignment(tr, str, attrs);
2195 			if (ret)
2196 				goto free;
2197 		} else if (strcmp(str, "pause") == 0)
2198 			attrs->pause = true;
2199 		else if ((strcmp(str, "cont") == 0) ||
2200 			 (strcmp(str, "continue") == 0))
2201 			attrs->cont = true;
2202 		else if (strcmp(str, "clear") == 0)
2203 			attrs->clear = true;
2204 		else {
2205 			ret = parse_action(str, attrs);
2206 			if (ret)
2207 				goto free;
2208 		}
2209 	}
2210 
2211 	if (!attrs->keys_str) {
2212 		ret = -EINVAL;
2213 		goto free;
2214 	}
2215 
2216 	if (!attrs->clock) {
2217 		attrs->clock = kstrdup("global", GFP_KERNEL);
2218 		if (!attrs->clock) {
2219 			ret = -ENOMEM;
2220 			goto free;
2221 		}
2222 	}
2223 
2224 	return attrs;
2225  free:
2226 	destroy_hist_trigger_attrs(attrs);
2227 
2228 	return ERR_PTR(ret);
2229 }
2230 
2231 static inline void save_comm(char *comm, struct task_struct *task)
2232 {
2233 	if (!task->pid) {
2234 		strcpy(comm, "<idle>");
2235 		return;
2236 	}
2237 
2238 	if (WARN_ON_ONCE(task->pid < 0)) {
2239 		strcpy(comm, "<XXX>");
2240 		return;
2241 	}
2242 
2243 	strncpy(comm, task->comm, TASK_COMM_LEN);
2244 }
2245 
2246 static void hist_elt_data_free(struct hist_elt_data *elt_data)
2247 {
2248 	unsigned int i;
2249 
2250 	for (i = 0; i < SYNTH_FIELDS_MAX; i++)
2251 		kfree(elt_data->field_var_str[i]);
2252 
2253 	kfree(elt_data->comm);
2254 	kfree(elt_data);
2255 }
2256 
2257 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
2258 {
2259 	struct hist_elt_data *elt_data = elt->private_data;
2260 
2261 	hist_elt_data_free(elt_data);
2262 }
2263 
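/*
 * Per-element private data: each tracing_map element gets a
 * hist_elt_data holding an optional comm buffer (allocated only when a
 * key uses the .execname modifier) plus one STR_VAR_LEN_MAX buffer per
 * string-valued field/save variable, filled in later by
 * __update_field_vars().
 */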
2264 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
2265 {
2266 	struct hist_trigger_data *hist_data = elt->map->private_data;
2267 	unsigned int size = TASK_COMM_LEN;
2268 	struct hist_elt_data *elt_data;
2269 	struct hist_field *key_field;
2270 	unsigned int i, n_str;
2271 
2272 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
2273 	if (!elt_data)
2274 		return -ENOMEM;
2275 
2276 	for_each_hist_key_field(i, hist_data) {
2277 		key_field = hist_data->fields[i];
2278 
2279 		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
2280 			elt_data->comm = kzalloc(size, GFP_KERNEL);
2281 			if (!elt_data->comm) {
2282 				kfree(elt_data);
2283 				return -ENOMEM;
2284 			}
2285 			break;
2286 		}
2287 	}
2288 
2289 	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str;
2290 
2291 	size = STR_VAR_LEN_MAX;
2292 
2293 	for (i = 0; i < n_str; i++) {
2294 		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
2295 		if (!elt_data->field_var_str[i]) {
2296 			hist_elt_data_free(elt_data);
2297 			return -ENOMEM;
2298 		}
2299 	}
2300 
2301 	elt->private_data = elt_data;
2302 
2303 	return 0;
2304 }
2305 
2306 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
2307 {
2308 	struct hist_elt_data *elt_data = elt->private_data;
2309 
2310 	if (elt_data->comm)
2311 		save_comm(elt_data->comm, current);
2312 }
2313 
2314 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
2315 	.elt_alloc	= hist_trigger_elt_data_alloc,
2316 	.elt_free	= hist_trigger_elt_data_free,
2317 	.elt_init	= hist_trigger_elt_data_init,
2318 };
2319 
2320 static const char *get_hist_field_flags(struct hist_field *hist_field)
2321 {
2322 	const char *flags_str = NULL;
2323 
2324 	if (hist_field->flags & HIST_FIELD_FL_HEX)
2325 		flags_str = "hex";
2326 	else if (hist_field->flags & HIST_FIELD_FL_SYM)
2327 		flags_str = "sym";
2328 	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
2329 		flags_str = "sym-offset";
2330 	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
2331 		flags_str = "execname";
2332 	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
2333 		flags_str = "syscall";
2334 	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
2335 		flags_str = "log2";
2336 	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2337 		flags_str = "usecs";
2338 
2339 	return flags_str;
2340 }
2341 
2342 static void expr_field_str(struct hist_field *field, char *expr)
2343 {
2344 	if (field->flags & HIST_FIELD_FL_VAR_REF)
2345 		strcat(expr, "$");
2346 
2347 	strcat(expr, hist_field_name(field, 0));
2348 
2349 	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
2350 		const char *flags_str = get_hist_field_flags(field);
2351 
2352 		if (flags_str) {
2353 			strcat(expr, ".");
2354 			strcat(expr, flags_str);
2355 		}
2356 	}
2357 }
2358 
2359 static char *expr_str(struct hist_field *field, unsigned int level)
2360 {
2361 	char *expr;
2362 
2363 	if (level > 1)
2364 		return NULL;
2365 
2366 	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2367 	if (!expr)
2368 		return NULL;
2369 
2370 	if (!field->operands[0]) {
2371 		expr_field_str(field, expr);
2372 		return expr;
2373 	}
2374 
2375 	if (field->operator == FIELD_OP_UNARY_MINUS) {
2376 		char *subexpr;
2377 
2378 		strcat(expr, "-(");
2379 		subexpr = expr_str(field->operands[0], ++level);
2380 		if (!subexpr) {
2381 			kfree(expr);
2382 			return NULL;
2383 		}
2384 		strcat(expr, subexpr);
2385 		strcat(expr, ")");
2386 
2387 		kfree(subexpr);
2388 
2389 		return expr;
2390 	}
2391 
2392 	expr_field_str(field->operands[0], expr);
2393 
2394 	switch (field->operator) {
2395 	case FIELD_OP_MINUS:
2396 		strcat(expr, "-");
2397 		break;
2398 	case FIELD_OP_PLUS:
2399 		strcat(expr, "+");
2400 		break;
2401 	default:
2402 		kfree(expr);
2403 		return NULL;
2404 	}
2405 
2406 	expr_field_str(field->operands[1], expr);
2407 
2408 	return expr;
2409 }
2410 
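/*
 * contains_operator() - classify the first '+' or '-' found in an
 * expression string.  A '-' is treated as unary minus only when it is
 * the very first character, so (with illustrative names) "-$lat"
 * parses as unary minus while "a-b" parses as subtraction.
 */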
2411 static int contains_operator(char *str)
2412 {
2413 	enum field_op_id field_op = FIELD_OP_NONE;
2414 	char *op;
2415 
2416 	op = strpbrk(str, "+-");
2417 	if (!op)
2418 		return FIELD_OP_NONE;
2419 
2420 	switch (*op) {
2421 	case '-':
2422 		if (*str == '-')
2423 			field_op = FIELD_OP_UNARY_MINUS;
2424 		else
2425 			field_op = FIELD_OP_MINUS;
2426 		break;
2427 	case '+':
2428 		field_op = FIELD_OP_PLUS;
2429 		break;
2430 	default:
2431 		break;
2432 	}
2433 
2434 	return field_op;
2435 }
2436 
2437 static void get_hist_field(struct hist_field *hist_field)
2438 {
2439 	hist_field->ref++;
2440 }
2441 
2442 static void __destroy_hist_field(struct hist_field *hist_field)
2443 {
2444 	if (--hist_field->ref > 1)
2445 		return;
2446 
2447 	kfree(hist_field->var.name);
2448 	kfree(hist_field->name);
2449 	kfree(hist_field->type);
2450 
2451 	kfree(hist_field);
2452 }
2453 
2454 static void destroy_hist_field(struct hist_field *hist_field,
2455 			       unsigned int level)
2456 {
2457 	unsigned int i;
2458 
2459 	if (level > 3)
2460 		return;
2461 
2462 	if (!hist_field)
2463 		return;
2464 
2465 	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
2466 		return; /* var refs will be destroyed separately */
2467 
2468 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
2469 		destroy_hist_field(hist_field->operands[i], level + 1);
2470 
2471 	__destroy_hist_field(hist_field);
2472 }
2473 
2474 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
2475 					    struct ftrace_event_field *field,
2476 					    unsigned long flags,
2477 					    char *var_name)
2478 {
2479 	struct hist_field *hist_field;
2480 
2481 	if (field && is_function_field(field))
2482 		return NULL;
2483 
2484 	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2485 	if (!hist_field)
2486 		return NULL;
2487 
2488 	hist_field->ref = 1;
2489 
2490 	hist_field->hist_data = hist_data;
2491 
2492 	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
2493 		goto out; /* caller will populate */
2494 
2495 	if (flags & HIST_FIELD_FL_VAR_REF) {
2496 		hist_field->fn = hist_field_var_ref;
2497 		goto out;
2498 	}
2499 
2500 	if (flags & HIST_FIELD_FL_HITCOUNT) {
2501 		hist_field->fn = hist_field_counter;
2502 		hist_field->size = sizeof(u64);
2503 		hist_field->type = kstrdup("u64", GFP_KERNEL);
2504 		if (!hist_field->type)
2505 			goto free;
2506 		goto out;
2507 	}
2508 
2509 	if (flags & HIST_FIELD_FL_STACKTRACE) {
2510 		hist_field->fn = hist_field_none;
2511 		goto out;
2512 	}
2513 
2514 	if (flags & HIST_FIELD_FL_LOG2) {
2515 		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
2516 		hist_field->fn = hist_field_log2;
2517 		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
2518 		hist_field->size = hist_field->operands[0]->size;
2519 		hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
2520 		if (!hist_field->type)
2521 			goto free;
2522 		goto out;
2523 	}
2524 
2525 	if (flags & HIST_FIELD_FL_TIMESTAMP) {
2526 		hist_field->fn = hist_field_timestamp;
2527 		hist_field->size = sizeof(u64);
2528 		hist_field->type = kstrdup("u64", GFP_KERNEL);
2529 		if (!hist_field->type)
2530 			goto free;
2531 		goto out;
2532 	}
2533 
2534 	if (flags & HIST_FIELD_FL_CPU) {
2535 		hist_field->fn = hist_field_cpu;
2536 		hist_field->size = sizeof(int);
2537 		hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
2538 		if (!hist_field->type)
2539 			goto free;
2540 		goto out;
2541 	}
2542 
2543 	if (WARN_ON_ONCE(!field))
2544 		goto out;
2545 
2546 	if (is_string_field(field)) {
2547 		flags |= HIST_FIELD_FL_STRING;
2548 
2549 		hist_field->size = MAX_FILTER_STR_VAL;
2550 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
2551 		if (!hist_field->type)
2552 			goto free;
2553 
2554 		if (field->filter_type == FILTER_STATIC_STRING)
2555 			hist_field->fn = hist_field_string;
2556 		else if (field->filter_type == FILTER_DYN_STRING)
2557 			hist_field->fn = hist_field_dynstring;
2558 		else
2559 			hist_field->fn = hist_field_pstring;
2560 	} else {
2561 		hist_field->size = field->size;
2562 		hist_field->is_signed = field->is_signed;
2563 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
2564 		if (!hist_field->type)
2565 			goto free;
2566 
2567 		hist_field->fn = select_value_fn(field->size,
2568 						 field->is_signed);
2569 		if (!hist_field->fn) {
2570 			destroy_hist_field(hist_field, 0);
2571 			return NULL;
2572 		}
2573 	}
2574  out:
2575 	hist_field->field = field;
2576 	hist_field->flags = flags;
2577 
2578 	if (var_name) {
2579 		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
2580 		if (!hist_field->var.name)
2581 			goto free;
2582 	}
2583 
2584 	return hist_field;
2585  free:
2586 	destroy_hist_field(hist_field, 0);
2587 	return NULL;
2588 }
2589 
2590 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
2591 {
2592 	unsigned int i;
2593 
2594 	for (i = 0; i < HIST_FIELDS_MAX; i++) {
2595 		if (hist_data->fields[i]) {
2596 			destroy_hist_field(hist_data->fields[i], 0);
2597 			hist_data->fields[i] = NULL;
2598 		}
2599 	}
2600 
2601 	for (i = 0; i < hist_data->n_var_refs; i++) {
2602 		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
2603 		__destroy_hist_field(hist_data->var_refs[i]);
2604 		hist_data->var_refs[i] = NULL;
2605 	}
2606 }
2607 
2608 static int init_var_ref(struct hist_field *ref_field,
2609 			struct hist_field *var_field,
2610 			char *system, char *event_name)
2611 {
2612 	int err = 0;
2613 
2614 	ref_field->var.idx = var_field->var.idx;
2615 	ref_field->var.hist_data = var_field->hist_data;
2616 	ref_field->size = var_field->size;
2617 	ref_field->is_signed = var_field->is_signed;
2618 	ref_field->flags |= var_field->flags &
2619 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2620 
2621 	if (system) {
2622 		ref_field->system = kstrdup(system, GFP_KERNEL);
2623 		if (!ref_field->system)
2624 			return -ENOMEM;
2625 	}
2626 
2627 	if (event_name) {
2628 		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
2629 		if (!ref_field->event_name) {
2630 			err = -ENOMEM;
2631 			goto free;
2632 		}
2633 	}
2634 
2635 	if (var_field->var.name) {
2636 		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
2637 		if (!ref_field->name) {
2638 			err = -ENOMEM;
2639 			goto free;
2640 		}
2641 	} else if (var_field->name) {
2642 		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
2643 		if (!ref_field->name) {
2644 			err = -ENOMEM;
2645 			goto free;
2646 		}
2647 	}
2648 
2649 	ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
2650 	if (!ref_field->type) {
2651 		err = -ENOMEM;
2652 		goto free;
2653 	}
2654  out:
2655 	return err;
2656  free:
2657 	kfree(ref_field->system);
2658 	kfree(ref_field->event_name);
2659 	kfree(ref_field->name);
2660 
2661 	goto out;
2662 }
2663 
2664 /**
2665  * create_var_ref - Create a variable reference and attach it to trigger
2666  * @hist_data: The trigger that will be referencing the variable
2667  * @var_field: The VAR field to create a reference to
2668  * @system: The optional system string
2669  * @event_name: The optional event_name string
2670  *
2671  * Given a variable hist_field, create a VAR_REF hist_field that
2672  * represents a reference to it.
2673  *
2674  * This function also adds the reference to the trigger that
2675  * now references the variable.
2676  *
2677  * Return: The VAR_REF field if successful, NULL if not
2678  */
2679 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
2680 					 struct hist_field *var_field,
2681 					 char *system, char *event_name)
2682 {
2683 	unsigned long flags = HIST_FIELD_FL_VAR_REF;
2684 	struct hist_field *ref_field;
2685 	int i;
2686 
2687 	/* Check if a reference to the variable already exists */
2688 	for (i = 0; i < hist_data->n_var_refs; i++) {
2689 		ref_field = hist_data->var_refs[i];
2690 		if (ref_field->var.idx == var_field->var.idx &&
2691 		    ref_field->var.hist_data == var_field->hist_data) {
2692 			get_hist_field(ref_field);
2693 			return ref_field;
2694 		}
2695 	}
2696 
2697 	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
2698 	if (ref_field) {
2699 		if (init_var_ref(ref_field, var_field, system, event_name)) {
2700 			destroy_hist_field(ref_field, 0);
2701 			return NULL;
2702 		}
2703 
2704 		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
2705 		ref_field->var_ref_idx = hist_data->n_var_refs++;
2706 	}
2707 
2708 	return ref_field;
2709 }
2710 
2711 static bool is_var_ref(char *var_name)
2712 {
2713 	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
2714 		return false;
2715 
2716 	return true;
2717 }
2718 
2719 static char *field_name_from_var(struct hist_trigger_data *hist_data,
2720 				 char *var_name)
2721 {
2722 	char *name, *field;
2723 	unsigned int i;
2724 
2725 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
2726 		name = hist_data->attrs->var_defs.name[i];
2727 
2728 		if (strcmp(var_name, name) == 0) {
2729 			field = hist_data->attrs->var_defs.expr[i];
2730 			if (contains_operator(field) || is_var_ref(field))
2731 				continue;
2732 			return field;
2733 		}
2734 	}
2735 
2736 	return NULL;
2737 }
2738 
2739 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
2740 				 char *system, char *event_name,
2741 				 char *var_name)
2742 {
2743 	struct trace_event_call *call;
2744 
2745 	if (system && event_name) {
2746 		call = hist_data->event_file->event_call;
2747 
2748 		if (strcmp(system, call->class->system) != 0)
2749 			return NULL;
2750 
2751 		if (strcmp(event_name, trace_event_name(call)) != 0)
2752 			return NULL;
2753 	}
2754 
2755 	if (!!system != !!event_name)
2756 		return NULL;
2757 
2758 	if (!is_var_ref(var_name))
2759 		return NULL;
2760 
2761 	var_name++;
2762 
2763 	return field_name_from_var(hist_data, var_name);
2764 }
2765 
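/*
 * parse_var_ref() - turn a '$'-prefixed token into a variable
 * reference.  Any system/event qualifiers are passed in separately;
 * the variable itself may live on this trigger or, located via
 * find_event_var(), on another event's histogram.  For example (names
 * illustrative), the "$ts0" in "common_timestamp.usecs-$ts0" reaches
 * this function as var_name.
 */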
2766 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2767 					char *system, char *event_name,
2768 					char *var_name)
2769 {
2770 	struct hist_field *var_field = NULL, *ref_field = NULL;
2771 	struct trace_array *tr = hist_data->event_file->tr;
2772 
2773 	if (!is_var_ref(var_name))
2774 		return NULL;
2775 
2776 	var_name++;
2777 
2778 	var_field = find_event_var(hist_data, system, event_name, var_name);
2779 	if (var_field)
2780 		ref_field = create_var_ref(hist_data, var_field,
2781 					   system, event_name);
2782 
2783 	if (!ref_field)
2784 		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
2785 
2786 	return ref_field;
2787 }
2788 
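/*
 * parse_field() - resolve "field" or "field.modifier" into an
 * ftrace_event_field and set the corresponding HIST_FIELD_FL_* flag.
 * Examples of accepted modifiers (the field names are illustrative):
 * "call_site.sym", "call_site.sym-offset", "common_pid.execname",
 * "id.syscall", "bytes_req.log2", "len.hex", "common_timestamp.usecs".
 */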
2789 static struct ftrace_event_field *
2790 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2791 	    char *field_str, unsigned long *flags)
2792 {
2793 	struct ftrace_event_field *field = NULL;
2794 	char *field_name, *modifier, *str;
2795 	struct trace_array *tr = file->tr;
2796 
2797 	modifier = str = kstrdup(field_str, GFP_KERNEL);
2798 	if (!modifier)
2799 		return ERR_PTR(-ENOMEM);
2800 
2801 	field_name = strsep(&modifier, ".");
2802 	if (modifier) {
2803 		if (strcmp(modifier, "hex") == 0)
2804 			*flags |= HIST_FIELD_FL_HEX;
2805 		else if (strcmp(modifier, "sym") == 0)
2806 			*flags |= HIST_FIELD_FL_SYM;
2807 		else if (strcmp(modifier, "sym-offset") == 0)
2808 			*flags |= HIST_FIELD_FL_SYM_OFFSET;
2809 		else if ((strcmp(modifier, "execname") == 0) &&
2810 			 (strcmp(field_name, "common_pid") == 0))
2811 			*flags |= HIST_FIELD_FL_EXECNAME;
2812 		else if (strcmp(modifier, "syscall") == 0)
2813 			*flags |= HIST_FIELD_FL_SYSCALL;
2814 		else if (strcmp(modifier, "log2") == 0)
2815 			*flags |= HIST_FIELD_FL_LOG2;
2816 		else if (strcmp(modifier, "usecs") == 0)
2817 			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2818 		else {
2819 			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
2820 			field = ERR_PTR(-EINVAL);
2821 			goto out;
2822 		}
2823 	}
2824 
2825 	if (strcmp(field_name, "common_timestamp") == 0) {
2826 		*flags |= HIST_FIELD_FL_TIMESTAMP;
2827 		hist_data->enable_timestamps = true;
2828 		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2829 			hist_data->attrs->ts_in_usecs = true;
2830 	} else if (strcmp(field_name, "cpu") == 0)
2831 		*flags |= HIST_FIELD_FL_CPU;
2832 	else {
2833 		field = trace_find_event_field(file->event_call, field_name);
2834 		if (!field || !field->size) {
2835 			hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
2836 			field = ERR_PTR(-EINVAL);
2837 			goto out;
2838 		}
2839 	}
2840  out:
2841 	kfree(str);
2842 
2843 	return field;
2844 }
2845 
2846 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2847 				       struct hist_field *var_ref,
2848 				       char *var_name)
2849 {
2850 	struct hist_field *alias = NULL;
2851 	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2852 
2853 	alias = create_hist_field(hist_data, NULL, flags, var_name);
2854 	if (!alias)
2855 		return NULL;
2856 
2857 	alias->fn = var_ref->fn;
2858 	alias->operands[0] = var_ref;
2859 
2860 	if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2861 		destroy_hist_field(alias, 0);
2862 		return NULL;
2863 	}
2864 
2865 	alias->var_ref_idx = var_ref->var_ref_idx;
2866 
2867 	return alias;
2868 }
2869 
2870 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2871 				     struct trace_event_file *file, char *str,
2872 				     unsigned long *flags, char *var_name)
2873 {
2874 	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2875 	struct ftrace_event_field *field = NULL;
2876 	struct hist_field *hist_field = NULL;
2877 	int ret = 0;
2878 
2879 	s = strchr(str, '.');
2880 	if (s) {
2881 		s = strchr(++s, '.');
2882 		if (s) {
2883 			ref_system = strsep(&str, ".");
2884 			if (!str) {
2885 				ret = -EINVAL;
2886 				goto out;
2887 			}
2888 			ref_event = strsep(&str, ".");
2889 			if (!str) {
2890 				ret = -EINVAL;
2891 				goto out;
2892 			}
2893 			ref_var = str;
2894 		}
2895 	}
2896 
2897 	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2898 	if (!s) {
2899 		hist_field = parse_var_ref(hist_data, ref_system,
2900 					   ref_event, ref_var);
2901 		if (hist_field) {
2902 			if (var_name) {
2903 				hist_field = create_alias(hist_data, hist_field, var_name);
2904 				if (!hist_field) {
2905 					ret = -ENOMEM;
2906 					goto out;
2907 				}
2908 			}
2909 			return hist_field;
2910 		}
2911 	} else
2912 		str = s;
2913 
2914 	field = parse_field(hist_data, file, str, flags);
2915 	if (IS_ERR(field)) {
2916 		ret = PTR_ERR(field);
2917 		goto out;
2918 	}
2919 
2920 	hist_field = create_hist_field(hist_data, field, *flags, var_name);
2921 	if (!hist_field) {
2922 		ret = -ENOMEM;
2923 		goto out;
2924 	}
2925 
2926 	return hist_field;
2927  out:
2928 	return ERR_PTR(ret);
2929 }
2930 
2931 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2932 				     struct trace_event_file *file,
2933 				     char *str, unsigned long flags,
2934 				     char *var_name, unsigned int level);
2935 
2936 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2937 				      struct trace_event_file *file,
2938 				      char *str, unsigned long flags,
2939 				      char *var_name, unsigned int level)
2940 {
2941 	struct hist_field *operand1, *expr = NULL;
2942 	unsigned long operand_flags;
2943 	int ret = 0;
2944 	char *s;
2945 
2946 	/* we support only -(xxx), i.e. explicit parens are required */
2947 
2948 	if (level > 3) {
2949 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2950 		ret = -EINVAL;
2951 		goto free;
2952 	}
2953 
2954 	str++; /* skip leading '-' */
2955 
2956 	s = strchr(str, '(');
2957 	if (s)
2958 		str++;
2959 	else {
2960 		ret = -EINVAL;
2961 		goto free;
2962 	}
2963 
2964 	s = strrchr(str, ')');
2965 	if (s)
2966 		*s = '\0';
2967 	else {
2968 		ret = -EINVAL; /* no closing ')' */
2969 		goto free;
2970 	}
2971 
2972 	flags |= HIST_FIELD_FL_EXPR;
2973 	expr = create_hist_field(hist_data, NULL, flags, var_name);
2974 	if (!expr) {
2975 		ret = -ENOMEM;
2976 		goto free;
2977 	}
2978 
2979 	operand_flags = 0;
2980 	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2981 	if (IS_ERR(operand1)) {
2982 		ret = PTR_ERR(operand1);
2983 		goto free;
2984 	}
2985 
2986 	expr->flags |= operand1->flags &
2987 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2988 	expr->fn = hist_field_unary_minus;
2989 	expr->operands[0] = operand1;
2990 	expr->operator = FIELD_OP_UNARY_MINUS;
2991 	expr->name = expr_str(expr, 0);
2992 	expr->type = kstrdup(operand1->type, GFP_KERNEL);
2993 	if (!expr->type) {
2994 		ret = -ENOMEM;
2995 		goto free;
2996 	}
2997 
2998 	return expr;
2999  free:
3000 	destroy_hist_field(expr, 0);
3001 	return ERR_PTR(ret);
3002 }
3003 
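/*
 * check_expr_operands() - reject expressions whose operands disagree
 * on the .usecs timestamp conversion, first resolving variable
 * references and aliases to the flags of the underlying variable.
 * For example (variable name illustrative), "common_timestamp.usecs-$ts0"
 * is only accepted if $ts0 was itself saved from common_timestamp.usecs.
 */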
3004 static int check_expr_operands(struct trace_array *tr,
3005 			       struct hist_field *operand1,
3006 			       struct hist_field *operand2)
3007 {
3008 	unsigned long operand1_flags = operand1->flags;
3009 	unsigned long operand2_flags = operand2->flags;
3010 
3011 	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
3012 	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
3013 		struct hist_field *var;
3014 
3015 		var = find_var_field(operand1->var.hist_data, operand1->name);
3016 		if (!var)
3017 			return -EINVAL;
3018 		operand1_flags = var->flags;
3019 	}
3020 
3021 	if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
3022 	    (operand2_flags & HIST_FIELD_FL_ALIAS)) {
3023 		struct hist_field *var;
3024 
3025 		var = find_var_field(operand2->var.hist_data, operand2->name);
3026 		if (!var)
3027 			return -EINVAL;
3028 		operand2_flags = var->flags;
3029 	}
3030 
3031 	if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
3032 	    (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
3033 		hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
3034 		return -EINVAL;
3035 	}
3036 
3037 	return 0;
3038 }
3039 
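/*
 * parse_expr() - parse a (possibly nested) arithmetic expression into
 * a tree of hist_fields.  Only binary '+'/'-' and the unary minus form
 * "-(...)" are supported, with at most three subexpression levels.
 * For example (names illustrative), "common_timestamp.usecs-$ts0"
 * becomes an EXPR hist_field whose two operands are a timestamp atom
 * and a variable reference.
 */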
3040 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
3041 				     struct trace_event_file *file,
3042 				     char *str, unsigned long flags,
3043 				     char *var_name, unsigned int level)
3044 {
3045 	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
3046 	unsigned long operand_flags;
3047 	int field_op, ret = -EINVAL;
3048 	char *sep, *operand1_str;
3049 
3050 	if (level > 3) {
3051 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
3052 		return ERR_PTR(-EINVAL);
3053 	}
3054 
3055 	field_op = contains_operator(str);
3056 
3057 	if (field_op == FIELD_OP_NONE)
3058 		return parse_atom(hist_data, file, str, &flags, var_name);
3059 
3060 	if (field_op == FIELD_OP_UNARY_MINUS)
3061 		return parse_unary(hist_data, file, str, flags, var_name, ++level);
3062 
3063 	switch (field_op) {
3064 	case FIELD_OP_MINUS:
3065 		sep = "-";
3066 		break;
3067 	case FIELD_OP_PLUS:
3068 		sep = "+";
3069 		break;
3070 	default:
3071 		goto free;
3072 	}
3073 
3074 	operand1_str = strsep(&str, sep);
3075 	if (!operand1_str || !str)
3076 		goto free;
3077 
3078 	operand_flags = 0;
3079 	operand1 = parse_atom(hist_data, file, operand1_str,
3080 			      &operand_flags, NULL);
3081 	if (IS_ERR(operand1)) {
3082 		ret = PTR_ERR(operand1);
3083 		operand1 = NULL;
3084 		goto free;
3085 	}
3086 
3087 	/* rest of string could be another expression e.g. b+c in a+b+c */
3088 	operand_flags = 0;
3089 	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
3090 	if (IS_ERR(operand2)) {
3091 		ret = PTR_ERR(operand2);
3092 		operand2 = NULL;
3093 		goto free;
3094 	}
3095 
3096 	ret = check_expr_operands(file->tr, operand1, operand2);
3097 	if (ret)
3098 		goto free;
3099 
3100 	flags |= HIST_FIELD_FL_EXPR;
3101 
3102 	flags |= operand1->flags &
3103 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3104 
3105 	expr = create_hist_field(hist_data, NULL, flags, var_name);
3106 	if (!expr) {
3107 		ret = -ENOMEM;
3108 		goto free;
3109 	}
3110 
3111 	operand1->read_once = true;
3112 	operand2->read_once = true;
3113 
3114 	expr->operands[0] = operand1;
3115 	expr->operands[1] = operand2;
3116 	expr->operator = field_op;
3117 	expr->name = expr_str(expr, 0);
3118 	expr->type = kstrdup(operand1->type, GFP_KERNEL);
3119 	if (!expr->type) {
3120 		ret = -ENOMEM;
3121 		goto free;
3122 	}
3123 
3124 	switch (field_op) {
3125 	case FIELD_OP_MINUS:
3126 		expr->fn = hist_field_minus;
3127 		break;
3128 	case FIELD_OP_PLUS:
3129 		expr->fn = hist_field_plus;
3130 		break;
3131 	default:
3132 		ret = -EINVAL;
3133 		goto free;
3134 	}
3135 
3136 	return expr;
3137  free:
3138 	destroy_hist_field(operand1, 0);
3139 	destroy_hist_field(operand2, 0);
3140 	destroy_hist_field(expr, 0);
3141 
3142 	return ERR_PTR(ret);
3143 }
3144 
3145 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
3146 				 struct trace_event_file *file)
3147 {
3148 	struct event_trigger_data *test;
3149 
3150 	lockdep_assert_held(&event_mutex);
3151 
3152 	list_for_each_entry(test, &file->triggers, list) {
3153 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3154 			if (test->private_data == hist_data)
3155 				return test->filter_str;
3156 		}
3157 	}
3158 
3159 	return NULL;
3160 }
3161 
3162 static struct event_command trigger_hist_cmd;
3163 static int event_hist_trigger_func(struct event_command *cmd_ops,
3164 				   struct trace_event_file *file,
3165 				   char *glob, char *cmd, char *param);
3166 
3167 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
3168 			    struct hist_trigger_data *hist_data,
3169 			    unsigned int n_keys)
3170 {
3171 	struct hist_field *target_hist_field, *hist_field;
3172 	unsigned int n, i, j;
3173 
3174 	if (hist_data->n_fields - hist_data->n_vals != n_keys)
3175 		return false;
3176 
3177 	i = hist_data->n_vals;
3178 	j = target_hist_data->n_vals;
3179 
3180 	for (n = 0; n < n_keys; n++) {
3181 		hist_field = hist_data->fields[i + n];
3182 		target_hist_field = target_hist_data->fields[j + n];
3183 
3184 		if (strcmp(hist_field->type, target_hist_field->type) != 0)
3185 			return false;
3186 		if (hist_field->size != target_hist_field->size)
3187 			return false;
3188 		if (hist_field->is_signed != target_hist_field->is_signed)
3189 			return false;
3190 	}
3191 
3192 	return true;
3193 }
3194 
3195 static struct hist_trigger_data *
3196 find_compatible_hist(struct hist_trigger_data *target_hist_data,
3197 		     struct trace_event_file *file)
3198 {
3199 	struct hist_trigger_data *hist_data;
3200 	struct event_trigger_data *test;
3201 	unsigned int n_keys;
3202 
3203 	lockdep_assert_held(&event_mutex);
3204 
3205 	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
3206 
3207 	list_for_each_entry(test, &file->triggers, list) {
3208 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3209 			hist_data = test->private_data;
3210 
3211 			if (compatible_keys(target_hist_data, hist_data, n_keys))
3212 				return hist_data;
3213 		}
3214 	}
3215 
3216 	return NULL;
3217 }
3218 
3219 static struct trace_event_file *event_file(struct trace_array *tr,
3220 					   char *system, char *event_name)
3221 {
3222 	struct trace_event_file *file;
3223 
3224 	file = __find_event_file(tr, system, event_name);
3225 	if (!file)
3226 		return ERR_PTR(-EINVAL);
3227 
3228 	return file;
3229 }
3230 
3231 static struct hist_field *
3232 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
3233 			 char *system, char *event_name, char *field_name)
3234 {
3235 	struct hist_field *event_var;
3236 	char *synthetic_name;
3237 
3238 	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3239 	if (!synthetic_name)
3240 		return ERR_PTR(-ENOMEM);
3241 
3242 	strcpy(synthetic_name, "synthetic_");
3243 	strcat(synthetic_name, field_name);
3244 
3245 	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
3246 
3247 	kfree(synthetic_name);
3248 
3249 	return event_var;
3250 }
3251 
3252 /**
3253  * create_field_var_hist - Automatically create a histogram and var for a field
3254  * @target_hist_data: The target hist trigger
3255  * @subsys_name: Optional subsystem name
3256  * @event_name: Optional event name
3257  * @field_name: The name of the field (and the resulting variable)
3258  *
3259  * Hist trigger actions fetch data from variables, not directly from
3260  * events.  However, for convenience, users are allowed to directly
3261  * specify an event field in an action, which will be automatically
3262  * converted into a variable on their behalf.
3263  *
3264  * If a user specifies a field on an event other than the event the
3265  * histogram is currently being defined on (the target event
3266  * histogram), the only way that can be accomplished is to create a
3267  * new hist trigger on that event and define the field variable there.
3268  *
3269  * This function creates a new histogram compatible with the target
3270  * event (meaning a histogram with the same key as the target
3271  * histogram), and creates a variable for the specified field, but
3272  * with 'synthetic_' prepended to the variable name in order to avoid
3273  * collision with normal field variables.
3274  *
3275  * Return: The variable created for the field.
3276  */
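/*
 * As an illustration of the command built below (key and field names
 * are examples only): for a compatible histogram keyed on "pid", a
 * request for field "prio" produces a command string roughly like
 *
 *   keys=pid:synthetic_prio=prio if <filter>
 *
 * with the " if <filter>" part present only when the compatible
 * histogram has a filter, and the result is registered via
 * event_hist_trigger_func().
 */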
3277 static struct hist_field *
3278 create_field_var_hist(struct hist_trigger_data *target_hist_data,
3279 		      char *subsys_name, char *event_name, char *field_name)
3280 {
3281 	struct trace_array *tr = target_hist_data->event_file->tr;
3282 	struct hist_field *event_var = ERR_PTR(-EINVAL);
3283 	struct hist_trigger_data *hist_data;
3284 	unsigned int i, n, first = true;
3285 	struct field_var_hist *var_hist;
3286 	struct trace_event_file *file;
3287 	struct hist_field *key_field;
3288 	char *saved_filter;
3289 	char *cmd;
3290 	int ret;
3291 
3292 	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
3293 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3294 		return ERR_PTR(-EINVAL);
3295 	}
3296 
3297 	file = event_file(tr, subsys_name, event_name);
3298 
3299 	if (IS_ERR(file)) {
3300 		hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
3301 		ret = PTR_ERR(file);
3302 		return ERR_PTR(ret);
3303 	}
3304 
3305 	/*
3306 	 * Look for a histogram compatible with target.  We'll use the
3307 	 * found histogram specification to create a new matching
3308 	 * histogram with our variable on it.  target_hist_data is not
3309 	 * yet a registered histogram so we can't use that.
3310 	 */
3311 	hist_data = find_compatible_hist(target_hist_data, file);
3312 	if (!hist_data) {
3313 		hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
3314 		return ERR_PTR(-EINVAL);
3315 	}
3316 
3317 	/* See if a synthetic field variable has already been created */
3318 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3319 					     event_name, field_name);
3320 	if (!IS_ERR_OR_NULL(event_var))
3321 		return event_var;
3322 
3323 	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
3324 	if (!var_hist)
3325 		return ERR_PTR(-ENOMEM);
3326 
3327 	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3328 	if (!cmd) {
3329 		kfree(var_hist);
3330 		return ERR_PTR(-ENOMEM);
3331 	}
3332 
3333 	/* Use the same keys as the compatible histogram */
3334 	strcat(cmd, "keys=");
3335 
3336 	for_each_hist_key_field(i, hist_data) {
3337 		key_field = hist_data->fields[i];
3338 		if (!first)
3339 			strcat(cmd, ",");
3340 		strcat(cmd, key_field->field->name);
3341 		first = false;
3342 	}
3343 
3344 	/* Create the synthetic field variable specification */
3345 	strcat(cmd, ":synthetic_");
3346 	strcat(cmd, field_name);
3347 	strcat(cmd, "=");
3348 	strcat(cmd, field_name);
3349 
3350 	/* Use the same filter as the compatible histogram */
3351 	saved_filter = find_trigger_filter(hist_data, file);
3352 	if (saved_filter) {
3353 		strcat(cmd, " if ");
3354 		strcat(cmd, saved_filter);
3355 	}
3356 
3357 	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
3358 	if (!var_hist->cmd) {
3359 		kfree(cmd);
3360 		kfree(var_hist);
3361 		return ERR_PTR(-ENOMEM);
3362 	}
3363 
3364 	/* Save the compatible histogram information */
3365 	var_hist->hist_data = hist_data;
3366 
3367 	/* Create the new histogram with our variable */
3368 	ret = event_hist_trigger_func(&trigger_hist_cmd, file,
3369 				      "", "hist", cmd);
3370 	if (ret) {
3371 		kfree(cmd);
3372 		kfree(var_hist->cmd);
3373 		kfree(var_hist);
3374 		hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
3375 		return ERR_PTR(ret);
3376 	}
3377 
3378 	kfree(cmd);
3379 
3380 	/* If we can't find the variable, something went wrong */
3381 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3382 					     event_name, field_name);
3383 	if (IS_ERR_OR_NULL(event_var)) {
3384 		kfree(var_hist->cmd);
3385 		kfree(var_hist);
3386 		hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
3387 		return ERR_PTR(-EINVAL);
3388 	}
3389 
3390 	n = target_hist_data->n_field_var_hists;
3391 	target_hist_data->field_var_hists[n] = var_hist;
3392 	target_hist_data->n_field_var_hists++;
3393 
3394 	return event_var;
3395 }
3396 
3397 static struct hist_field *
3398 find_target_event_var(struct hist_trigger_data *hist_data,
3399 		      char *subsys_name, char *event_name, char *var_name)
3400 {
3401 	struct trace_event_file *file = hist_data->event_file;
3402 	struct hist_field *hist_field = NULL;
3403 
3404 	if (subsys_name) {
3405 		struct trace_event_call *call;
3406 
3407 		if (!event_name)
3408 			return NULL;
3409 
3410 		call = file->event_call;
3411 
3412 		if (strcmp(subsys_name, call->class->system) != 0)
3413 			return NULL;
3414 
3415 		if (strcmp(event_name, trace_event_name(call)) != 0)
3416 			return NULL;
3417 	}
3418 
3419 	hist_field = find_var_field(hist_data, var_name);
3420 
3421 	return hist_field;
3422 }
3423 
3424 static inline void __update_field_vars(struct tracing_map_elt *elt,
3425 				       struct ring_buffer_event *rbe,
3426 				       void *rec,
3427 				       struct field_var **field_vars,
3428 				       unsigned int n_field_vars,
3429 				       unsigned int field_var_str_start)
3430 {
3431 	struct hist_elt_data *elt_data = elt->private_data;
3432 	unsigned int i, j, var_idx;
3433 	u64 var_val;
3434 
3435 	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
3436 		struct field_var *field_var = field_vars[i];
3437 		struct hist_field *var = field_var->var;
3438 		struct hist_field *val = field_var->val;
3439 
3440 		var_val = val->fn(val, elt, rbe, rec);
3441 		var_idx = var->var.idx;
3442 
3443 		if (val->flags & HIST_FIELD_FL_STRING) {
3444 			char *str = elt_data->field_var_str[j++];
3445 			char *val_str = (char *)(uintptr_t)var_val;
3446 
3447 			strscpy(str, val_str, STR_VAR_LEN_MAX);
3448 			var_val = (u64)(uintptr_t)str;
3449 		}
3450 		tracing_map_set_var(elt, var_idx, var_val);
3451 	}
3452 }
3453 
3454 static void update_field_vars(struct hist_trigger_data *hist_data,
3455 			      struct tracing_map_elt *elt,
3456 			      struct ring_buffer_event *rbe,
3457 			      void *rec)
3458 {
3459 	__update_field_vars(elt, rbe, rec, hist_data->field_vars,
3460 			    hist_data->n_field_vars, 0);
3461 }
3462 
3463 static void save_track_data_vars(struct hist_trigger_data *hist_data,
3464 				 struct tracing_map_elt *elt, void *rec,
3465 				 struct ring_buffer_event *rbe, void *key,
3466 				 struct action_data *data, u64 *var_ref_vals)
3467 {
3468 	__update_field_vars(elt, rbe, rec, hist_data->save_vars,
3469 			    hist_data->n_save_vars, hist_data->n_field_var_str);
3470 }
3471 
3472 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
3473 				     struct trace_event_file *file,
3474 				     char *name, int size, const char *type)
3475 {
3476 	struct hist_field *var;
3477 	int idx;
3478 
3479 	if (find_var(hist_data, file, name) && !hist_data->remove) {
3480 		var = ERR_PTR(-EINVAL);
3481 		goto out;
3482 	}
3483 
3484 	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3485 	if (!var) {
3486 		var = ERR_PTR(-ENOMEM);
3487 		goto out;
3488 	}
3489 
3490 	idx = tracing_map_add_var(hist_data->map);
3491 	if (idx < 0) {
3492 		kfree(var);
3493 		var = ERR_PTR(-EINVAL);
3494 		goto out;
3495 	}
3496 
3497 	var->flags = HIST_FIELD_FL_VAR;
3498 	var->var.idx = idx;
3499 	var->var.hist_data = var->hist_data = hist_data;
3500 	var->size = size;
3501 	var->var.name = kstrdup(name, GFP_KERNEL);
3502 	var->type = kstrdup(type, GFP_KERNEL);
3503 	if (!var->var.name || !var->type) {
3504 		kfree(var->var.name);
3505 		kfree(var->type);
3506 		kfree(var);
3507 		var = ERR_PTR(-ENOMEM);
3508 	}
3509  out:
3510 	return var;
3511 }
3512 
3513 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3514 					  struct trace_event_file *file,
3515 					  char *field_name)
3516 {
3517 	struct hist_field *val = NULL, *var = NULL;
3518 	unsigned long flags = HIST_FIELD_FL_VAR;
3519 	struct trace_array *tr = file->tr;
3520 	struct field_var *field_var;
3521 	int ret = 0;
3522 
3523 	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3524 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3525 		ret = -EINVAL;
3526 		goto err;
3527 	}
3528 
3529 	val = parse_atom(hist_data, file, field_name, &flags, NULL);
3530 	if (IS_ERR(val)) {
3531 		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
3532 		ret = PTR_ERR(val);
3533 		goto err;
3534 	}
3535 
3536 	var = create_var(hist_data, file, field_name, val->size, val->type);
3537 	if (IS_ERR(var)) {
3538 		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
3539 		kfree(val);
3540 		ret = PTR_ERR(var);
3541 		goto err;
3542 	}
3543 
3544 	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
3545 	if (!field_var) {
3546 		kfree(val);
3547 		kfree(var);
3548 		ret = -ENOMEM;
3549 		goto err;
3550 	}
3551 
3552 	field_var->var = var;
3553 	field_var->val = val;
3554  out:
3555 	return field_var;
3556  err:
3557 	field_var = ERR_PTR(ret);
3558 	goto out;
3559 }
3560 
3561 /**
3562  * create_target_field_var - Automatically create a variable for a field
3563  * @target_hist_data: The target hist trigger
3564  * @subsys_name: Optional subsystem name
3565  * @event_name: Optional event name
3566  * @var_name: The name of the field (and the resulting variable)
3567  *
3568  * Hist trigger actions fetch data from variables, not directly from
3569  * events.  However, for convenience, users are allowed to directly
3570  * specify an event field in an action, which will be automatically
3571  * converted into a variable on their behalf.
3572  *
3573  * This function creates a field variable with the name var_name on
3574  * the hist trigger currently being defined on the target event.  If
3575  * subsys_name and event_name are specified, this function simply
3576  * verifies that they do in fact match the target event subsystem and
3577  * event name.
3578  *
3579  * Return: The variable created for the field.
3580  */
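/*
 * For example (action and field names illustrative), the bare field
 * names in an action such as onmax($wakeup_lat).save(comm,prio) are
 * expected to pass through here so that "comm" and "prio" become field
 * variables on the target histogram, letting their per-element values
 * be captured when the action fires.
 */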
3581 static struct field_var *
3582 create_target_field_var(struct hist_trigger_data *target_hist_data,
3583 			char *subsys_name, char *event_name, char *var_name)
3584 {
3585 	struct trace_event_file *file = target_hist_data->event_file;
3586 
3587 	if (subsys_name) {
3588 		struct trace_event_call *call;
3589 
3590 		if (!event_name)
3591 			return NULL;
3592 
3593 		call = file->event_call;
3594 
3595 		if (strcmp(subsys_name, call->class->system) != 0)
3596 			return NULL;
3597 
3598 		if (strcmp(event_name, trace_event_name(call)) != 0)
3599 			return NULL;
3600 	}
3601 
3602 	return create_field_var(target_hist_data, file, var_name);
3603 }
3604 
3605 static bool check_track_val_max(u64 track_val, u64 var_val)
3606 {
3607 	if (var_val <= track_val)
3608 		return false;
3609 
3610 	return true;
3611 }
3612 
3613 static bool check_track_val_changed(u64 track_val, u64 var_val)
3614 {
3615 	if (var_val == track_val)
3616 		return false;
3617 
3618 	return true;
3619 }
3620 
3621 static u64 get_track_val(struct hist_trigger_data *hist_data,
3622 			 struct tracing_map_elt *elt,
3623 			 struct action_data *data)
3624 {
3625 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
3626 	u64 track_val;
3627 
3628 	track_val = tracing_map_read_var(elt, track_var_idx);
3629 
3630 	return track_val;
3631 }
3632 
3633 static void save_track_val(struct hist_trigger_data *hist_data,
3634 			   struct tracing_map_elt *elt,
3635 			   struct action_data *data, u64 var_val)
3636 {
3637 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
3638 
3639 	tracing_map_set_var(elt, track_var_idx, var_val);
3640 }
3641 
3642 static void save_track_data(struct hist_trigger_data *hist_data,
3643 			    struct tracing_map_elt *elt, void *rec,
3644 			    struct ring_buffer_event *rbe, void *key,
3645 			    struct action_data *data, u64 *var_ref_vals)
3646 {
3647 	if (data->track_data.save_data)
3648 		data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3649 }
3650 
3651 static bool check_track_val(struct tracing_map_elt *elt,
3652 			    struct action_data *data,
3653 			    u64 var_val)
3654 {
3655 	struct hist_trigger_data *hist_data;
3656 	u64 track_val;
3657 
3658 	hist_data = data->track_data.track_var->hist_data;
3659 	track_val = get_track_val(hist_data, elt, data);
3660 
3661 	return data->track_data.check_val(track_val, var_val);
3662 }
3663 
3664 #ifdef CONFIG_TRACER_SNAPSHOT
3665 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3666 {
3667 	/* called with tr->max_lock held */
3668 	struct track_data *track_data = tr->cond_snapshot->cond_data;
3669 	struct hist_elt_data *elt_data, *track_elt_data;
3670 	struct snapshot_context *context = cond_data;
3671 	struct action_data *action;
3672 	u64 track_val;
3673 
3674 	if (!track_data)
3675 		return false;
3676 
3677 	action = track_data->action_data;
3678 
3679 	track_val = get_track_val(track_data->hist_data, context->elt,
3680 				  track_data->action_data);
3681 
3682 	if (!action->track_data.check_val(track_data->track_val, track_val))
3683 		return false;
3684 
3685 	track_data->track_val = track_val;
3686 	memcpy(track_data->key, context->key, track_data->key_len);
3687 
3688 	elt_data = context->elt->private_data;
3689 	track_elt_data = track_data->elt.private_data;
3690 	if (elt_data->comm)
3691 		strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
3692 
3693 	track_data->updated = true;
3694 
3695 	return true;
3696 }
3697 
3698 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3699 				     struct tracing_map_elt *elt, void *rec,
3700 				     struct ring_buffer_event *rbe, void *key,
3701 				     struct action_data *data,
3702 				     u64 *var_ref_vals)
3703 {
3704 	struct trace_event_file *file = hist_data->event_file;
3705 	struct snapshot_context context;
3706 
3707 	context.elt = elt;
3708 	context.key = key;
3709 
3710 	tracing_snapshot_cond(file->tr, &context);
3711 }
3712 
3713 static void hist_trigger_print_key(struct seq_file *m,
3714 				   struct hist_trigger_data *hist_data,
3715 				   void *key,
3716 				   struct tracing_map_elt *elt);
3717 
3718 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
3719 {
3720 	unsigned int i;
3721 
3722 	if (!hist_data->n_actions)
3723 		return NULL;
3724 
3725 	for (i = 0; i < hist_data->n_actions; i++) {
3726 		struct action_data *data = hist_data->actions[i];
3727 
3728 		if (data->action == ACTION_SNAPSHOT)
3729 			return data;
3730 	}
3731 
3732 	return NULL;
3733 }
3734 
3735 static void track_data_snapshot_print(struct seq_file *m,
3736 				      struct hist_trigger_data *hist_data)
3737 {
3738 	struct trace_event_file *file = hist_data->event_file;
3739 	struct track_data *track_data;
3740 	struct action_data *action;
3741 
3742 	track_data = tracing_cond_snapshot_data(file->tr);
3743 	if (!track_data)
3744 		return;
3745 
3746 	if (!track_data->updated)
3747 		return;
3748 
3749 	action = snapshot_action(hist_data);
3750 	if (!action)
3751 		return;
3752 
3753 	seq_puts(m, "\nSnapshot taken (see tracing/snapshot).  Details:\n");
3754 	seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
3755 		   action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
3756 		   action->track_data.var_str, track_data->track_val);
3757 
3758 	seq_puts(m, "\ttriggered by event with key: ");
3759 	hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
3760 	seq_putc(m, '\n');
3761 }
3762 #else
3763 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3764 {
3765 	return false;
3766 }
3767 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3768 				     struct tracing_map_elt *elt, void *rec,
3769 				     struct ring_buffer_event *rbe, void *key,
3770 				     struct action_data *data,
3771 				     u64 *var_ref_vals) {}
3772 static void track_data_snapshot_print(struct seq_file *m,
3773 				      struct hist_trigger_data *hist_data) {}
3774 #endif /* CONFIG_TRACER_SNAPSHOT */
3775 
3776 static void track_data_print(struct seq_file *m,
3777 			     struct hist_trigger_data *hist_data,
3778 			     struct tracing_map_elt *elt,
3779 			     struct action_data *data)
3780 {
3781 	u64 track_val = get_track_val(hist_data, elt, data);
3782 	unsigned int i, save_var_idx;
3783 
3784 	if (data->handler == HANDLER_ONMAX)
3785 		seq_printf(m, "\n\tmax: %10llu", track_val);
3786 	else if (data->handler == HANDLER_ONCHANGE)
3787 		seq_printf(m, "\n\tchanged: %10llu", track_val);
3788 
3789 	if (data->action == ACTION_SNAPSHOT)
3790 		return;
3791 
3792 	for (i = 0; i < hist_data->n_save_vars; i++) {
3793 		struct hist_field *save_val = hist_data->save_vars[i]->val;
3794 		struct hist_field *save_var = hist_data->save_vars[i]->var;
3795 		u64 val;
3796 
3797 		save_var_idx = save_var->var.idx;
3798 
3799 		val = tracing_map_read_var(elt, save_var_idx);
3800 
3801 		if (save_val->flags & HIST_FIELD_FL_STRING) {
3802 			seq_printf(m, "  %s: %-32s", save_var->var.name,
3803 				   (char *)(uintptr_t)(val));
3804 		} else
3805 			seq_printf(m, "  %s: %10llu", save_var->var.name, val);
3806 	}
3807 }
3808 
3809 static void ontrack_action(struct hist_trigger_data *hist_data,
3810 			   struct tracing_map_elt *elt, void *rec,
3811 			   struct ring_buffer_event *rbe, void *key,
3812 			   struct action_data *data, u64 *var_ref_vals)
3813 {
3814 	u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
3815 
3816 	if (check_track_val(elt, data, var_val)) {
3817 		save_track_val(hist_data, elt, data, var_val);
3818 		save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3819 	}
3820 }
3821 
3822 static void action_data_destroy(struct action_data *data)
3823 {
3824 	unsigned int i;
3825 
3826 	lockdep_assert_held(&event_mutex);
3827 
3828 	kfree(data->action_name);
3829 
3830 	for (i = 0; i < data->n_params; i++)
3831 		kfree(data->params[i]);
3832 
3833 	if (data->synth_event)
3834 		data->synth_event->ref--;
3835 
3836 	kfree(data->synth_event_name);
3837 
3838 	kfree(data);
3839 }
3840 
3841 static void track_data_destroy(struct hist_trigger_data *hist_data,
3842 			       struct action_data *data)
3843 {
3844 	struct trace_event_file *file = hist_data->event_file;
3845 
3846 	destroy_hist_field(data->track_data.track_var, 0);
3847 
3848 	if (data->action == ACTION_SNAPSHOT) {
3849 		struct track_data *track_data;
3850 
3851 		track_data = tracing_cond_snapshot_data(file->tr);
3852 		if (track_data && track_data->hist_data == hist_data) {
3853 			tracing_snapshot_cond_disable(file->tr);
3854 			track_data_free(track_data);
3855 		}
3856 	}
3857 
3858 	kfree(data->track_data.var_str);
3859 
3860 	action_data_destroy(data);
3861 }
3862 
3863 static int action_create(struct hist_trigger_data *hist_data,
3864 			 struct action_data *data);
3865 
3866 static int track_data_create(struct hist_trigger_data *hist_data,
3867 			     struct action_data *data)
3868 {
3869 	struct hist_field *var_field, *ref_field, *track_var = NULL;
3870 	struct trace_event_file *file = hist_data->event_file;
3871 	struct trace_array *tr = file->tr;
3872 	char *track_data_var_str;
3873 	int ret = 0;
3874 
3875 	track_data_var_str = data->track_data.var_str;
3876 	if (track_data_var_str[0] != '$') {
3877 		hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3878 		return -EINVAL;
3879 	}
3880 	track_data_var_str++;
3881 
3882 	var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3883 	if (!var_field) {
3884 		hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3885 		return -EINVAL;
3886 	}
3887 
3888 	ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3889 	if (!ref_field)
3890 		return -ENOMEM;
3891 
3892 	data->track_data.var_ref = ref_field;
3893 
3894 	if (data->handler == HANDLER_ONMAX)
3895 		track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3896 	if (IS_ERR(track_var)) {
3897 		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3898 		ret = PTR_ERR(track_var);
3899 		goto out;
3900 	}
3901 
3902 	if (data->handler == HANDLER_ONCHANGE)
3903 		track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
3904 	if (IS_ERR(track_var)) {
3905 		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3906 		ret = PTR_ERR(track_var);
3907 		goto out;
3908 	}
3909 	data->track_data.track_var = track_var;
3910 
3911 	ret = action_create(hist_data, data);
3912  out:
3913 	return ret;
3914 }
3915 
3916 static int parse_action_params(struct trace_array *tr, char *params,
3917 			       struct action_data *data)
3918 {
3919 	char *param, *saved_param;
3920 	bool first_param = true;
3921 	int ret = 0;
3922 
3923 	while (params) {
3924 		if (data->n_params >= SYNTH_FIELDS_MAX) {
3925 			hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
			ret = -EINVAL;
3926 			goto out;
3927 		}
3928 
3929 		param = strsep(&params, ",");
3930 		if (!param) {
3931 			hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
3932 			ret = -EINVAL;
3933 			goto out;
3934 		}
3935 
3936 		param = strstrip(param);
3937 		if (strlen(param) < 2) {
3938 			hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
3939 			ret = -EINVAL;
3940 			goto out;
3941 		}
3942 
3943 		saved_param = kstrdup(param, GFP_KERNEL);
3944 		if (!saved_param) {
3945 			ret = -ENOMEM;
3946 			goto out;
3947 		}
3948 
3949 		if (first_param && data->use_trace_keyword) {
3950 			data->synth_event_name = saved_param;
3951 			first_param = false;
3952 			continue;
3953 		}
3954 		first_param = false;
3955 
3956 		data->params[data->n_params++] = saved_param;
3957 	}
3958  out:
3959 	return ret;
3960 }
3961 
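/*
 * action_parse() - parse the "<handler>.<action>(params)" tail of an
 * action clause.  For example (names illustrative),
 * onmax($lat).save(comm,prio) reaches this function as
 * ".save(comm,prio)" with handler == HANDLER_ONMAX.  save() and
 * snapshot() are only valid with onmax()/onchange(); anything else is
 * treated as a synthetic event to generate, either via the trace()
 * keyword or as a bare synthetic event name.
 */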
3962 static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
3963 			enum handler_id handler)
3964 {
3965 	char *action_name;
3966 	int ret = 0;
3967 
3968 	strsep(&str, ".");
3969 	if (!str) {
3970 		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3971 		ret = -EINVAL;
3972 		goto out;
3973 	}
3974 
3975 	action_name = strsep(&str, "(");
3976 	if (!action_name || !str) {
3977 		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3978 		ret = -EINVAL;
3979 		goto out;
3980 	}
3981 
3982 	if (str_has_prefix(action_name, "save")) {
3983 		char *params = strsep(&str, ")");
3984 
3985 		if (!params) {
3986 			hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
3987 			ret = -EINVAL;
3988 			goto out;
3989 		}
3990 
3991 		ret = parse_action_params(tr, params, data);
3992 		if (ret)
3993 			goto out;
3994 
3995 		if (handler == HANDLER_ONMAX)
3996 			data->track_data.check_val = check_track_val_max;
3997 		else if (handler == HANDLER_ONCHANGE)
3998 			data->track_data.check_val = check_track_val_changed;
3999 		else {
4000 			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
4001 			ret = -EINVAL;
4002 			goto out;
4003 		}
4004 
4005 		data->track_data.save_data = save_track_data_vars;
4006 		data->fn = ontrack_action;
4007 		data->action = ACTION_SAVE;
4008 	} else if (str_has_prefix(action_name, "snapshot")) {
4009 		char *params = strsep(&str, ")");
4010 
4011 		if (!str) {
4012 			hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
4013 			ret = -EINVAL;
4014 			goto out;
4015 		}
4016 
4017 		if (handler == HANDLER_ONMAX)
4018 			data->track_data.check_val = check_track_val_max;
4019 		else if (handler == HANDLER_ONCHANGE)
4020 			data->track_data.check_val = check_track_val_changed;
4021 		else {
4022 			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
4023 			ret = -EINVAL;
4024 			goto out;
4025 		}
4026 
4027 		data->track_data.save_data = save_track_data_snapshot;
4028 		data->fn = ontrack_action;
4029 		data->action = ACTION_SNAPSHOT;
4030 	} else {
4031 		char *params = strsep(&str, ")");
4032 
4033 		if (str_has_prefix(action_name, "trace"))
4034 			data->use_trace_keyword = true;
4035 
4036 		if (params) {
4037 			ret = parse_action_params(tr, params, data);
4038 			if (ret)
4039 				goto out;
4040 		}
4041 
4042 		if (handler == HANDLER_ONMAX)
4043 			data->track_data.check_val = check_track_val_max;
4044 		else if (handler == HANDLER_ONCHANGE)
4045 			data->track_data.check_val = check_track_val_changed;
4046 
4047 		if (handler != HANDLER_ONMATCH) {
4048 			data->track_data.save_data = action_trace;
4049 			data->fn = ontrack_action;
4050 		} else
4051 			data->fn = action_trace;
4052 
4053 		data->action = ACTION_TRACE;
4054 	}
4055 
4056 	data->action_name = kstrdup(action_name, GFP_KERNEL);
4057 	if (!data->action_name) {
4058 		ret = -ENOMEM;
4059 		goto out;
4060 	}
4061 
4062 	data->handler = handler;
4063  out:
4064 	return ret;
4065 }
4066 
4067 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
4068 					    char *str, enum handler_id handler)
4069 {
4070 	struct action_data *data;
4071 	int ret = -EINVAL;
4072 	char *var_str;
4073 
4074 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4075 	if (!data)
4076 		return ERR_PTR(-ENOMEM);
4077 
4078 	var_str = strsep(&str, ")");
4079 	if (!var_str || !str) {
4080 		ret = -EINVAL;
4081 		goto free;
4082 	}
4083 
4084 	data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
4085 	if (!data->track_data.var_str) {
4086 		ret = -ENOMEM;
4087 		goto free;
4088 	}
4089 
4090 	ret = action_parse(hist_data->event_file->tr, str, data, handler);
4091 	if (ret)
4092 		goto free;
4093  out:
4094 	return data;
4095  free:
4096 	track_data_destroy(hist_data, data);
4097 	data = ERR_PTR(ret);
4098 	goto out;
4099 }
4100 
4101 static void onmatch_destroy(struct action_data *data)
4102 {
4103 	kfree(data->match_data.event);
4104 	kfree(data->match_data.event_system);
4105 
4106 	action_data_destroy(data);
4107 }
4108 
4109 static void destroy_field_var(struct field_var *field_var)
4110 {
4111 	if (!field_var)
4112 		return;
4113 
4114 	destroy_hist_field(field_var->var, 0);
4115 	destroy_hist_field(field_var->val, 0);
4116 
4117 	kfree(field_var);
4118 }
4119 
4120 static void destroy_field_vars(struct hist_trigger_data *hist_data)
4121 {
4122 	unsigned int i;
4123 
4124 	for (i = 0; i < hist_data->n_field_vars; i++)
4125 		destroy_field_var(hist_data->field_vars[i]);
4126 }
4127 
4128 static void save_field_var(struct hist_trigger_data *hist_data,
4129 			   struct field_var *field_var)
4130 {
4131 	hist_data->field_vars[hist_data->n_field_vars++] = field_var;
4132 
4133 	if (field_var->val->flags & HIST_FIELD_FL_STRING)
4134 		hist_data->n_field_var_str++;
4135 }
4136 
4137 
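/*
 * check_synth_field - return 0 if the hist field's type string matches the
 * synthetic event field at field_pos, -EINVAL otherwise (including when
 * field_pos is out of range).
 */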
4138 static int check_synth_field(struct synth_event *event,
4139 			     struct hist_field *hist_field,
4140 			     unsigned int field_pos)
4141 {
4142 	struct synth_field *field;
4143 
4144 	if (field_pos >= event->n_fields)
4145 		return -EINVAL;
4146 
4147 	field = event->fields[field_pos];
4148 
4149 	if (strcmp(field->type, hist_field->type) != 0)
4150 		return -EINVAL;
4151 
4152 	return 0;
4153 }
4154 
4155 static struct hist_field *
4156 trace_action_find_var(struct hist_trigger_data *hist_data,
4157 		      struct action_data *data,
4158 		      char *system, char *event, char *var)
4159 {
4160 	struct trace_array *tr = hist_data->event_file->tr;
4161 	struct hist_field *hist_field;
4162 
4163 	var++; /* skip '$' */
4164 
4165 	hist_field = find_target_event_var(hist_data, system, event, var);
4166 	if (!hist_field) {
4167 		if (!system && data->handler == HANDLER_ONMATCH) {
4168 			system = data->match_data.event_system;
4169 			event = data->match_data.event;
4170 		}
4171 
4172 		hist_field = find_event_var(hist_data, system, event, var);
4173 	}
4174 
4175 	if (!hist_field)
4176 		hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
4177 
4178 	return hist_field;
4179 }
4180 
4181 static struct hist_field *
4182 trace_action_create_field_var(struct hist_trigger_data *hist_data,
4183 			      struct action_data *data, char *system,
4184 			      char *event, char *var)
4185 {
4186 	struct hist_field *hist_field = NULL;
4187 	struct field_var *field_var;
4188 
4189 	/*
4190 	 * First try to create a field var on the target event (the
4191 	 * event currently being defined).  This will create a variable for
4192 	 * unqualified fields on the target event, or if qualified,
4193 	 * target fields that have qualified names matching the target.
4194 	 */
4195 	field_var = create_target_field_var(hist_data, system, event, var);
4196 
4197 	if (field_var && !IS_ERR(field_var)) {
4198 		save_field_var(hist_data, field_var);
4199 		hist_field = field_var->var;
4200 	} else {
4201 		field_var = NULL;
4202 		/*
4203 		 * If no explicit system.event is specified, default to
4204 		 * looking for fields on the onmatch(system.event.xxx)
4205 		 * event.
4206 		 */
4207 		if (!system && data->handler == HANDLER_ONMATCH) {
4208 			system = data->match_data.event_system;
4209 			event = data->match_data.event;
4210 		}
4211 
4212 		/*
4213 		 * At this point, we're looking at a field on another
4214 		 * event.  Because we can't modify a hist trigger on
4215 		 * another event to add a variable for a field, we need
4216 		 * to create a new trigger on that event and create the
4217 		 * variable at the same time.
4218 		 */
4219 		hist_field = create_field_var_hist(hist_data, system, event, var);
4220 		if (IS_ERR(hist_field))
4221 			goto free;
4222 	}
4223  out:
4224 	return hist_field;
4225  free:
4226 	destroy_field_var(field_var);
4227 	hist_field = NULL;
4228 	goto out;
4229 }
4230 
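/*
 * trace_action_create - bind a synthetic-event action to its inputs.  Look
 * up the synthetic event named by the action (or by the first trace()
 * param), then turn each remaining param into either a reference to an
 * existing variable ('$var') or a new field variable, checking its type
 * against the corresponding synthetic event field.  On success the event
 * and the index of its first variable reference are saved for use when
 * the action fires.
 */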
4231 static int trace_action_create(struct hist_trigger_data *hist_data,
4232 			       struct action_data *data)
4233 {
4234 	struct trace_array *tr = hist_data->event_file->tr;
4235 	char *event_name, *param, *system = NULL;
4236 	struct hist_field *hist_field, *var_ref;
4237 	unsigned int i, var_ref_idx;
4238 	unsigned int field_pos = 0;
4239 	struct synth_event *event;
4240 	char *synth_event_name;
4241 	int ret = 0;
4242 
4243 	lockdep_assert_held(&event_mutex);
4244 
4245 	if (data->use_trace_keyword)
4246 		synth_event_name = data->synth_event_name;
4247 	else
4248 		synth_event_name = data->action_name;
4249 
4250 	event = find_synth_event(synth_event_name);
4251 	if (!event) {
4252 		hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
4253 		return -EINVAL;
4254 	}
4255 
4256 	event->ref++;
4257 
4258 	var_ref_idx = hist_data->n_var_refs;
4259 
4260 	for (i = 0; i < data->n_params; i++) {
4261 		char *p;
4262 
4263 		p = param = kstrdup(data->params[i], GFP_KERNEL);
4264 		if (!param) {
4265 			ret = -ENOMEM;
4266 			goto err;
4267 		}
4268 
4269 		system = strsep(&param, ".");
4270 		if (!param) {
4271 			param = (char *)system;
4272 			system = event_name = NULL;
4273 		} else {
4274 			event_name = strsep(&param, ".");
4275 			if (!param) {
4276 				kfree(p);
4277 				ret = -EINVAL;
4278 				goto err;
4279 			}
4280 		}
4281 
4282 		if (param[0] == '$')
4283 			hist_field = trace_action_find_var(hist_data, data,
4284 							   system, event_name,
4285 							   param);
4286 		else
4287 			hist_field = trace_action_create_field_var(hist_data,
4288 								   data,
4289 								   system,
4290 								   event_name,
4291 								   param);
4292 
4293 		if (!hist_field) {
4294 			kfree(p);
4295 			ret = -EINVAL;
4296 			goto err;
4297 		}
4298 
4299 		if (check_synth_field(event, hist_field, field_pos) == 0) {
4300 			var_ref = create_var_ref(hist_data, hist_field,
4301 						 system, event_name);
4302 			if (!var_ref) {
4303 				kfree(p);
4304 				ret = -ENOMEM;
4305 				goto err;
4306 			}
4307 
4308 			field_pos++;
4309 			kfree(p);
4310 			continue;
4311 		}
4312 
4313 		hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
4314 		kfree(p);
4315 		ret = -EINVAL;
4316 		goto err;
4317 	}
4318 
4319 	if (field_pos != event->n_fields) {
4320 		hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
4321 		ret = -EINVAL;
4322 		goto err;
4323 	}
4324 
4325 	data->synth_event = event;
4326 	data->var_ref_idx = var_ref_idx;
4327  out:
4328 	return ret;
4329  err:
4330 	event->ref--;
4331 
4332 	goto out;
4333 }
4334 
4335 static int action_create(struct hist_trigger_data *hist_data,
4336 			 struct action_data *data)
4337 {
4338 	struct trace_event_file *file = hist_data->event_file;
4339 	struct trace_array *tr = file->tr;
4340 	struct track_data *track_data;
4341 	struct field_var *field_var;
4342 	unsigned int i;
4343 	char *param;
4344 	int ret = 0;
4345 
4346 	if (data->action == ACTION_TRACE)
4347 		return trace_action_create(hist_data, data);
4348 
4349 	if (data->action == ACTION_SNAPSHOT) {
4350 		track_data = track_data_alloc(hist_data->key_size, data, hist_data);
4351 		if (IS_ERR(track_data)) {
4352 			ret = PTR_ERR(track_data);
4353 			goto out;
4354 		}
4355 
4356 		ret = tracing_snapshot_cond_enable(file->tr, track_data,
4357 						   cond_snapshot_update);
4358 		if (ret)
4359 			track_data_free(track_data);
4360 
4361 		goto out;
4362 	}
4363 
4364 	if (data->action == ACTION_SAVE) {
4365 		if (hist_data->n_save_vars) {
4366 			ret = -EEXIST;
4367 			hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
4368 			goto out;
4369 		}
4370 
4371 		for (i = 0; i < data->n_params; i++) {
4372 			param = kstrdup(data->params[i], GFP_KERNEL);
4373 			if (!param) {
4374 				ret = -ENOMEM;
4375 				goto out;
4376 			}
4377 
4378 			field_var = create_target_field_var(hist_data, NULL, NULL, param);
4379 			if (IS_ERR(field_var)) {
4380 				hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
4381 					 errpos(param));
4382 				ret = PTR_ERR(field_var);
4383 				kfree(param);
4384 				goto out;
4385 			}
4386 
4387 			hist_data->save_vars[hist_data->n_save_vars++] = field_var;
4388 			if (field_var->val->flags & HIST_FIELD_FL_STRING)
4389 				hist_data->n_save_var_str++;
4390 			kfree(param);
4391 		}
4392 	}
4393  out:
4394 	return ret;
4395 }
4396 
4397 static int onmatch_create(struct hist_trigger_data *hist_data,
4398 			  struct action_data *data)
4399 {
4400 	return action_create(hist_data, data);
4401 }
4402 
4403 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
4404 {
4405 	char *match_event, *match_event_system;
4406 	struct action_data *data;
4407 	int ret = -EINVAL;
4408 
4409 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4410 	if (!data)
4411 		return ERR_PTR(-ENOMEM);
4412 
4413 	match_event = strsep(&str, ")");
4414 	if (!match_event || !str) {
4415 		hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
4416 		goto free;
4417 	}
4418 
4419 	match_event_system = strsep(&match_event, ".");
4420 	if (!match_event) {
4421 		hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
4422 		goto free;
4423 	}
4424 
4425 	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
4426 		hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
4427 		goto free;
4428 	}
4429 
4430 	data->match_data.event = kstrdup(match_event, GFP_KERNEL);
4431 	if (!data->match_data.event) {
4432 		ret = -ENOMEM;
4433 		goto free;
4434 	}
4435 
4436 	data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
4437 	if (!data->match_data.event_system) {
4438 		ret = -ENOMEM;
4439 		goto free;
4440 	}
4441 
4442 	ret = action_parse(tr, str, data, HANDLER_ONMATCH);
4443 	if (ret)
4444 		goto free;
4445  out:
4446 	return data;
4447  free:
4448 	onmatch_destroy(data);
4449 	data = ERR_PTR(ret);
4450 	goto out;
4451 }
4452 
4453 static int create_hitcount_val(struct hist_trigger_data *hist_data)
4454 {
4455 	hist_data->fields[HITCOUNT_IDX] =
4456 		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
4457 	if (!hist_data->fields[HITCOUNT_IDX])
4458 		return -ENOMEM;
4459 
4460 	hist_data->n_vals++;
4461 	hist_data->n_fields++;
4462 
4463 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
4464 		return -EINVAL;
4465 
4466 	return 0;
4467 }
4468 
4469 static int __create_val_field(struct hist_trigger_data *hist_data,
4470 			      unsigned int val_idx,
4471 			      struct trace_event_file *file,
4472 			      char *var_name, char *field_str,
4473 			      unsigned long flags)
4474 {
4475 	struct hist_field *hist_field;
4476 	int ret = 0;
4477 
4478 	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
4479 	if (IS_ERR(hist_field)) {
4480 		ret = PTR_ERR(hist_field);
4481 		goto out;
4482 	}
4483 
4484 	hist_data->fields[val_idx] = hist_field;
4485 
4486 	++hist_data->n_vals;
4487 	++hist_data->n_fields;
4488 
4489 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4490 		ret = -EINVAL;
4491  out:
4492 	return ret;
4493 }
4494 
4495 static int create_val_field(struct hist_trigger_data *hist_data,
4496 			    unsigned int val_idx,
4497 			    struct trace_event_file *file,
4498 			    char *field_str)
4499 {
4500 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
4501 		return -EINVAL;
4502 
4503 	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
4504 }
4505 
4506 static int create_var_field(struct hist_trigger_data *hist_data,
4507 			    unsigned int val_idx,
4508 			    struct trace_event_file *file,
4509 			    char *var_name, char *expr_str)
4510 {
4511 	struct trace_array *tr = hist_data->event_file->tr;
4512 	unsigned long flags = 0;
4513 
4514 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4515 		return -EINVAL;
4516 
4517 	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
4518 		hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
4519 		return -EINVAL;
4520 	}
4521 
4522 	flags |= HIST_FIELD_FL_VAR;
4523 	hist_data->n_vars++;
4524 	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
4525 		return -EINVAL;
4526 
4527 	return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
4528 }
4529 
4530 static int create_val_fields(struct hist_trigger_data *hist_data,
4531 			     struct trace_event_file *file)
4532 {
4533 	char *fields_str, *field_str;
4534 	unsigned int i, j = 1;
4535 	int ret;
4536 
4537 	ret = create_hitcount_val(hist_data);
4538 	if (ret)
4539 		goto out;
4540 
4541 	fields_str = hist_data->attrs->vals_str;
4542 	if (!fields_str)
4543 		goto out;
4544 
4545 	strsep(&fields_str, "=");
4546 	if (!fields_str)
4547 		goto out;
4548 
4549 	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
4550 		     j < TRACING_MAP_VALS_MAX; i++) {
4551 		field_str = strsep(&fields_str, ",");
4552 		if (!field_str)
4553 			break;
4554 
4555 		if (strcmp(field_str, "hitcount") == 0)
4556 			continue;
4557 
4558 		ret = create_val_field(hist_data, j++, file, field_str);
4559 		if (ret)
4560 			goto out;
4561 	}
4562 
4563 	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
4564 		ret = -EINVAL;
4565  out:
4566 	return ret;
4567 }
4568 
4569 static int create_key_field(struct hist_trigger_data *hist_data,
4570 			    unsigned int key_idx,
4571 			    unsigned int key_offset,
4572 			    struct trace_event_file *file,
4573 			    char *field_str)
4574 {
4575 	struct trace_array *tr = hist_data->event_file->tr;
4576 	struct hist_field *hist_field = NULL;
4577 	unsigned long flags = 0;
4578 	unsigned int key_size;
4579 	int ret = 0;
4580 
4581 	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
4582 		return -EINVAL;
4583 
4584 	flags |= HIST_FIELD_FL_KEY;
4585 
4586 	if (strcmp(field_str, "stacktrace") == 0) {
4587 		flags |= HIST_FIELD_FL_STACKTRACE;
4588 		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
4589 		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
4590 	} else {
4591 		hist_field = parse_expr(hist_data, file, field_str, flags,
4592 					NULL, 0);
4593 		if (IS_ERR(hist_field)) {
4594 			ret = PTR_ERR(hist_field);
4595 			goto out;
4596 		}
4597 
4598 		if (field_has_hist_vars(hist_field, 0))	{
4599 			hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
4600 			destroy_hist_field(hist_field, 0);
4601 			ret = -EINVAL;
4602 			goto out;
4603 		}
4604 
4605 		key_size = hist_field->size;
4606 	}
4607 
4608 	hist_data->fields[key_idx] = hist_field;
4609 
4610 	key_size = ALIGN(key_size, sizeof(u64));
4611 	hist_data->fields[key_idx]->size = key_size;
4612 	hist_data->fields[key_idx]->offset = key_offset;
4613 
4614 	hist_data->key_size += key_size;
4615 
4616 	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
4617 		ret = -EINVAL;
4618 		goto out;
4619 	}
4620 
4621 	hist_data->n_keys++;
4622 	hist_data->n_fields++;
4623 
4624 	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
4625 		return -EINVAL;
4626 
4627 	ret = key_size;
4628  out:
4629 	return ret;
4630 }
4631 
4632 static int create_key_fields(struct hist_trigger_data *hist_data,
4633 			     struct trace_event_file *file)
4634 {
4635 	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
4636 	char *fields_str, *field_str;
4637 	int ret = -EINVAL;
4638 
4639 	fields_str = hist_data->attrs->keys_str;
4640 	if (!fields_str)
4641 		goto out;
4642 
4643 	strsep(&fields_str, "=");
4644 	if (!fields_str)
4645 		goto out;
4646 
4647 	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
4648 		field_str = strsep(&fields_str, ",");
4649 		if (!field_str)
4650 			break;
4651 		ret = create_key_field(hist_data, i, key_offset,
4652 				       file, field_str);
4653 		if (ret < 0)
4654 			goto out;
4655 		key_offset += ret;
4656 	}
4657 	if (fields_str) {
4658 		ret = -EINVAL;
4659 		goto out;
4660 	}
4661 	ret = 0;
4662  out:
4663 	return ret;
4664 }
4665 
4666 static int create_var_fields(struct hist_trigger_data *hist_data,
4667 			     struct trace_event_file *file)
4668 {
4669 	unsigned int i, j = hist_data->n_vals;
4670 	unsigned int n_vars;
4671 	int ret = 0;
4672 
4673 	n_vars = hist_data->attrs->var_defs.n_vars;
4674 	for (i = 0; i < n_vars; i++) {
4675 		char *var_name = hist_data->attrs->var_defs.name[i];
4676 		char *expr = hist_data->attrs->var_defs.expr[i];
4677 
4678 		ret = create_var_field(hist_data, j++, file, var_name, expr);
4679 		if (ret)
4680 			goto out;
4681 	}
4682  out:
4683 	return ret;
4684 }
4685 
4686 static void free_var_defs(struct hist_trigger_data *hist_data)
4687 {
4688 	unsigned int i;
4689 
4690 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
4691 		kfree(hist_data->attrs->var_defs.name[i]);
4692 		kfree(hist_data->attrs->var_defs.expr[i]);
4693 	}
4694 
4695 	hist_data->attrs->var_defs.n_vars = 0;
4696 }
4697 
4698 static int parse_var_defs(struct hist_trigger_data *hist_data)
4699 {
4700 	struct trace_array *tr = hist_data->event_file->tr;
4701 	char *s, *str, *var_name, *field_str;
4702 	unsigned int i, j, n_vars = 0;
4703 	int ret = 0;
4704 
4705 	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
4706 		str = hist_data->attrs->assignment_str[i];
4707 		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
4708 			field_str = strsep(&str, ",");
4709 			if (!field_str)
4710 				break;
4711 
4712 			var_name = strsep(&field_str, "=");
4713 			if (!var_name || !field_str) {
4714 				hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
4715 					 errpos(var_name));
4716 				ret = -EINVAL;
4717 				goto free;
4718 			}
4719 
4720 			if (n_vars == TRACING_MAP_VARS_MAX) {
4721 				hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
4722 				ret = -EINVAL;
4723 				goto free;
4724 			}
4725 
4726 			s = kstrdup(var_name, GFP_KERNEL);
4727 			if (!s) {
4728 				ret = -ENOMEM;
4729 				goto free;
4730 			}
4731 			hist_data->attrs->var_defs.name[n_vars] = s;
4732 
4733 			s = kstrdup(field_str, GFP_KERNEL);
4734 			if (!s) {
4735 				kfree(hist_data->attrs->var_defs.name[n_vars]);
4736 				ret = -ENOMEM;
4737 				goto free;
4738 			}
4739 			hist_data->attrs->var_defs.expr[n_vars++] = s;
4740 
4741 			hist_data->attrs->var_defs.n_vars = n_vars;
4742 		}
4743 	}
4744 
4745 	return ret;
4746  free:
4747 	free_var_defs(hist_data);
4748 
4749 	return ret;
4750 }
4751 
4752 static int create_hist_fields(struct hist_trigger_data *hist_data,
4753 			      struct trace_event_file *file)
4754 {
4755 	int ret;
4756 
4757 	ret = parse_var_defs(hist_data);
4758 	if (ret)
4759 		goto out;
4760 
4761 	ret = create_val_fields(hist_data, file);
4762 	if (ret)
4763 		goto out;
4764 
4765 	ret = create_var_fields(hist_data, file);
4766 	if (ret)
4767 		goto out;
4768 
4769 	ret = create_key_fields(hist_data, file);
4770 	if (ret)
4771 		goto out;
4772  out:
4773 	free_var_defs(hist_data);
4774 
4775 	return ret;
4776 }
4777 
4778 static int is_descending(const char *str)
4779 {
4780 	if (!str)
4781 		return 0;
4782 
4783 	if (strcmp(str, "descending") == 0)
4784 		return 1;
4785 
4786 	if (strcmp(str, "ascending") == 0)
4787 		return 0;
4788 
4789 	return -EINVAL;
4790 }
4791 
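/*
 * create_sort_keys - parse the "sort=field[.descending],..." spec into
 * tracing_map sort keys.  "hitcount" is always available; other names are
 * matched against the non-variable val/key fields, whose tracing_map index
 * skips over variables.  At most TRACING_MAP_SORT_KEYS_MAX keys are allowed.
 */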
4792 static int create_sort_keys(struct hist_trigger_data *hist_data)
4793 {
4794 	char *fields_str = hist_data->attrs->sort_key_str;
4795 	struct tracing_map_sort_key *sort_key;
4796 	int descending, ret = 0;
4797 	unsigned int i, j, k;
4798 
4799 	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
4800 
4801 	if (!fields_str)
4802 		goto out;
4803 
4804 	strsep(&fields_str, "=");
4805 	if (!fields_str) {
4806 		ret = -EINVAL;
4807 		goto out;
4808 	}
4809 
4810 	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4811 		struct hist_field *hist_field;
4812 		char *field_str, *field_name;
4813 		const char *test_name;
4814 
4815 		sort_key = &hist_data->sort_keys[i];
4816 
4817 		field_str = strsep(&fields_str, ",");
4818 		if (!field_str) {
4819 			if (i == 0)
4820 				ret = -EINVAL;
4821 			break;
4822 		}
4823 
4824 		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4825 			ret = -EINVAL;
4826 			break;
4827 		}
4828 
4829 		field_name = strsep(&field_str, ".");
4830 		if (!field_name) {
4831 			ret = -EINVAL;
4832 			break;
4833 		}
4834 
4835 		if (strcmp(field_name, "hitcount") == 0) {
4836 			descending = is_descending(field_str);
4837 			if (descending < 0) {
4838 				ret = descending;
4839 				break;
4840 			}
4841 			sort_key->descending = descending;
4842 			continue;
4843 		}
4844 
4845 		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4846 			unsigned int idx;
4847 
4848 			hist_field = hist_data->fields[j];
4849 			if (hist_field->flags & HIST_FIELD_FL_VAR)
4850 				continue;
4851 
4852 			idx = k++;
4853 
4854 			test_name = hist_field_name(hist_field, 0);
4855 
4856 			if (strcmp(field_name, test_name) == 0) {
4857 				sort_key->field_idx = idx;
4858 				descending = is_descending(field_str);
4859 				if (descending < 0) {
4860 					ret = descending;
4861 					goto out;
4862 				}
4863 				sort_key->descending = descending;
4864 				break;
4865 			}
4866 		}
4867 		if (j == hist_data->n_fields) {
4868 			ret = -EINVAL;
4869 			break;
4870 		}
4871 	}
4872 
4873 	hist_data->n_sort_keys = i;
4874  out:
4875 	return ret;
4876 }
4877 
4878 static void destroy_actions(struct hist_trigger_data *hist_data)
4879 {
4880 	unsigned int i;
4881 
4882 	for (i = 0; i < hist_data->n_actions; i++) {
4883 		struct action_data *data = hist_data->actions[i];
4884 
4885 		if (data->handler == HANDLER_ONMATCH)
4886 			onmatch_destroy(data);
4887 		else if (data->handler == HANDLER_ONMAX ||
4888 			 data->handler == HANDLER_ONCHANGE)
4889 			track_data_destroy(hist_data, data);
4890 		else
4891 			kfree(data);
4892 	}
4893 }
4894 
4895 static int parse_actions(struct hist_trigger_data *hist_data)
4896 {
4897 	struct trace_array *tr = hist_data->event_file->tr;
4898 	struct action_data *data;
4899 	unsigned int i;
4900 	int ret = 0;
4901 	char *str;
4902 	int len;
4903 
4904 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4905 		str = hist_data->attrs->action_str[i];
4906 
4907 		if ((len = str_has_prefix(str, "onmatch("))) {
4908 			char *action_str = str + len;
4909 
4910 			data = onmatch_parse(tr, action_str);
4911 			if (IS_ERR(data)) {
4912 				ret = PTR_ERR(data);
4913 				break;
4914 			}
4915 		} else if ((len = str_has_prefix(str, "onmax("))) {
4916 			char *action_str = str + len;
4917 
4918 			data = track_data_parse(hist_data, action_str,
4919 						HANDLER_ONMAX);
4920 			if (IS_ERR(data)) {
4921 				ret = PTR_ERR(data);
4922 				break;
4923 			}
4924 		} else if ((len = str_has_prefix(str, "onchange("))) {
4925 			char *action_str = str + len;
4926 
4927 			data = track_data_parse(hist_data, action_str,
4928 						HANDLER_ONCHANGE);
4929 			if (IS_ERR(data)) {
4930 				ret = PTR_ERR(data);
4931 				break;
4932 			}
4933 		} else {
4934 			ret = -EINVAL;
4935 			break;
4936 		}
4937 
4938 		hist_data->actions[hist_data->n_actions++] = data;
4939 	}
4940 
4941 	return ret;
4942 }
4943 
4944 static int create_actions(struct hist_trigger_data *hist_data)
4945 {
4946 	struct action_data *data;
4947 	unsigned int i;
4948 	int ret = 0;
4949 
4950 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4951 		data = hist_data->actions[i];
4952 
4953 		if (data->handler == HANDLER_ONMATCH) {
4954 			ret = onmatch_create(hist_data, data);
4955 			if (ret)
4956 				break;
4957 		} else if (data->handler == HANDLER_ONMAX ||
4958 			   data->handler == HANDLER_ONCHANGE) {
4959 			ret = track_data_create(hist_data, data);
4960 			if (ret)
4961 				break;
4962 		} else {
4963 			ret = -EINVAL;
4964 			break;
4965 		}
4966 	}
4967 
4968 	return ret;
4969 }
4970 
4971 static void print_actions(struct seq_file *m,
4972 			  struct hist_trigger_data *hist_data,
4973 			  struct tracing_map_elt *elt)
4974 {
4975 	unsigned int i;
4976 
4977 	for (i = 0; i < hist_data->n_actions; i++) {
4978 		struct action_data *data = hist_data->actions[i];
4979 
4980 		if (data->action == ACTION_SNAPSHOT)
4981 			continue;
4982 
4983 		if (data->handler == HANDLER_ONMAX ||
4984 		    data->handler == HANDLER_ONCHANGE)
4985 			track_data_print(m, hist_data, elt, data);
4986 	}
4987 }
4988 
4989 static void print_action_spec(struct seq_file *m,
4990 			      struct hist_trigger_data *hist_data,
4991 			      struct action_data *data)
4992 {
4993 	unsigned int i;
4994 
4995 	if (data->action == ACTION_SAVE) {
4996 		for (i = 0; i < hist_data->n_save_vars; i++) {
4997 			seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
4998 			if (i < hist_data->n_save_vars - 1)
4999 				seq_puts(m, ",");
5000 		}
5001 	} else if (data->action == ACTION_TRACE) {
5002 		if (data->use_trace_keyword)
5003 			seq_printf(m, "%s", data->synth_event_name);
5004 		for (i = 0; i < data->n_params; i++) {
5005 			if (i || data->use_trace_keyword)
5006 				seq_puts(m, ",");
5007 			seq_printf(m, "%s", data->params[i]);
5008 		}
5009 	}
5010 }
5011 
5012 static void print_track_data_spec(struct seq_file *m,
5013 				  struct hist_trigger_data *hist_data,
5014 				  struct action_data *data)
5015 {
5016 	if (data->handler == HANDLER_ONMAX)
5017 		seq_puts(m, ":onmax(");
5018 	else if (data->handler == HANDLER_ONCHANGE)
5019 		seq_puts(m, ":onchange(");
5020 	seq_printf(m, "%s", data->track_data.var_str);
5021 	seq_printf(m, ").%s(", data->action_name);
5022 
5023 	print_action_spec(m, hist_data, data);
5024 
5025 	seq_puts(m, ")");
5026 }
5027 
5028 static void print_onmatch_spec(struct seq_file *m,
5029 			       struct hist_trigger_data *hist_data,
5030 			       struct action_data *data)
5031 {
5032 	seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
5033 		   data->match_data.event);
5034 
5035 	seq_printf(m, "%s(", data->action_name);
5036 
5037 	print_action_spec(m, hist_data, data);
5038 
5039 	seq_puts(m, ")");
5040 }
5041 
5042 static bool actions_match(struct hist_trigger_data *hist_data,
5043 			  struct hist_trigger_data *hist_data_test)
5044 {
5045 	unsigned int i, j;
5046 
5047 	if (hist_data->n_actions != hist_data_test->n_actions)
5048 		return false;
5049 
5050 	for (i = 0; i < hist_data->n_actions; i++) {
5051 		struct action_data *data = hist_data->actions[i];
5052 		struct action_data *data_test = hist_data_test->actions[i];
5053 		char *action_name, *action_name_test;
5054 
5055 		if (data->handler != data_test->handler)
5056 			return false;
5057 		if (data->action != data_test->action)
5058 			return false;
5059 
5060 		if (data->n_params != data_test->n_params)
5061 			return false;
5062 
5063 		for (j = 0; j < data->n_params; j++) {
5064 			if (strcmp(data->params[j], data_test->params[j]) != 0)
5065 				return false;
5066 		}
5067 
5068 		if (data->use_trace_keyword)
5069 			action_name = data->synth_event_name;
5070 		else
5071 			action_name = data->action_name;
5072 
5073 		if (data_test->use_trace_keyword)
5074 			action_name_test = data_test->synth_event_name;
5075 		else
5076 			action_name_test = data_test->action_name;
5077 
5078 		if (strcmp(action_name, action_name_test) != 0)
5079 			return false;
5080 
5081 		if (data->handler == HANDLER_ONMATCH) {
5082 			if (strcmp(data->match_data.event_system,
5083 				   data_test->match_data.event_system) != 0)
5084 				return false;
5085 			if (strcmp(data->match_data.event,
5086 				   data_test->match_data.event) != 0)
5087 				return false;
5088 		} else if (data->handler == HANDLER_ONMAX ||
5089 			   data->handler == HANDLER_ONCHANGE) {
5090 			if (strcmp(data->track_data.var_str,
5091 				   data_test->track_data.var_str) != 0)
5092 				return false;
5093 		}
5094 	}
5095 
5096 	return true;
5097 }
5098 
5099 
5100 static void print_actions_spec(struct seq_file *m,
5101 			       struct hist_trigger_data *hist_data)
5102 {
5103 	unsigned int i;
5104 
5105 	for (i = 0; i < hist_data->n_actions; i++) {
5106 		struct action_data *data = hist_data->actions[i];
5107 
5108 		if (data->handler == HANDLER_ONMATCH)
5109 			print_onmatch_spec(m, hist_data, data);
5110 		else if (data->handler == HANDLER_ONMAX ||
5111 			 data->handler == HANDLER_ONCHANGE)
5112 			print_track_data_spec(m, hist_data, data);
5113 	}
5114 }
5115 
5116 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
5117 {
5118 	unsigned int i;
5119 
5120 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5121 		kfree(hist_data->field_var_hists[i]->cmd);
5122 		kfree(hist_data->field_var_hists[i]);
5123 	}
5124 }
5125 
5126 static void destroy_hist_data(struct hist_trigger_data *hist_data)
5127 {
5128 	if (!hist_data)
5129 		return;
5130 
5131 	destroy_hist_trigger_attrs(hist_data->attrs);
5132 	destroy_hist_fields(hist_data);
5133 	tracing_map_destroy(hist_data->map);
5134 
5135 	destroy_actions(hist_data);
5136 	destroy_field_vars(hist_data);
5137 	destroy_field_var_hists(hist_data);
5138 
5139 	kfree(hist_data);
5140 }
5141 
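/*
 * create_tracing_map_fields - register each hist field with the tracing_map:
 * key fields get a compare fn (none for stacktraces, string or numeric
 * otherwise), non-variable value fields become sums, and variables get a
 * per-element variable slot whose index is stored back in the hist field.
 */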
5142 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
5143 {
5144 	struct tracing_map *map = hist_data->map;
5145 	struct ftrace_event_field *field;
5146 	struct hist_field *hist_field;
5147 	int i, idx = 0;
5148 
5149 	for_each_hist_field(i, hist_data) {
5150 		hist_field = hist_data->fields[i];
5151 		if (hist_field->flags & HIST_FIELD_FL_KEY) {
5152 			tracing_map_cmp_fn_t cmp_fn;
5153 
5154 			field = hist_field->field;
5155 
5156 			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
5157 				cmp_fn = tracing_map_cmp_none;
5158 			else if (!field)
5159 				cmp_fn = tracing_map_cmp_num(hist_field->size,
5160 							     hist_field->is_signed);
5161 			else if (is_string_field(field))
5162 				cmp_fn = tracing_map_cmp_string;
5163 			else
5164 				cmp_fn = tracing_map_cmp_num(field->size,
5165 							     field->is_signed);
5166 			idx = tracing_map_add_key_field(map,
5167 							hist_field->offset,
5168 							cmp_fn);
5169 		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
5170 			idx = tracing_map_add_sum_field(map);
5171 
5172 		if (idx < 0)
5173 			return idx;
5174 
5175 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5176 			idx = tracing_map_add_var(map);
5177 			if (idx < 0)
5178 				return idx;
5179 			hist_field->var.idx = idx;
5180 			hist_field->var.hist_data = hist_data;
5181 		}
5182 	}
5183 
5184 	return 0;
5185 }
5186 
5187 static struct hist_trigger_data *
5188 create_hist_data(unsigned int map_bits,
5189 		 struct hist_trigger_attrs *attrs,
5190 		 struct trace_event_file *file,
5191 		 bool remove)
5192 {
5193 	const struct tracing_map_ops *map_ops = NULL;
5194 	struct hist_trigger_data *hist_data;
5195 	int ret = 0;
5196 
5197 	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
5198 	if (!hist_data)
5199 		return ERR_PTR(-ENOMEM);
5200 
5201 	hist_data->attrs = attrs;
5202 	hist_data->remove = remove;
5203 	hist_data->event_file = file;
5204 
5205 	ret = parse_actions(hist_data);
5206 	if (ret)
5207 		goto free;
5208 
5209 	ret = create_hist_fields(hist_data, file);
5210 	if (ret)
5211 		goto free;
5212 
5213 	ret = create_sort_keys(hist_data);
5214 	if (ret)
5215 		goto free;
5216 
5217 	map_ops = &hist_trigger_elt_data_ops;
5218 
5219 	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
5220 					    map_ops, hist_data);
5221 	if (IS_ERR(hist_data->map)) {
5222 		ret = PTR_ERR(hist_data->map);
5223 		hist_data->map = NULL;
5224 		goto free;
5225 	}
5226 
5227 	ret = create_tracing_map_fields(hist_data);
5228 	if (ret)
5229 		goto free;
5230  out:
5231 	return hist_data;
5232  free:
5233 	hist_data->attrs = NULL;
5234 
5235 	destroy_hist_data(hist_data);
5236 
5237 	hist_data = ERR_PTR(ret);
5238 
5239 	goto out;
5240 }
5241 
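/*
 * hist_trigger_elt_update - update a map element for one event hit: each
 * val field is evaluated and either stored in its variable slot (if it is
 * a variable) or added to its running sum, variables attached to key
 * fields are set, and any field variables are updated from the record.
 */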
5242 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
5243 				    struct tracing_map_elt *elt, void *rec,
5244 				    struct ring_buffer_event *rbe,
5245 				    u64 *var_ref_vals)
5246 {
5247 	struct hist_elt_data *elt_data;
5248 	struct hist_field *hist_field;
5249 	unsigned int i, var_idx;
5250 	u64 hist_val;
5251 
5252 	elt_data = elt->private_data;
5253 	elt_data->var_ref_vals = var_ref_vals;
5254 
5255 	for_each_hist_val_field(i, hist_data) {
5256 		hist_field = hist_data->fields[i];
5257 		hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5258 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5259 			var_idx = hist_field->var.idx;
5260 			tracing_map_set_var(elt, var_idx, hist_val);
5261 			continue;
5262 		}
5263 		tracing_map_update_sum(elt, i, hist_val);
5264 	}
5265 
5266 	for_each_hist_key_field(i, hist_data) {
5267 		hist_field = hist_data->fields[i];
5268 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5269 			hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5270 			var_idx = hist_field->var.idx;
5271 			tracing_map_set_var(elt, var_idx, hist_val);
5272 		}
5273 	}
5274 
5275 	update_field_vars(hist_data, elt, rbe, rec);
5276 }
5277 
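/*
 * add_to_key - copy one key field's value into the compound key at the
 * field's offset.  For string keys the copy length depends on the filter
 * type (dynamic, pointer or static string) and is clamped so the key
 * stays NULL-terminated.
 */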
5278 static inline void add_to_key(char *compound_key, void *key,
5279 			      struct hist_field *key_field, void *rec)
5280 {
5281 	size_t size = key_field->size;
5282 
5283 	if (key_field->flags & HIST_FIELD_FL_STRING) {
5284 		struct ftrace_event_field *field;
5285 
5286 		field = key_field->field;
5287 		if (field->filter_type == FILTER_DYN_STRING)
5288 			size = *(u32 *)(rec + field->offset) >> 16;
5289 		else if (field->filter_type == FILTER_PTR_STRING)
5290 			size = strlen(key);
5291 		else if (field->filter_type == FILTER_STATIC_STRING)
5292 			size = field->size;
5293 
5294 		/* ensure NULL-termination */
5295 		if (size > key_field->size - 1)
5296 			size = key_field->size - 1;
5297 
5298 		strncpy(compound_key + key_field->offset, (char *)key, size);
5299 	} else
5300 		memcpy(compound_key + key_field->offset, key, size);
5301 }
5302 
5303 static void
5304 hist_trigger_actions(struct hist_trigger_data *hist_data,
5305 		     struct tracing_map_elt *elt, void *rec,
5306 		     struct ring_buffer_event *rbe, void *key,
5307 		     u64 *var_ref_vals)
5308 {
5309 	struct action_data *data;
5310 	unsigned int i;
5311 
5312 	for (i = 0; i < hist_data->n_actions; i++) {
5313 		data = hist_data->actions[i];
5314 		data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
5315 	}
5316 }
5317 
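/*
 * event_hist_trigger - per-hit trigger function: evaluate each key field
 * (saving a stacktrace if requested), build a compound key when there are
 * multiple or string keys, bail out if required variable references can't
 * be resolved, then insert/update the tracing_map element and run any
 * actions once their variable references resolve.
 */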
5318 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
5319 			       struct ring_buffer_event *rbe)
5320 {
5321 	struct hist_trigger_data *hist_data = data->private_data;
5322 	bool use_compound_key = (hist_data->n_keys > 1);
5323 	unsigned long entries[HIST_STACKTRACE_DEPTH];
5324 	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
5325 	char compound_key[HIST_KEY_SIZE_MAX];
5326 	struct tracing_map_elt *elt = NULL;
5327 	struct hist_field *key_field;
5328 	u64 field_contents;
5329 	void *key = NULL;
5330 	unsigned int i;
5331 
5332 	memset(compound_key, 0, hist_data->key_size);
5333 
5334 	for_each_hist_key_field(i, hist_data) {
5335 		key_field = hist_data->fields[i];
5336 
5337 		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5338 			memset(entries, 0, HIST_STACKTRACE_SIZE);
5339 			stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
5340 					 HIST_STACKTRACE_SKIP);
5341 			key = entries;
5342 		} else {
5343 			field_contents = key_field->fn(key_field, elt, rbe, rec);
5344 			if (key_field->flags & HIST_FIELD_FL_STRING) {
5345 				key = (void *)(unsigned long)field_contents;
5346 				use_compound_key = true;
5347 			} else
5348 				key = (void *)&field_contents;
5349 		}
5350 
5351 		if (use_compound_key)
5352 			add_to_key(compound_key, key, key_field, rec);
5353 	}
5354 
5355 	if (use_compound_key)
5356 		key = compound_key;
5357 
5358 	if (hist_data->n_var_refs &&
5359 	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
5360 		return;
5361 
5362 	elt = tracing_map_insert(hist_data->map, key);
5363 	if (!elt)
5364 		return;
5365 
5366 	hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
5367 
5368 	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
5369 		hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
5370 }
5371 
5372 static void hist_trigger_stacktrace_print(struct seq_file *m,
5373 					  unsigned long *stacktrace_entries,
5374 					  unsigned int max_entries)
5375 {
5376 	char str[KSYM_SYMBOL_LEN];
5377 	unsigned int spaces = 8;
5378 	unsigned int i;
5379 
5380 	for (i = 0; i < max_entries; i++) {
5381 		if (!stacktrace_entries[i])
5382 			return;
5383 
5384 		seq_printf(m, "%*c", 1 + spaces, ' ');
5385 		sprint_symbol(str, stacktrace_entries[i]);
5386 		seq_printf(m, "%s\n", str);
5387 	}
5388 }
5389 
5390 static void hist_trigger_print_key(struct seq_file *m,
5391 				   struct hist_trigger_data *hist_data,
5392 				   void *key,
5393 				   struct tracing_map_elt *elt)
5394 {
5395 	struct hist_field *key_field;
5396 	char str[KSYM_SYMBOL_LEN];
5397 	bool multiline = false;
5398 	const char *field_name;
5399 	unsigned int i;
5400 	u64 uval;
5401 
5402 	seq_puts(m, "{ ");
5403 
5404 	for_each_hist_key_field(i, hist_data) {
5405 		key_field = hist_data->fields[i];
5406 
5407 		if (i > hist_data->n_vals)
5408 			seq_puts(m, ", ");
5409 
5410 		field_name = hist_field_name(key_field, 0);
5411 
5412 		if (key_field->flags & HIST_FIELD_FL_HEX) {
5413 			uval = *(u64 *)(key + key_field->offset);
5414 			seq_printf(m, "%s: %llx", field_name, uval);
5415 		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
5416 			uval = *(u64 *)(key + key_field->offset);
5417 			sprint_symbol_no_offset(str, uval);
5418 			seq_printf(m, "%s: [%llx] %-45s", field_name,
5419 				   uval, str);
5420 		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
5421 			uval = *(u64 *)(key + key_field->offset);
5422 			sprint_symbol(str, uval);
5423 			seq_printf(m, "%s: [%llx] %-55s", field_name,
5424 				   uval, str);
5425 		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
5426 			struct hist_elt_data *elt_data = elt->private_data;
5427 			char *comm;
5428 
5429 			if (WARN_ON_ONCE(!elt_data))
5430 				return;
5431 
5432 			comm = elt_data->comm;
5433 
5434 			uval = *(u64 *)(key + key_field->offset);
5435 			seq_printf(m, "%s: %-16s[%10llu]", field_name,
5436 				   comm, uval);
5437 		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
5438 			const char *syscall_name;
5439 
5440 			uval = *(u64 *)(key + key_field->offset);
5441 			syscall_name = get_syscall_name(uval);
5442 			if (!syscall_name)
5443 				syscall_name = "unknown_syscall";
5444 
5445 			seq_printf(m, "%s: %-30s[%3llu]", field_name,
5446 				   syscall_name, uval);
5447 		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5448 			seq_puts(m, "stacktrace:\n");
5449 			hist_trigger_stacktrace_print(m,
5450 						      key + key_field->offset,
5451 						      HIST_STACKTRACE_DEPTH);
5452 			multiline = true;
5453 		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
5454 			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
5455 				   *(u64 *)(key + key_field->offset));
5456 		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
5457 			seq_printf(m, "%s: %-50s", field_name,
5458 				   (char *)(key + key_field->offset));
5459 		} else {
5460 			uval = *(u64 *)(key + key_field->offset);
5461 			seq_printf(m, "%s: %10llu", field_name, uval);
5462 		}
5463 	}
5464 
5465 	if (!multiline)
5466 		seq_puts(m, " ");
5467 
5468 	seq_puts(m, "}");
5469 }
5470 
5471 static void hist_trigger_entry_print(struct seq_file *m,
5472 				     struct hist_trigger_data *hist_data,
5473 				     void *key,
5474 				     struct tracing_map_elt *elt)
5475 {
5476 	const char *field_name;
5477 	unsigned int i;
5478 
5479 	hist_trigger_print_key(m, hist_data, key, elt);
5480 
5481 	seq_printf(m, " hitcount: %10llu",
5482 		   tracing_map_read_sum(elt, HITCOUNT_IDX));
5483 
5484 	for (i = 1; i < hist_data->n_vals; i++) {
5485 		field_name = hist_field_name(hist_data->fields[i], 0);
5486 
5487 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
5488 		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
5489 			continue;
5490 
5491 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
5492 			seq_printf(m, "  %s: %10llx", field_name,
5493 				   tracing_map_read_sum(elt, i));
5494 		} else {
5495 			seq_printf(m, "  %s: %10llu", field_name,
5496 				   tracing_map_read_sum(elt, i));
5497 		}
5498 	}
5499 
5500 	print_actions(m, hist_data, elt);
5501 
5502 	seq_puts(m, "\n");
5503 }
5504 
5505 static int print_entries(struct seq_file *m,
5506 			 struct hist_trigger_data *hist_data)
5507 {
5508 	struct tracing_map_sort_entry **sort_entries = NULL;
5509 	struct tracing_map *map = hist_data->map;
5510 	int i, n_entries;
5511 
5512 	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
5513 					     hist_data->n_sort_keys,
5514 					     &sort_entries);
5515 	if (n_entries < 0)
5516 		return n_entries;
5517 
5518 	for (i = 0; i < n_entries; i++)
5519 		hist_trigger_entry_print(m, hist_data,
5520 					 sort_entries[i]->key,
5521 					 sort_entries[i]->elt);
5522 
5523 	tracing_map_destroy_sort_entries(sort_entries, n_entries);
5524 
5525 	return n_entries;
5526 }
5527 
5528 static void hist_trigger_show(struct seq_file *m,
5529 			      struct event_trigger_data *data, int n)
5530 {
5531 	struct hist_trigger_data *hist_data;
5532 	int n_entries;
5533 
5534 	if (n > 0)
5535 		seq_puts(m, "\n\n");
5536 
5537 	seq_puts(m, "# event histogram\n#\n# trigger info: ");
5538 	data->ops->print(m, data->ops, data);
5539 	seq_puts(m, "#\n\n");
5540 
5541 	hist_data = data->private_data;
5542 	n_entries = print_entries(m, hist_data);
5543 	if (n_entries < 0)
5544 		n_entries = 0;
5545 
5546 	track_data_snapshot_print(m, hist_data);
5547 
5548 	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
5549 		   (u64)atomic64_read(&hist_data->map->hits),
5550 		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
5551 }
5552 
5553 static int hist_show(struct seq_file *m, void *v)
5554 {
5555 	struct event_trigger_data *data;
5556 	struct trace_event_file *event_file;
5557 	int n = 0, ret = 0;
5558 
5559 	mutex_lock(&event_mutex);
5560 
5561 	event_file = event_file_data(m->private);
5562 	if (unlikely(!event_file)) {
5563 		ret = -ENODEV;
5564 		goto out_unlock;
5565 	}
5566 
5567 	list_for_each_entry(data, &event_file->triggers, list) {
5568 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5569 			hist_trigger_show(m, data, n++);
5570 	}
5571 
5572  out_unlock:
5573 	mutex_unlock(&event_mutex);
5574 
5575 	return ret;
5576 }
5577 
5578 static int event_hist_open(struct inode *inode, struct file *file)
5579 {
5580 	int ret;
5581 
5582 	ret = security_locked_down(LOCKDOWN_TRACEFS);
5583 	if (ret)
5584 		return ret;
5585 
5586 	return single_open(file, hist_show, file);
5587 }
5588 
5589 const struct file_operations event_hist_fops = {
5590 	.open = event_hist_open,
5591 	.read = seq_read,
5592 	.llseek = seq_lseek,
5593 	.release = single_release,
5594 };
5595 
5596 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
5597 {
5598 	const char *field_name = hist_field_name(hist_field, 0);
5599 
5600 	if (hist_field->var.name)
5601 		seq_printf(m, "%s=", hist_field->var.name);
5602 
5603 	if (hist_field->flags & HIST_FIELD_FL_CPU)
5604 		seq_puts(m, "cpu");
5605 	else if (field_name) {
5606 		if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
5607 		    hist_field->flags & HIST_FIELD_FL_ALIAS)
5608 			seq_putc(m, '$');
5609 		seq_printf(m, "%s", field_name);
5610 	} else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5611 		seq_puts(m, "common_timestamp");
5612 
5613 	if (hist_field->flags) {
5614 		if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5615 		    !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
5616 			const char *flags = get_hist_field_flags(hist_field);
5617 
5618 			if (flags)
5619 				seq_printf(m, ".%s", flags);
5620 		}
5621 	}
5622 }
5623 
5624 static int event_hist_trigger_print(struct seq_file *m,
5625 				    struct event_trigger_ops *ops,
5626 				    struct event_trigger_data *data)
5627 {
5628 	struct hist_trigger_data *hist_data = data->private_data;
5629 	struct hist_field *field;
5630 	bool have_var = false;
5631 	unsigned int i;
5632 
5633 	seq_puts(m, "hist:");
5634 
5635 	if (data->name)
5636 		seq_printf(m, "%s:", data->name);
5637 
5638 	seq_puts(m, "keys=");
5639 
5640 	for_each_hist_key_field(i, hist_data) {
5641 		field = hist_data->fields[i];
5642 
5643 		if (i > hist_data->n_vals)
5644 			seq_puts(m, ",");
5645 
5646 		if (field->flags & HIST_FIELD_FL_STACKTRACE)
5647 			seq_puts(m, "stacktrace");
5648 		else
5649 			hist_field_print(m, field);
5650 	}
5651 
5652 	seq_puts(m, ":vals=");
5653 
5654 	for_each_hist_val_field(i, hist_data) {
5655 		field = hist_data->fields[i];
5656 		if (field->flags & HIST_FIELD_FL_VAR) {
5657 			have_var = true;
5658 			continue;
5659 		}
5660 
5661 		if (i == HITCOUNT_IDX)
5662 			seq_puts(m, "hitcount");
5663 		else {
5664 			seq_puts(m, ",");
5665 			hist_field_print(m, field);
5666 		}
5667 	}
5668 
5669 	if (have_var) {
5670 		unsigned int n = 0;
5671 
5672 		seq_puts(m, ":");
5673 
5674 		for_each_hist_val_field(i, hist_data) {
5675 			field = hist_data->fields[i];
5676 
5677 			if (field->flags & HIST_FIELD_FL_VAR) {
5678 				if (n++)
5679 					seq_puts(m, ",");
5680 				hist_field_print(m, field);
5681 			}
5682 		}
5683 	}
5684 
5685 	seq_puts(m, ":sort=");
5686 
5687 	for (i = 0; i < hist_data->n_sort_keys; i++) {
5688 		struct tracing_map_sort_key *sort_key;
5689 		unsigned int idx, first_key_idx;
5690 
5691 		/* skip VAR vals */
5692 		first_key_idx = hist_data->n_vals - hist_data->n_vars;
5693 
5694 		sort_key = &hist_data->sort_keys[i];
5695 		idx = sort_key->field_idx;
5696 
5697 		if (WARN_ON(idx >= HIST_FIELDS_MAX))
5698 			return -EINVAL;
5699 
5700 		if (i > 0)
5701 			seq_puts(m, ",");
5702 
5703 		if (idx == HITCOUNT_IDX)
5704 			seq_puts(m, "hitcount");
5705 		else {
5706 			if (idx >= first_key_idx)
5707 				idx += hist_data->n_vars;
5708 			hist_field_print(m, hist_data->fields[idx]);
5709 		}
5710 
5711 		if (sort_key->descending)
5712 			seq_puts(m, ".descending");
5713 	}
5714 	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
5715 	if (hist_data->enable_timestamps)
5716 		seq_printf(m, ":clock=%s", hist_data->attrs->clock);
5717 
5718 	print_actions_spec(m, hist_data);
5719 
5720 	if (data->filter_str)
5721 		seq_printf(m, " if %s", data->filter_str);
5722 
5723 	if (data->paused)
5724 		seq_puts(m, " [paused]");
5725 	else
5726 		seq_puts(m, " [active]");
5727 
5728 	seq_putc(m, '\n');
5729 
5730 	return 0;
5731 }
5732 
5733 static int event_hist_trigger_init(struct event_trigger_ops *ops,
5734 				   struct event_trigger_data *data)
5735 {
5736 	struct hist_trigger_data *hist_data = data->private_data;
5737 
5738 	if (!data->ref && hist_data->attrs->name)
5739 		save_named_trigger(hist_data->attrs->name, data);
5740 
5741 	data->ref++;
5742 
5743 	return 0;
5744 }
5745 
5746 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
5747 {
5748 	struct trace_event_file *file;
5749 	unsigned int i;
5750 	char *cmd;
5751 	int ret;
5752 
5753 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5754 		file = hist_data->field_var_hists[i]->hist_data->event_file;
5755 		cmd = hist_data->field_var_hists[i]->cmd;
5756 		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
5757 					      "!hist", "hist", cmd);
5758 	}
5759 }
5760 
5761 static void event_hist_trigger_free(struct event_trigger_ops *ops,
5762 				    struct event_trigger_data *data)
5763 {
5764 	struct hist_trigger_data *hist_data = data->private_data;
5765 
5766 	if (WARN_ON_ONCE(data->ref <= 0))
5767 		return;
5768 
5769 	data->ref--;
5770 	if (!data->ref) {
5771 		if (data->name)
5772 			del_named_trigger(data);
5773 
5774 		trigger_data_free(data);
5775 
5776 		remove_hist_vars(hist_data);
5777 
5778 		unregister_field_var_hists(hist_data);
5779 
5780 		destroy_hist_data(hist_data);
5781 	}
5782 }
5783 
5784 static struct event_trigger_ops event_hist_trigger_ops = {
5785 	.func			= event_hist_trigger,
5786 	.print			= event_hist_trigger_print,
5787 	.init			= event_hist_trigger_init,
5788 	.free			= event_hist_trigger_free,
5789 };
5790 
5791 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
5792 					 struct event_trigger_data *data)
5793 {
5794 	data->ref++;
5795 
5796 	save_named_trigger(data->named_data->name, data);
5797 
5798 	event_hist_trigger_init(ops, data->named_data);
5799 
5800 	return 0;
5801 }
5802 
5803 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
5804 					  struct event_trigger_data *data)
5805 {
5806 	if (WARN_ON_ONCE(data->ref <= 0))
5807 		return;
5808 
5809 	event_hist_trigger_free(ops, data->named_data);
5810 
5811 	data->ref--;
5812 	if (!data->ref) {
5813 		del_named_trigger(data);
5814 		trigger_data_free(data);
5815 	}
5816 }
5817 
5818 static struct event_trigger_ops event_hist_trigger_named_ops = {
5819 	.func			= event_hist_trigger,
5820 	.print			= event_hist_trigger_print,
5821 	.init			= event_hist_trigger_named_init,
5822 	.free			= event_hist_trigger_named_free,
5823 };
5824 
5825 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
5826 							    char *param)
5827 {
5828 	return &event_hist_trigger_ops;
5829 }
5830 
5831 static void hist_clear(struct event_trigger_data *data)
5832 {
5833 	struct hist_trigger_data *hist_data = data->private_data;
5834 
5835 	if (data->name)
5836 		pause_named_trigger(data);
5837 
5838 	tracepoint_synchronize_unregister();
5839 
5840 	tracing_map_clear(hist_data->map);
5841 
5842 	if (data->name)
5843 		unpause_named_trigger(data);
5844 }
5845 
5846 static bool compatible_field(struct ftrace_event_field *field,
5847 			     struct ftrace_event_field *test_field)
5848 {
5849 	if (field == test_field)
5850 		return true;
5851 	if (field == NULL || test_field == NULL)
5852 		return false;
5853 	if (strcmp(field->name, test_field->name) != 0)
5854 		return false;
5855 	if (strcmp(field->type, test_field->type) != 0)
5856 		return false;
5857 	if (field->size != test_field->size)
5858 		return false;
5859 	if (field->is_signed != test_field->is_signed)
5860 		return false;
5861 
5862 	return true;
5863 }
5864 
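/*
 * hist_trigger_match - decide whether two hist triggers describe the same
 * histogram: same val/field/sort-key counts, per-field flags, offsets,
 * sizes, signedness and variable names, same sort keys and actions, and
 * (unless ignore_filter) the same filter string.  Used to find an existing
 * or named trigger that a new trigger should attach to or conflict with.
 */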
5865 static bool hist_trigger_match(struct event_trigger_data *data,
5866 			       struct event_trigger_data *data_test,
5867 			       struct event_trigger_data *named_data,
5868 			       bool ignore_filter)
5869 {
5870 	struct tracing_map_sort_key *sort_key, *sort_key_test;
5871 	struct hist_trigger_data *hist_data, *hist_data_test;
5872 	struct hist_field *key_field, *key_field_test;
5873 	unsigned int i;
5874 
5875 	if (named_data && (named_data != data_test) &&
5876 	    (named_data != data_test->named_data))
5877 		return false;
5878 
5879 	if (!named_data && is_named_trigger(data_test))
5880 		return false;
5881 
5882 	hist_data = data->private_data;
5883 	hist_data_test = data_test->private_data;
5884 
5885 	if (hist_data->n_vals != hist_data_test->n_vals ||
5886 	    hist_data->n_fields != hist_data_test->n_fields ||
5887 	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
5888 		return false;
5889 
5890 	if (!ignore_filter) {
5891 		if ((data->filter_str && !data_test->filter_str) ||
5892 		   (!data->filter_str && data_test->filter_str))
5893 			return false;
5894 	}
5895 
5896 	for_each_hist_field(i, hist_data) {
5897 		key_field = hist_data->fields[i];
5898 		key_field_test = hist_data_test->fields[i];
5899 
5900 		if (key_field->flags != key_field_test->flags)
5901 			return false;
5902 		if (!compatible_field(key_field->field, key_field_test->field))
5903 			return false;
5904 		if (key_field->offset != key_field_test->offset)
5905 			return false;
5906 		if (key_field->size != key_field_test->size)
5907 			return false;
5908 		if (key_field->is_signed != key_field_test->is_signed)
5909 			return false;
5910 		if (!!key_field->var.name != !!key_field_test->var.name)
5911 			return false;
5912 		if (key_field->var.name &&
5913 		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
5914 			return false;
5915 	}
5916 
5917 	for (i = 0; i < hist_data->n_sort_keys; i++) {
5918 		sort_key = &hist_data->sort_keys[i];
5919 		sort_key_test = &hist_data_test->sort_keys[i];
5920 
5921 		if (sort_key->field_idx != sort_key_test->field_idx ||
5922 		    sort_key->descending != sort_key_test->descending)
5923 			return false;
5924 	}
5925 
5926 	if (!ignore_filter && data->filter_str &&
5927 	    (strcmp(data->filter_str, data_test->filter_str) != 0))
5928 		return false;
5929 
5930 	if (!actions_match(hist_data, hist_data_test))
5931 		return false;
5932 
5933 	return true;
5934 }
5935 
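/*
 * hist_register_trigger - register a parsed hist trigger on an event file.
 * A named trigger must match the existing trigger of that name; if an
 * equivalent trigger already exists it is paused, continued or cleared as
 * requested (otherwise -EEXIST), while continue/clear with no existing
 * trigger fails with -ENOENT.  A genuinely new trigger is initialized and,
 * if timestamps are needed, the trace clock and absolute timestamps are
 * set up.
 */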
5936 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5937 				 struct event_trigger_data *data,
5938 				 struct trace_event_file *file)
5939 {
5940 	struct hist_trigger_data *hist_data = data->private_data;
5941 	struct event_trigger_data *test, *named_data = NULL;
5942 	struct trace_array *tr = file->tr;
5943 	int ret = 0;
5944 
5945 	if (hist_data->attrs->name) {
5946 		named_data = find_named_trigger(hist_data->attrs->name);
5947 		if (named_data) {
5948 			if (!hist_trigger_match(data, named_data, named_data,
5949 						true)) {
5950 				hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
5951 				ret = -EINVAL;
5952 				goto out;
5953 			}
5954 		}
5955 	}
5956 
5957 	if (hist_data->attrs->name && !named_data)
5958 		goto new;
5959 
5960 	lockdep_assert_held(&event_mutex);
5961 
5962 	list_for_each_entry(test, &file->triggers, list) {
5963 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5964 			if (!hist_trigger_match(data, test, named_data, false))
5965 				continue;
5966 			if (hist_data->attrs->pause)
5967 				test->paused = true;
5968 			else if (hist_data->attrs->cont)
5969 				test->paused = false;
5970 			else if (hist_data->attrs->clear)
5971 				hist_clear(test);
5972 			else {
5973 				hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
5974 				ret = -EEXIST;
5975 			}
5976 			goto out;
5977 		}
5978 	}
5979  new:
5980 	if (hist_data->attrs->cont || hist_data->attrs->clear) {
5981 		hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
5982 		ret = -ENOENT;
5983 		goto out;
5984 	}
5985 
5986 	if (hist_data->attrs->pause)
5987 		data->paused = true;
5988 
5989 	if (named_data) {
5990 		data->private_data = named_data->private_data;
5991 		set_named_trigger_data(data, named_data);
5992 		data->ops = &event_hist_trigger_named_ops;
5993 	}
5994 
5995 	if (data->ops->init) {
5996 		ret = data->ops->init(data->ops, data);
5997 		if (ret < 0)
5998 			goto out;
5999 	}
6000 
6001 	if (hist_data->enable_timestamps) {
6002 		char *clock = hist_data->attrs->clock;
6003 
6004 		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
6005 		if (ret) {
6006 			hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
6007 			goto out;
6008 		}
6009 
6010 		tracing_set_time_stamp_abs(file->tr, true);
6011 	}
6012 
6013 	if (named_data)
6014 		destroy_hist_data(hist_data);
6015 
6016 	ret++;
6017  out:
6018 	return ret;
6019 }
6020 
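/*
 * Add @data to the file's trigger list and enable the trigger for the
 * event; on failure the trigger is removed from the list again and -1
 * is returned.
 */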
6021 static int hist_trigger_enable(struct event_trigger_data *data,
6022 			       struct trace_event_file *file)
6023 {
6024 	int ret = 0;
6025 
6026 	list_add_tail_rcu(&data->list, &file->triggers);
6027 
6028 	update_cond_flag(file);
6029 
6030 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
6031 		list_del_rcu(&data->list);
6032 		update_cond_flag(file);
6033 		ret--;
6034 	}
6035 
6036 	return ret;
6037 }
6038 
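/*
 * Return true if a hist trigger equivalent to @data (as judged by
 * hist_trigger_match()) is already registered on @file.
 */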
6039 static bool have_hist_trigger_match(struct event_trigger_data *data,
6040 				    struct trace_event_file *file)
6041 {
6042 	struct hist_trigger_data *hist_data = data->private_data;
6043 	struct event_trigger_data *test, *named_data = NULL;
6044 	bool match = false;
6045 
6046 	lockdep_assert_held(&event_mutex);
6047 
6048 	if (hist_data->attrs->name)
6049 		named_data = find_named_trigger(hist_data->attrs->name);
6050 
6051 	list_for_each_entry(test, &file->triggers, list) {
6052 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6053 			if (hist_trigger_match(data, test, named_data, false)) {
6054 				match = true;
6055 				break;
6056 			}
6057 		}
6058 	}
6059 
6060 	return match;
6061 }
6062 
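/*
 * Return true if the registered hist trigger matching @data has
 * variables that are still referenced elsewhere, in which case it
 * can't be removed.
 */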
6063 static bool hist_trigger_check_refs(struct event_trigger_data *data,
6064 				    struct trace_event_file *file)
6065 {
6066 	struct hist_trigger_data *hist_data = data->private_data;
6067 	struct event_trigger_data *test, *named_data = NULL;
6068 
6069 	lockdep_assert_held(&event_mutex);
6070 
6071 	if (hist_data->attrs->name)
6072 		named_data = find_named_trigger(hist_data->attrs->name);
6073 
6074 	list_for_each_entry(test, &file->triggers, list) {
6075 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6076 			if (!hist_trigger_match(data, test, named_data, false))
6077 				continue;
6078 			hist_data = test->private_data;
6079 			if (check_var_refs(hist_data))
6080 				return true;
6081 			break;
6082 		}
6083 	}
6084 
6085 	return false;
6086 }
6087 
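/*
 * Remove the hist trigger on @file matching @data, free it, and turn
 * off absolute timestamps again if this trigger had enabled them.
 */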
6088 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
6089 				    struct event_trigger_data *data,
6090 				    struct trace_event_file *file)
6091 {
6092 	struct hist_trigger_data *hist_data = data->private_data;
6093 	struct event_trigger_data *test, *named_data = NULL;
6094 	bool unregistered = false;
6095 
6096 	lockdep_assert_held(&event_mutex);
6097 
6098 	if (hist_data->attrs->name)
6099 		named_data = find_named_trigger(hist_data->attrs->name);
6100 
6101 	list_for_each_entry(test, &file->triggers, list) {
6102 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6103 			if (!hist_trigger_match(data, test, named_data, false))
6104 				continue;
6105 			unregistered = true;
6106 			list_del_rcu(&test->list);
6107 			trace_event_trigger_enable_disable(file, 0);
6108 			update_cond_flag(file);
6109 			break;
6110 		}
6111 	}
6112 
6113 	if (unregistered && test->ops->free)
6114 		test->ops->free(test->ops, test);
6115 
6116 	if (hist_data->enable_timestamps) {
6117 		if (!hist_data->remove || unregistered)
6118 			tracing_set_time_stamp_abs(file->tr, false);
6119 	}
6120 }
6121 
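/*
 * Return true if any hist trigger on @file has variables referenced by
 * other triggers, preventing the file's hist triggers from being removed.
 */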
6122 static bool hist_file_check_refs(struct trace_event_file *file)
6123 {
6124 	struct hist_trigger_data *hist_data;
6125 	struct event_trigger_data *test;
6126 
6127 	lockdep_assert_held(&event_mutex);
6128 
6129 	list_for_each_entry(test, &file->triggers, list) {
6130 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6131 			hist_data = test->private_data;
6132 			if (check_var_refs(hist_data))
6133 				return true;
6134 		}
6135 	}
6136 
6137 	return false;
6138 }
6139 
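/*
 * Remove and free all hist triggers on @file, unless one of them still
 * has referenced variables.  Synthetic event references and absolute
 * timestamp usage are dropped along the way.
 */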
6140 static void hist_unreg_all(struct trace_event_file *file)
6141 {
6142 	struct event_trigger_data *test, *n;
6143 	struct hist_trigger_data *hist_data;
6144 	struct synth_event *se;
6145 	const char *se_name;
6146 
6147 	lockdep_assert_held(&event_mutex);
6148 
6149 	if (hist_file_check_refs(file))
6150 		return;
6151 
6152 	list_for_each_entry_safe(test, n, &file->triggers, list) {
6153 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6154 			hist_data = test->private_data;
6155 			list_del_rcu(&test->list);
6156 			trace_event_trigger_enable_disable(file, 0);
6157 
6158 			se_name = trace_event_name(file->event_call);
6159 			se = find_synth_event(se_name);
6160 			if (se)
6161 				se->ref--;
6162 
6163 			update_cond_flag(file);
6164 			if (hist_data->enable_timestamps)
6165 				tracing_set_time_stamp_abs(file->tr, false);
6166 			if (test->ops->free)
6167 				test->ops->free(test->ops, test);
6168 		}
6169 	}
6170 }
6171 
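/*
 * Parse a 'hist' command written to an event's 'trigger' file: split
 * off an optional 'if' filter, parse the trigger attributes, create the
 * hist_trigger_data and register the trigger.  A command prefixed with
 * '!' unregisters the matching trigger instead.
 *
 * Illustrative usage only (the tracefs mount point may vary):
 *
 *   # echo 'hist:keys=call_site:vals=bytes_req if bytes_req >= 4096' > \
 *         /sys/kernel/tracing/events/kmem/kmalloc/trigger
 *   # echo '!hist:keys=call_site:vals=bytes_req' > \
 *         /sys/kernel/tracing/events/kmem/kmalloc/trigger
 */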
6172 static int event_hist_trigger_func(struct event_command *cmd_ops,
6173 				   struct trace_event_file *file,
6174 				   char *glob, char *cmd, char *param)
6175 {
6176 	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
6177 	struct event_trigger_data *trigger_data;
6178 	struct hist_trigger_attrs *attrs;
6179 	struct event_trigger_ops *trigger_ops;
6180 	struct hist_trigger_data *hist_data;
6181 	struct synth_event *se;
6182 	const char *se_name;
6183 	bool remove = false;
6184 	char *trigger, *p;
6185 	int ret = 0;
6186 
6187 	lockdep_assert_held(&event_mutex);
6188 
6189 	if (glob && strlen(glob)) {
6190 		hist_err_clear();
6191 		last_cmd_set(file, param);
6192 	}
6193 
6194 	if (!param)
6195 		return -EINVAL;
6196 
6197 	if (glob[0] == '!')
6198 		remove = true;
6199 
6200 	/*
6201 	 * Separate the trigger from the filter (k:v [if filter]),
6202 	 * allowing for whitespace in the trigger.
6203 	 */
6204 	p = trigger = param;
6205 	do {
6206 		p = strstr(p, "if");
6207 		if (!p)
6208 			break;
6209 		if (p == param)
6210 			return -EINVAL;
6211 		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
6212 			p++;
6213 			continue;
6214 		}
6215 		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
6216 			return -EINVAL;
6217 		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
6218 			p++;
6219 			continue;
6220 		}
6221 		break;
6222 	} while (p);
6223 
6224 	if (!p)
6225 		param = NULL;
6226 	else {
6227 		*(p - 1) = '\0';
6228 		param = strstrip(p);
6229 		trigger = strstrip(trigger);
6230 	}
6231 
6232 	attrs = parse_hist_trigger_attrs(file->tr, trigger);
6233 	if (IS_ERR(attrs))
6234 		return PTR_ERR(attrs);
6235 
6236 	if (attrs->map_bits)
6237 		hist_trigger_bits = attrs->map_bits;
6238 
6239 	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
6240 	if (IS_ERR(hist_data)) {
6241 		destroy_hist_trigger_attrs(attrs);
6242 		return PTR_ERR(hist_data);
6243 	}
6244 
6245 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
6246 
6247 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
6248 	if (!trigger_data) {
6249 		ret = -ENOMEM;
6250 		goto out_free;
6251 	}
6252 
6253 	trigger_data->count = -1;
6254 	trigger_data->ops = trigger_ops;
6255 	trigger_data->cmd_ops = cmd_ops;
6256 
6257 	INIT_LIST_HEAD(&trigger_data->list);
6258 	RCU_INIT_POINTER(trigger_data->filter, NULL);
6259 
6260 	trigger_data->private_data = hist_data;
6261 
6262 	/* if param is non-empty, it's supposed to be a filter */
6263 	if (param && cmd_ops->set_filter) {
6264 		ret = cmd_ops->set_filter(param, trigger_data, file);
6265 		if (ret < 0)
6266 			goto out_free;
6267 	}
6268 
6269 	if (remove) {
6270 		if (!have_hist_trigger_match(trigger_data, file))
6271 			goto out_free;
6272 
6273 		if (hist_trigger_check_refs(trigger_data, file)) {
6274 			ret = -EBUSY;
6275 			goto out_free;
6276 		}
6277 
6278 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
6279 		se_name = trace_event_name(file->event_call);
6280 		se = find_synth_event(se_name);
6281 		if (se)
6282 			se->ref--;
6283 		ret = 0;
6284 		goto out_free;
6285 	}
6286 
6287 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
6288 	/*
6289 	 * On success, the above returns the number of triggers
6290 	 * registered, but if it didn't register any it returns zero.
6291 	 * Consider registering no triggers a failure too.
6292 	 */
6293 	if (!ret) {
6294 		if (!(attrs->pause || attrs->cont || attrs->clear))
6295 			ret = -ENOENT;
6296 		goto out_free;
6297 	} else if (ret < 0)
6298 		goto out_free;
6299 
6300 	if (get_named_trigger_data(trigger_data))
6301 		goto enable;
6302 
6303 	if (has_hist_vars(hist_data))
6304 		save_hist_vars(hist_data);
6305 
6306 	ret = create_actions(hist_data);
6307 	if (ret)
6308 		goto out_unreg;
6309 
6310 	ret = tracing_map_init(hist_data->map);
6311 	if (ret)
6312 		goto out_unreg;
6313 enable:
6314 	ret = hist_trigger_enable(trigger_data, file);
6315 	if (ret)
6316 		goto out_unreg;
6317 
6318 	se_name = trace_event_name(file->event_call);
6319 	se = find_synth_event(se_name);
6320 	if (se)
6321 		se->ref++;
6322 	/* Just return zero, not the number of registered triggers */
6323 	ret = 0;
6324  out:
6325 	if (ret == 0)
6326 		hist_err_clear();
6327 
6328 	return ret;
6329  out_unreg:
6330 	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
6331  out_free:
6332 	if (cmd_ops->set_filter)
6333 		cmd_ops->set_filter(NULL, trigger_data, NULL);
6334 
6335 	remove_hist_vars(hist_data);
6336 
6337 	kfree(trigger_data);
6338 
6339 	destroy_hist_data(hist_data);
6340 	goto out;
6341 }
6342 
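/*
 * The 'hist' event command, tying together the parse (func), register
 * and unregister callbacks defined above.
 */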
6343 static struct event_command trigger_hist_cmd = {
6344 	.name			= "hist",
6345 	.trigger_type		= ETT_EVENT_HIST,
6346 	.flags			= EVENT_CMD_FL_NEEDS_REC,
6347 	.func			= event_hist_trigger_func,
6348 	.reg			= hist_register_trigger,
6349 	.unreg			= hist_unregister_trigger,
6350 	.unreg_all		= hist_unreg_all,
6351 	.get_trigger_ops	= event_hist_get_trigger_ops,
6352 	.set_filter		= set_trigger_filter,
6353 };
6354 
6355 __init int register_trigger_hist_cmd(void)
6356 {
6357 	int ret;
6358 
6359 	ret = register_event_command(&trigger_hist_cmd);
6360 	WARN_ON(ret < 0);
6361 
6362 	return ret;
6363 }
6364 
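/*
 * enable_hist/disable_hist trigger handler: when the triggering event
 * fires, unpause (enable) or pause (disable) every hist trigger on the
 * target event file.
 */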
6365 static void
6366 hist_enable_trigger(struct event_trigger_data *data, void *rec,
6367 		    struct ring_buffer_event *event)
6368 {
6369 	struct enable_trigger_data *enable_data = data->private_data;
6370 	struct event_trigger_data *test;
6371 
6372 	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
6373 				lockdep_is_held(&event_mutex)) {
6374 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6375 			if (enable_data->enable)
6376 				test->paused = false;
6377 			else
6378 				test->paused = true;
6379 		}
6380 	}
6381 }
6382 
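/*
 * Count-limited variant: do nothing once the count has reached zero,
 * otherwise decrement it (unless it's -1, meaning unlimited) and invoke
 * hist_enable_trigger().
 */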
6383 static void
6384 hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
6385 			  struct ring_buffer_event *event)
6386 {
6387 	if (!data->count)
6388 		return;
6389 
6390 	if (data->count != -1)
6391 		(data->count)--;
6392 
6393 	hist_enable_trigger(data, rec, event);
6394 }
6395 
6396 static struct event_trigger_ops hist_enable_trigger_ops = {
6397 	.func			= hist_enable_trigger,
6398 	.print			= event_enable_trigger_print,
6399 	.init			= event_trigger_init,
6400 	.free			= event_enable_trigger_free,
6401 };
6402 
6403 static struct event_trigger_ops hist_enable_count_trigger_ops = {
6404 	.func			= hist_enable_count_trigger,
6405 	.print			= event_enable_trigger_print,
6406 	.init			= event_trigger_init,
6407 	.free			= event_enable_trigger_free,
6408 };
6409 
6410 static struct event_trigger_ops hist_disable_trigger_ops = {
6411 	.func			= hist_enable_trigger,
6412 	.print			= event_enable_trigger_print,
6413 	.init			= event_trigger_init,
6414 	.free			= event_enable_trigger_free,
6415 };
6416 
6417 static struct event_trigger_ops hist_disable_count_trigger_ops = {
6418 	.func			= hist_enable_count_trigger,
6419 	.print			= event_enable_trigger_print,
6420 	.init			= event_trigger_init,
6421 	.free			= event_enable_trigger_free,
6422 };
6423 
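/*
 * Pick the trigger ops for an enable_hist/disable_hist command: the
 * command name selects enable vs. disable, and the presence of a count
 * parameter selects the count-limited variants.
 */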
6424 static struct event_trigger_ops *
6425 hist_enable_get_trigger_ops(char *cmd, char *param)
6426 {
6427 	struct event_trigger_ops *ops;
6428 	bool enable;
6429 
6430 	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
6431 
6432 	if (enable)
6433 		ops = param ? &hist_enable_count_trigger_ops :
6434 			&hist_enable_trigger_ops;
6435 	else
6436 		ops = param ? &hist_disable_count_trigger_ops :
6437 			&hist_disable_trigger_ops;
6438 
6439 	return ops;
6440 }
6441 
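/*
 * Remove and free all enable_hist/disable_hist triggers on @file.
 */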
6442 static void hist_enable_unreg_all(struct trace_event_file *file)
6443 {
6444 	struct event_trigger_data *test, *n;
6445 
6446 	list_for_each_entry_safe(test, n, &file->triggers, list) {
6447 		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
6448 			list_del_rcu(&test->list);
6449 			update_cond_flag(file);
6450 			trace_event_trigger_enable_disable(file, 0);
6451 			if (test->ops->free)
6452 				test->ops->free(test->ops, test);
6453 		}
6454 	}
6455 }
6456 
6457 static struct event_command trigger_hist_enable_cmd = {
6458 	.name			= ENABLE_HIST_STR,
6459 	.trigger_type		= ETT_HIST_ENABLE,
6460 	.func			= event_enable_trigger_func,
6461 	.reg			= event_enable_register_trigger,
6462 	.unreg			= event_enable_unregister_trigger,
6463 	.unreg_all		= hist_enable_unreg_all,
6464 	.get_trigger_ops	= hist_enable_get_trigger_ops,
6465 	.set_filter		= set_trigger_filter,
6466 };
6467 
6468 static struct event_command trigger_hist_disable_cmd = {
6469 	.name			= DISABLE_HIST_STR,
6470 	.trigger_type		= ETT_HIST_ENABLE,
6471 	.func			= event_enable_trigger_func,
6472 	.reg			= event_enable_register_trigger,
6473 	.unreg			= event_enable_unregister_trigger,
6474 	.unreg_all		= hist_enable_unreg_all,
6475 	.get_trigger_ops	= hist_enable_get_trigger_ops,
6476 	.set_filter		= set_trigger_filter,
6477 };
6478 
6479 static __init void unregister_trigger_hist_enable_disable_cmds(void)
6480 {
6481 	unregister_event_command(&trigger_hist_enable_cmd);
6482 	unregister_event_command(&trigger_hist_disable_cmd);
6483 }
6484 
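/*
 * Register the enable_hist and disable_hist event commands; if the
 * second registration fails, both are unregistered again.
 */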
6485 __init int register_trigger_hist_enable_disable_cmds(void)
6486 {
6487 	int ret;
6488 
6489 	ret = register_event_command(&trigger_hist_enable_cmd);
6490 	if (WARN_ON(ret < 0))
6491 		return ret;
6492 	ret = register_event_command(&trigger_hist_disable_cmd);
6493 	if (WARN_ON(ret < 0))
6494 		unregister_trigger_hist_enable_disable_cmds();
6495 
6496 	return ret;
6497 }
6498 
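/*
 * Early init: register the synthetic event dynamic-event ops and create
 * the tracefs 'synthetic_events' control file.
 */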
6499 static __init int trace_events_hist_init(void)
6500 {
6501 	struct dentry *entry = NULL;
6502 	struct dentry *d_tracer;
6503 	int err = 0;
6504 
6505 	err = dyn_event_register(&synth_event_ops);
6506 	if (err) {
6507 		pr_warn("Could not register synth_event_ops\n");
6508 		return err;
6509 	}
6510 
6511 	d_tracer = tracing_init_dentry();
6512 	if (IS_ERR(d_tracer)) {
6513 		err = PTR_ERR(d_tracer);
6514 		goto err;
6515 	}
6516 
6517 	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
6518 				    NULL, &synth_events_fops);
6519 	if (!entry) {
6520 		err = -ENODEV;
6521 		goto err;
6522 	}
6523 
6524 	return err;
6525  err:
6526 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
6527 
6528 	return err;
6529 }
6530 
6531 fs_initcall(trace_events_hist_init);
6532