xref: /linux/kernel/trace/trace_events_hist.c (revision 34f71a4a2de84dde52ccfcb96ce25240ea7981a8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_hist - trace event hist triggers
4  *
5  * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 
21 #include "tracing_map.h"
22 #include "trace.h"
23 #include "trace_dynevent.h"
24 
25 #define SYNTH_SYSTEM		"synthetic"
26 #define SYNTH_FIELDS_MAX	32
27 
28 #define STR_VAR_LEN_MAX		32 /* must be multiple of sizeof(u64) */
29 
30 #define ERRORS								\
31 	C(NONE,			"No error"),				\
32 	C(DUPLICATE_VAR,	"Variable already defined"),		\
33 	C(VAR_NOT_UNIQUE,	"Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
34 	C(TOO_MANY_VARS,	"Too many variables defined"),		\
35 	C(MALFORMED_ASSIGNMENT,	"Malformed assignment"),		\
36 	C(NAMED_MISMATCH,	"Named hist trigger doesn't match existing named trigger (includes variables)"), \
37 	C(TRIGGER_EEXIST,	"Hist trigger already exists"),		\
38 	C(TRIGGER_ENOENT_CLEAR,	"Can't clear or continue a nonexistent hist trigger"), \
39 	C(SET_CLOCK_FAIL,	"Couldn't set trace_clock"),		\
40 	C(BAD_FIELD_MODIFIER,	"Invalid field modifier"),		\
41 	C(TOO_MANY_SUBEXPR,	"Too many subexpressions (3 max)"),	\
42 	C(TIMESTAMP_MISMATCH,	"Timestamp units in expression don't match"), \
43 	C(TOO_MANY_FIELD_VARS,	"Too many field variables defined"),	\
44 	C(EVENT_FILE_NOT_FOUND,	"Event file not found"),		\
45 	C(HIST_NOT_FOUND,	"Matching event histogram not found"),	\
46 	C(HIST_CREATE_FAIL,	"Couldn't create histogram for field"),	\
47 	C(SYNTH_VAR_NOT_FOUND,	"Couldn't find synthetic variable"),	\
48 	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"),	\
49 	C(SYNTH_TYPE_MISMATCH,	"Param type doesn't match synthetic event field type"), \
50 	C(SYNTH_COUNT_MISMATCH,	"Param count doesn't match synthetic event field count"), \
51 	C(FIELD_VAR_PARSE_FAIL,	"Couldn't parse field variable"),	\
52 	C(VAR_CREATE_FIND_FAIL,	"Couldn't create or find variable"),	\
53 	C(ONX_NOT_VAR,		"For onmax(x) or onchange(x), x must be a variable"), \
54 	C(ONX_VAR_NOT_FOUND,	"Couldn't find onmax or onchange variable"), \
55 	C(ONX_VAR_CREATE_FAIL,	"Couldn't create onmax or onchange variable"), \
56 	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"),	\
57 	C(TOO_MANY_PARAMS,	"Too many action params"),		\
58 	C(PARAM_NOT_FOUND,	"Couldn't find param"),			\
59 	C(INVALID_PARAM,	"Invalid action param"),		\
60 	C(ACTION_NOT_FOUND,	"No action found"),			\
61 	C(NO_SAVE_PARAMS,	"No params found for save()"),		\
62 	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
63 	C(ACTION_MISMATCH,	"Handler doesn't support action"),	\
64 	C(NO_CLOSING_PAREN,	"No closing paren found"),		\
65 	C(SUBSYS_NOT_FOUND,	"Missing subsystem"),			\
66 	C(INVALID_SUBSYS_EVENT,	"Invalid subsystem or event name"),	\
67 	C(INVALID_REF_KEY,	"Using variable references in keys not supported"), \
68 	C(VAR_NOT_FOUND,	"Couldn't find variable"),		\
69 	C(FIELD_NOT_FOUND,	"Couldn't find field"),			\
70 	C(EMPTY_ASSIGNMENT,	"Empty assignment"),			\
71 	C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),		\
72 	C(EMPTY_SORT_FIELD,	"Empty sort field"),			\
73 	C(TOO_MANY_SORT_FIELDS,	"Too many sort fields (Max = 2)"),	\
74 	C(INVALID_SORT_FIELD,	"Sort field must be a key or a val"),
75 
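/*
 * The C() x-macro below expands the ERRORS list twice, roughly as:
 *
 *   enum { HIST_ERR_NONE, HIST_ERR_DUPLICATE_VAR, ... };
 *   static const char *err_text[] = { "No error",
 *                                     "Variable already defined", ... };
 *
 * so the HIST_ERR_* ids and their message strings stay in sync by
 * construction; hist_err() hands err_text[] plus a HIST_ERR_* index to
 * tracing_log_err().
 */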
76 #undef C
77 #define C(a, b)		HIST_ERR_##a
78 
79 enum { ERRORS };
80 
81 #undef C
82 #define C(a, b)		b
83 
84 static const char *err_text[] = { ERRORS };
85 
86 struct hist_field;
87 
88 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
89 				struct tracing_map_elt *elt,
90 				struct ring_buffer_event *rbe,
91 				void *event);
92 
93 #define HIST_FIELD_OPERANDS_MAX	2
94 #define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
95 #define HIST_ACTIONS_MAX	8
96 
97 enum field_op_id {
98 	FIELD_OP_NONE,
99 	FIELD_OP_PLUS,
100 	FIELD_OP_MINUS,
101 	FIELD_OP_UNARY_MINUS,
102 };
103 
104 /*
105  * A hist_var (histogram variable) contains variable information for
106  * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
107  * flag set.  A hist_var has a variable name e.g. ts0, and is
108  * associated with a given histogram trigger, as specified by
109  * hist_data.  The hist_var idx is the unique index assigned to the
110  * variable by the hist trigger's tracing_map.  The idx is what is
111  * used to set a variable's value and, by a variable reference, to
112  * retrieve it.
113  */
114 struct hist_var {
115 	char				*name;
116 	struct hist_trigger_data	*hist_data;
117 	unsigned int			idx;
118 };
119 
120 struct hist_field {
121 	struct ftrace_event_field	*field;
122 	unsigned long			flags;
123 	hist_field_fn_t			fn;
124 	unsigned int			size;
125 	unsigned int			offset;
126 	unsigned int                    is_signed;
127 	const char			*type;
128 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
129 	struct hist_trigger_data	*hist_data;
130 
131 	/*
132 	 * Variable fields contain variable-specific info in var.
133 	 */
134 	struct hist_var			var;
135 	enum field_op_id		operator;
136 	char				*system;
137 	char				*event_name;
138 
139 	/*
140 	 * The name field is used for EXPR and VAR_REF fields.  VAR
141 	 * fields contain the variable name in var.name.
142 	 */
143 	char				*name;
144 
145 	/*
146 	 * When a histogram trigger is hit, if it has any references
147 	 * to variables, the values of those variables are collected
148 	 * into a var_ref_vals array by resolve_var_refs().  The
149 	 * current value of each variable is read from the tracing_map
150 	 * using the hist field's hist_var.idx and entered into the
151 	 * var_ref_idx entry, i.e. var_ref_vals[var_ref_idx].
152 	 */
153 	unsigned int			var_ref_idx;
154 	bool                            read_once;
155 };
156 
157 static u64 hist_field_none(struct hist_field *field,
158 			   struct tracing_map_elt *elt,
159 			   struct ring_buffer_event *rbe,
160 			   void *event)
161 {
162 	return 0;
163 }
164 
165 static u64 hist_field_counter(struct hist_field *field,
166 			      struct tracing_map_elt *elt,
167 			      struct ring_buffer_event *rbe,
168 			      void *event)
169 {
170 	return 1;
171 }
172 
173 static u64 hist_field_string(struct hist_field *hist_field,
174 			     struct tracing_map_elt *elt,
175 			     struct ring_buffer_event *rbe,
176 			     void *event)
177 {
178 	char *addr = (char *)(event + hist_field->field->offset);
179 
180 	return (u64)(unsigned long)addr;
181 }
182 
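/*
 * Dynamic string fields store a __data_loc u32 in the record: the low
 * 16 bits give the offset of the string data from the start of the
 * event, the high 16 bits its length.  Only the offset is needed here
 * to locate the string.
 */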
183 static u64 hist_field_dynstring(struct hist_field *hist_field,
184 				struct tracing_map_elt *elt,
185 				struct ring_buffer_event *rbe,
186 				void *event)
187 {
188 	u32 str_item = *(u32 *)(event + hist_field->field->offset);
189 	int str_loc = str_item & 0xffff;
190 	char *addr = (char *)(event + str_loc);
191 
192 	return (u64)(unsigned long)addr;
193 }
194 
195 static u64 hist_field_pstring(struct hist_field *hist_field,
196 			      struct tracing_map_elt *elt,
197 			      struct ring_buffer_event *rbe,
198 			      void *event)
199 {
200 	char **addr = (char **)(event + hist_field->field->offset);
201 
202 	return (u64)(unsigned long)*addr;
203 }
204 
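/*
 * Bucket the operand's value by power of two, e.g. a value of 1500
 * yields ilog2(roundup_pow_of_two(1500)) = ilog2(2048) = 11.
 */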
205 static u64 hist_field_log2(struct hist_field *hist_field,
206 			   struct tracing_map_elt *elt,
207 			   struct ring_buffer_event *rbe,
208 			   void *event)
209 {
210 	struct hist_field *operand = hist_field->operands[0];
211 
212 	u64 val = operand->fn(operand, elt, rbe, event);
213 
214 	return (u64) ilog2(roundup_pow_of_two(val));
215 }
216 
217 static u64 hist_field_plus(struct hist_field *hist_field,
218 			   struct tracing_map_elt *elt,
219 			   struct ring_buffer_event *rbe,
220 			   void *event)
221 {
222 	struct hist_field *operand1 = hist_field->operands[0];
223 	struct hist_field *operand2 = hist_field->operands[1];
224 
225 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
226 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
227 
228 	return val1 + val2;
229 }
230 
231 static u64 hist_field_minus(struct hist_field *hist_field,
232 			    struct tracing_map_elt *elt,
233 			    struct ring_buffer_event *rbe,
234 			    void *event)
235 {
236 	struct hist_field *operand1 = hist_field->operands[0];
237 	struct hist_field *operand2 = hist_field->operands[1];
238 
239 	u64 val1 = operand1->fn(operand1, elt, rbe, event);
240 	u64 val2 = operand2->fn(operand2, elt, rbe, event);
241 
242 	return val1 - val2;
243 }
244 
245 static u64 hist_field_unary_minus(struct hist_field *hist_field,
246 				  struct tracing_map_elt *elt,
247 				  struct ring_buffer_event *rbe,
248 				  void *event)
249 {
250 	struct hist_field *operand = hist_field->operands[0];
251 
252 	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
253 	u64 val = (u64)-sval;
254 
255 	return val;
256 }
257 
258 #define DEFINE_HIST_FIELD_FN(type)					\
259 	static u64 hist_field_##type(struct hist_field *hist_field,	\
260 				     struct tracing_map_elt *elt,	\
261 				     struct ring_buffer_event *rbe,	\
262 				     void *event)			\
263 {									\
264 	type *addr = (type *)(event + hist_field->field->offset);	\
265 									\
266 	return (u64)(unsigned long)*addr;				\
267 }
268 
269 DEFINE_HIST_FIELD_FN(s64);
270 DEFINE_HIST_FIELD_FN(u64);
271 DEFINE_HIST_FIELD_FN(s32);
272 DEFINE_HIST_FIELD_FN(u32);
273 DEFINE_HIST_FIELD_FN(s16);
274 DEFINE_HIST_FIELD_FN(u16);
275 DEFINE_HIST_FIELD_FN(s8);
276 DEFINE_HIST_FIELD_FN(u8);
277 
278 #define for_each_hist_field(i, hist_data)	\
279 	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
280 
281 #define for_each_hist_val_field(i, hist_data)	\
282 	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
283 
284 #define for_each_hist_key_field(i, hist_data)	\
285 	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
286 
287 #define HIST_STACKTRACE_DEPTH	16
288 #define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
289 #define HIST_STACKTRACE_SKIP	5
290 
291 #define HITCOUNT_IDX		0
292 #define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
293 
294 enum hist_field_flags {
295 	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
296 	HIST_FIELD_FL_KEY		= 1 << 1,
297 	HIST_FIELD_FL_STRING		= 1 << 2,
298 	HIST_FIELD_FL_HEX		= 1 << 3,
299 	HIST_FIELD_FL_SYM		= 1 << 4,
300 	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
301 	HIST_FIELD_FL_EXECNAME		= 1 << 6,
302 	HIST_FIELD_FL_SYSCALL		= 1 << 7,
303 	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
304 	HIST_FIELD_FL_LOG2		= 1 << 9,
305 	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
306 	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
307 	HIST_FIELD_FL_VAR		= 1 << 12,
308 	HIST_FIELD_FL_EXPR		= 1 << 13,
309 	HIST_FIELD_FL_VAR_REF		= 1 << 14,
310 	HIST_FIELD_FL_CPU		= 1 << 15,
311 	HIST_FIELD_FL_ALIAS		= 1 << 16,
312 };
313 
314 struct var_defs {
315 	unsigned int	n_vars;
316 	char		*name[TRACING_MAP_VARS_MAX];
317 	char		*expr[TRACING_MAP_VARS_MAX];
318 };
319 
320 struct hist_trigger_attrs {
321 	char		*keys_str;
322 	char		*vals_str;
323 	char		*sort_key_str;
324 	char		*name;
325 	char		*clock;
326 	bool		pause;
327 	bool		cont;
328 	bool		clear;
329 	bool		ts_in_usecs;
330 	unsigned int	map_bits;
331 
332 	char		*assignment_str[TRACING_MAP_VARS_MAX];
333 	unsigned int	n_assignments;
334 
335 	char		*action_str[HIST_ACTIONS_MAX];
336 	unsigned int	n_actions;
337 
338 	struct var_defs	var_defs;
339 };
340 
341 struct field_var {
342 	struct hist_field	*var;
343 	struct hist_field	*val;
344 };
345 
346 struct field_var_hist {
347 	struct hist_trigger_data	*hist_data;
348 	char				*cmd;
349 };
350 
351 struct hist_trigger_data {
352 	struct hist_field               *fields[HIST_FIELDS_MAX];
353 	unsigned int			n_vals;
354 	unsigned int			n_keys;
355 	unsigned int			n_fields;
356 	unsigned int			n_vars;
357 	unsigned int			key_size;
358 	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
359 	unsigned int			n_sort_keys;
360 	struct trace_event_file		*event_file;
361 	struct hist_trigger_attrs	*attrs;
362 	struct tracing_map		*map;
363 	bool				enable_timestamps;
364 	bool				remove;
365 	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
366 	unsigned int			n_var_refs;
367 
368 	struct action_data		*actions[HIST_ACTIONS_MAX];
369 	unsigned int			n_actions;
370 
371 	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
372 	unsigned int			n_field_vars;
373 	unsigned int			n_field_var_str;
374 	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
375 	unsigned int			n_field_var_hists;
376 
377 	struct field_var		*save_vars[SYNTH_FIELDS_MAX];
378 	unsigned int			n_save_vars;
379 	unsigned int			n_save_var_str;
380 };
381 
382 static int synth_event_create(int argc, const char **argv);
383 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
384 static int synth_event_release(struct dyn_event *ev);
385 static bool synth_event_is_busy(struct dyn_event *ev);
386 static bool synth_event_match(const char *system, const char *event,
387 			int argc, const char **argv, struct dyn_event *ev);
388 
389 static struct dyn_event_operations synth_event_ops = {
390 	.create = synth_event_create,
391 	.show = synth_event_show,
392 	.is_busy = synth_event_is_busy,
393 	.free = synth_event_release,
394 	.match = synth_event_match,
395 };
396 
397 struct synth_field {
398 	char *type;
399 	char *name;
400 	size_t size;
401 	bool is_signed;
402 	bool is_string;
403 };
404 
405 struct synth_event {
406 	struct dyn_event			devent;
407 	int					ref;
408 	char					*name;
409 	struct synth_field			**fields;
410 	unsigned int				n_fields;
411 	unsigned int				n_u64;
412 	struct trace_event_class		class;
413 	struct trace_event_call			call;
414 	struct tracepoint			*tp;
415 };
416 
417 static bool is_synth_event(struct dyn_event *ev)
418 {
419 	return ev->ops == &synth_event_ops;
420 }
421 
422 static struct synth_event *to_synth_event(struct dyn_event *ev)
423 {
424 	return container_of(ev, struct synth_event, devent);
425 }
426 
427 static bool synth_event_is_busy(struct dyn_event *ev)
428 {
429 	struct synth_event *event = to_synth_event(ev);
430 
431 	return event->ref != 0;
432 }
433 
434 static bool synth_event_match(const char *system, const char *event,
435 			int argc, const char **argv, struct dyn_event *ev)
436 {
437 	struct synth_event *sev = to_synth_event(ev);
438 
439 	return strcmp(sev->name, event) == 0 &&
440 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
441 }
442 
443 struct action_data;
444 
445 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
446 			     struct tracing_map_elt *elt, void *rec,
447 			     struct ring_buffer_event *rbe, void *key,
448 			     struct action_data *data, u64 *var_ref_vals);
449 
450 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
451 
452 enum handler_id {
453 	HANDLER_ONMATCH = 1,
454 	HANDLER_ONMAX,
455 	HANDLER_ONCHANGE,
456 };
457 
458 enum action_id {
459 	ACTION_SAVE = 1,
460 	ACTION_TRACE,
461 	ACTION_SNAPSHOT,
462 };
463 
464 struct action_data {
465 	enum handler_id		handler;
466 	enum action_id		action;
467 	char			*action_name;
468 	action_fn_t		fn;
469 
470 	unsigned int		n_params;
471 	char			*params[SYNTH_FIELDS_MAX];
472 
473 	/*
474 	 * When a histogram trigger is hit, the values of any
475 	 * references to variables, including variables being passed
476 	 * as parameters to synthetic events, are collected into a
477 	 * var_ref_vals array.  This var_ref_idx is the index of the
478 	 * first param in the array to be passed to the synthetic
479 	 * event invocation.
480 	 */
481 	unsigned int		var_ref_idx;
482 	struct synth_event	*synth_event;
483 	bool			use_trace_keyword;
484 	char			*synth_event_name;
485 
486 	union {
487 		struct {
488 			char			*event;
489 			char			*event_system;
490 		} match_data;
491 
492 		struct {
493 			/*
494 			 * var_str contains the $-unstripped variable
495 			 * name referenced by var_ref, and is used when
496 			 * printing the action.  Because var_ref
497 			 * creation is deferred to create_actions(),
498 			 * we need a per-action way to save it until
499 			 * then, thus var_str.
500 			 */
501 			char			*var_str;
502 
503 			/*
504 			 * var_ref refers to the variable being
505 			 * tracked e.g onmax($var).
506 			 * tracked, e.g. onmax($var).
507 			struct hist_field	*var_ref;
508 
509 			/*
510 			 * track_var contains the 'invisible' tracking
511 			 * variable created to keep the current
512 			 * e.g. max value.
513 			 */
514 			struct hist_field	*track_var;
515 
516 			check_track_val_fn_t	check_val;
517 			action_fn_t		save_data;
518 		} track_data;
519 	};
520 };
521 
522 struct track_data {
523 	u64				track_val;
524 	bool				updated;
525 
526 	unsigned int			key_len;
527 	void				*key;
528 	struct tracing_map_elt		elt;
529 
530 	struct action_data		*action_data;
531 	struct hist_trigger_data	*hist_data;
532 };
533 
534 struct hist_elt_data {
535 	char *comm;
536 	u64 *var_ref_vals;
537 	char *field_var_str[SYNTH_FIELDS_MAX];
538 };
539 
540 struct snapshot_context {
541 	struct tracing_map_elt	*elt;
542 	void			*key;
543 };
544 
545 static void track_data_free(struct track_data *track_data)
546 {
547 	struct hist_elt_data *elt_data;
548 
549 	if (!track_data)
550 		return;
551 
552 	kfree(track_data->key);
553 
554 	elt_data = track_data->elt.private_data;
555 	if (elt_data) {
556 		kfree(elt_data->comm);
557 		kfree(elt_data);
558 	}
559 
560 	kfree(track_data);
561 }
562 
563 static struct track_data *track_data_alloc(unsigned int key_len,
564 					   struct action_data *action_data,
565 					   struct hist_trigger_data *hist_data)
566 {
567 	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
568 	struct hist_elt_data *elt_data;
569 
570 	if (!data)
571 		return ERR_PTR(-ENOMEM);
572 
573 	data->key = kzalloc(key_len, GFP_KERNEL);
574 	if (!data->key) {
575 		track_data_free(data);
576 		return ERR_PTR(-ENOMEM);
577 	}
578 
579 	data->key_len = key_len;
580 	data->action_data = action_data;
581 	data->hist_data = hist_data;
582 
583 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
584 	if (!elt_data) {
585 		track_data_free(data);
586 		return ERR_PTR(-ENOMEM);
587 	}
588 	data->elt.private_data = elt_data;
589 
590 	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
591 	if (!elt_data->comm) {
592 		track_data_free(data);
593 		return ERR_PTR(-ENOMEM);
594 	}
595 
596 	return data;
597 }
598 
599 static char last_cmd[MAX_FILTER_STR_VAL];
600 static char last_cmd_loc[MAX_FILTER_STR_VAL];
601 
602 static int errpos(char *str)
603 {
604 	return err_pos(last_cmd, str);
605 }
606 
607 static void last_cmd_set(struct trace_event_file *file, char *str)
608 {
609 	const char *system = NULL, *name = NULL;
610 	struct trace_event_call *call;
611 
612 	if (!str)
613 		return;
614 
615 	strcpy(last_cmd, "hist:");
616 	strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:"));
617 
618 	if (file) {
619 		call = file->event_call;
620 
621 		system = call->class->system;
622 		if (system) {
623 			name = trace_event_name(call);
624 			if (!name)
625 				system = NULL;
626 		}
627 	}
628 
629 	if (system)
630 		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
631 }
632 
633 static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
634 {
635 	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
636 			err_type, err_pos);
637 }
638 
639 static void hist_err_clear(void)
640 {
641 	last_cmd[0] = '\0';
642 	last_cmd_loc[0] = '\0';
643 }
644 
645 struct synth_trace_event {
646 	struct trace_entry	ent;
647 	u64			fields[];
648 };
649 
650 static int synth_event_define_fields(struct trace_event_call *call)
651 {
652 	struct synth_trace_event trace;
653 	int offset = offsetof(typeof(trace), fields);
654 	struct synth_event *event = call->data;
655 	unsigned int i, size, n_u64;
656 	char *name, *type;
657 	bool is_signed;
658 	int ret = 0;
659 
660 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
661 		size = event->fields[i]->size;
662 		is_signed = event->fields[i]->is_signed;
663 		type = event->fields[i]->type;
664 		name = event->fields[i]->name;
665 		ret = trace_define_field(call, type, name, offset, size,
666 					 is_signed, FILTER_OTHER);
667 		if (ret)
668 			break;
669 
670 		if (event->fields[i]->is_string) {
671 			offset += STR_VAR_LEN_MAX;
672 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
673 		} else {
674 			offset += sizeof(u64);
675 			n_u64++;
676 		}
677 	}
678 
679 	event->n_u64 = n_u64;
680 
681 	return ret;
682 }
683 
684 static bool synth_field_signed(char *type)
685 {
686 	if (str_has_prefix(type, "u"))
687 		return false;
688 	if (strcmp(type, "gfp_t") == 0)
689 		return false;
690 
691 	return true;
692 }
693 
694 static int synth_field_is_string(char *type)
695 {
696 	if (strstr(type, "char[") != NULL)
697 		return true;
698 
699 	return false;
700 }
701 
702 static int synth_field_string_size(char *type)
703 {
704 	char buf[4], *end, *start;
705 	unsigned int len;
706 	int size, err;
707 
708 	start = strstr(type, "char[");
709 	if (start == NULL)
710 		return -EINVAL;
711 	start += sizeof("char[") - 1;
712 
713 	end = strchr(type, ']');
714 	if (!end || end < start)
715 		return -EINVAL;
716 
717 	len = end - start;
718 	if (len > 3)
719 		return -EINVAL;
720 
721 	strncpy(buf, start, len);
722 	buf[len] = '\0';
723 
724 	err = kstrtouint(buf, 0, &size);
725 	if (err)
726 		return err;
727 
728 	if (size > STR_VAR_LEN_MAX)
729 		return -EINVAL;
730 
731 	return size;
732 }
733 
734 static int synth_field_size(char *type)
735 {
736 	int size = 0;
737 
738 	if (strcmp(type, "s64") == 0)
739 		size = sizeof(s64);
740 	else if (strcmp(type, "u64") == 0)
741 		size = sizeof(u64);
742 	else if (strcmp(type, "s32") == 0)
743 		size = sizeof(s32);
744 	else if (strcmp(type, "u32") == 0)
745 		size = sizeof(u32);
746 	else if (strcmp(type, "s16") == 0)
747 		size = sizeof(s16);
748 	else if (strcmp(type, "u16") == 0)
749 		size = sizeof(u16);
750 	else if (strcmp(type, "s8") == 0)
751 		size = sizeof(s8);
752 	else if (strcmp(type, "u8") == 0)
753 		size = sizeof(u8);
754 	else if (strcmp(type, "char") == 0)
755 		size = sizeof(char);
756 	else if (strcmp(type, "unsigned char") == 0)
757 		size = sizeof(unsigned char);
758 	else if (strcmp(type, "int") == 0)
759 		size = sizeof(int);
760 	else if (strcmp(type, "unsigned int") == 0)
761 		size = sizeof(unsigned int);
762 	else if (strcmp(type, "long") == 0)
763 		size = sizeof(long);
764 	else if (strcmp(type, "unsigned long") == 0)
765 		size = sizeof(unsigned long);
766 	else if (strcmp(type, "pid_t") == 0)
767 		size = sizeof(pid_t);
768 	else if (strcmp(type, "gfp_t") == 0)
769 		size = sizeof(gfp_t);
770 	else if (synth_field_is_string(type))
771 		size = synth_field_string_size(type);
772 
773 	return size;
774 }
775 
776 static const char *synth_field_fmt(char *type)
777 {
778 	const char *fmt = "%llu";
779 
780 	if (strcmp(type, "s64") == 0)
781 		fmt = "%lld";
782 	else if (strcmp(type, "u64") == 0)
783 		fmt = "%llu";
784 	else if (strcmp(type, "s32") == 0)
785 		fmt = "%d";
786 	else if (strcmp(type, "u32") == 0)
787 		fmt = "%u";
788 	else if (strcmp(type, "s16") == 0)
789 		fmt = "%d";
790 	else if (strcmp(type, "u16") == 0)
791 		fmt = "%u";
792 	else if (strcmp(type, "s8") == 0)
793 		fmt = "%d";
794 	else if (strcmp(type, "u8") == 0)
795 		fmt = "%u";
796 	else if (strcmp(type, "char") == 0)
797 		fmt = "%d";
798 	else if (strcmp(type, "unsigned char") == 0)
799 		fmt = "%u";
800 	else if (strcmp(type, "int") == 0)
801 		fmt = "%d";
802 	else if (strcmp(type, "unsigned int") == 0)
803 		fmt = "%u";
804 	else if (strcmp(type, "long") == 0)
805 		fmt = "%ld";
806 	else if (strcmp(type, "unsigned long") == 0)
807 		fmt = "%lu";
808 	else if (strcmp(type, "pid_t") == 0)
809 		fmt = "%d";
810 	else if (strcmp(type, "gfp_t") == 0)
811 		fmt = "%x";
812 	else if (synth_field_is_string(type))
813 		fmt = "%s";
814 
815 	return fmt;
816 }
817 
818 static enum print_line_t print_synth_event(struct trace_iterator *iter,
819 					   int flags,
820 					   struct trace_event *event)
821 {
822 	struct trace_array *tr = iter->tr;
823 	struct trace_seq *s = &iter->seq;
824 	struct synth_trace_event *entry;
825 	struct synth_event *se;
826 	unsigned int i, n_u64;
827 	char print_fmt[32];
828 	const char *fmt;
829 
830 	entry = (struct synth_trace_event *)iter->ent;
831 	se = container_of(event, struct synth_event, call.event);
832 
833 	trace_seq_printf(s, "%s: ", se->name);
834 
835 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
836 		if (trace_seq_has_overflowed(s))
837 			goto end;
838 
839 		fmt = synth_field_fmt(se->fields[i]->type);
840 
841 		/* parameter types */
842 		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
843 			trace_seq_printf(s, "%s ", fmt);
844 
845 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
846 
847 		/* parameter values */
848 		if (se->fields[i]->is_string) {
849 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
850 					 (char *)&entry->fields[n_u64],
851 					 i == se->n_fields - 1 ? "" : " ");
852 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
853 		} else {
854 			struct trace_print_flags __flags[] = {
855 			    __def_gfpflag_names, {-1, NULL} };
856 
857 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
858 					 entry->fields[n_u64],
859 					 i == se->n_fields - 1 ? "" : " ");
860 
861 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
862 				trace_seq_puts(s, " (");
863 				trace_print_flags_seq(s, "|",
864 						      entry->fields[n_u64],
865 						      __flags);
866 				trace_seq_putc(s, ')');
867 			}
868 			n_u64++;
869 		}
870 	}
871 end:
872 	trace_seq_putc(s, '\n');
873 
874 	return trace_handle_return(s);
875 }
876 
877 static struct trace_event_functions synth_event_funcs = {
878 	.trace		= print_synth_event
879 };
880 
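/*
 * The "probe" for a synthetic event: pack the previously collected
 * var_ref_vals[] (starting at var_ref_idx) into a synth_trace_event
 * record and commit it to the ring buffer.  String fields occupy a
 * fixed STR_VAR_LEN_MAX bytes; numeric fields are narrowed to their
 * declared size within a u64 slot.
 */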
881 static notrace void trace_event_raw_event_synth(void *__data,
882 						u64 *var_ref_vals,
883 						unsigned int var_ref_idx)
884 {
885 	struct trace_event_file *trace_file = __data;
886 	struct synth_trace_event *entry;
887 	struct trace_event_buffer fbuffer;
888 	struct trace_buffer *buffer;
889 	struct synth_event *event;
890 	unsigned int i, n_u64;
891 	int fields_size = 0;
892 
893 	event = trace_file->event_call->data;
894 
895 	if (trace_trigger_soft_disabled(trace_file))
896 		return;
897 
898 	fields_size = event->n_u64 * sizeof(u64);
899 
900 	/*
901 	 * Avoid ring buffer recursion detection, as this event
902 	 * is being performed within another event.
903 	 */
904 	buffer = trace_file->tr->array_buffer.buffer;
905 	ring_buffer_nest_start(buffer);
906 
907 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
908 					   sizeof(*entry) + fields_size);
909 	if (!entry)
910 		goto out;
911 
912 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
913 		if (event->fields[i]->is_string) {
914 			char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
915 			char *str_field = (char *)&entry->fields[n_u64];
916 
917 			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
918 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
919 		} else {
920 			struct synth_field *field = event->fields[i];
921 			u64 val = var_ref_vals[var_ref_idx + i];
922 
923 			switch (field->size) {
924 			case 1:
925 				*(u8 *)&entry->fields[n_u64] = (u8)val;
926 				break;
927 
928 			case 2:
929 				*(u16 *)&entry->fields[n_u64] = (u16)val;
930 				break;
931 
932 			case 4:
933 				*(u32 *)&entry->fields[n_u64] = (u32)val;
934 				break;
935 
936 			default:
937 				entry->fields[n_u64] = val;
938 				break;
939 			}
940 			n_u64++;
941 		}
942 	}
943 
944 	trace_event_buffer_commit(&fbuffer);
945 out:
946 	ring_buffer_nest_end(buffer);
947 }
948 
949 static void free_synth_event_print_fmt(struct trace_event_call *call)
950 {
951 	if (call) {
952 		kfree(call->print_fmt);
953 		call->print_fmt = NULL;
954 	}
955 }
956 
957 static int __set_synth_event_print_fmt(struct synth_event *event,
958 				       char *buf, int len)
959 {
960 	const char *fmt;
961 	int pos = 0;
962 	int i;
963 
964 	/* When len=0, we just calculate the needed length */
965 #define LEN_OR_ZERO (len ? len - pos : 0)
966 
967 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
968 	for (i = 0; i < event->n_fields; i++) {
969 		fmt = synth_field_fmt(event->fields[i]->type);
970 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
971 				event->fields[i]->name, fmt,
972 				i == event->n_fields - 1 ? "" : ", ");
973 	}
974 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
975 
976 	for (i = 0; i < event->n_fields; i++) {
977 		pos += snprintf(buf + pos, LEN_OR_ZERO,
978 				", REC->%s", event->fields[i]->name);
979 	}
980 
981 #undef LEN_OR_ZERO
982 
983 	/* return the length of print_fmt */
984 	return pos;
985 }
986 
987 static int set_synth_event_print_fmt(struct trace_event_call *call)
988 {
989 	struct synth_event *event = call->data;
990 	char *print_fmt;
991 	int len;
992 
993 	/* First: called with 0 length to calculate the needed length */
994 	len = __set_synth_event_print_fmt(event, NULL, 0);
995 
996 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
997 	if (!print_fmt)
998 		return -ENOMEM;
999 
1000 	/* Second: actually write the @print_fmt */
1001 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
1002 	call->print_fmt = print_fmt;
1003 
1004 	return 0;
1005 }
1006 
1007 static void free_synth_field(struct synth_field *field)
1008 {
1009 	kfree(field->type);
1010 	kfree(field->name);
1011 	kfree(field);
1012 }
1013 
1014 static struct synth_field *parse_synth_field(int argc, const char **argv,
1015 					     int *consumed)
1016 {
1017 	struct synth_field *field;
1018 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
1019 	int len, ret = 0;
1020 
1021 	if (field_type[0] == ';')
1022 		field_type++;
1023 
1024 	if (!strcmp(field_type, "unsigned")) {
1025 		if (argc < 3)
1026 			return ERR_PTR(-EINVAL);
1027 		prefix = "unsigned ";
1028 		field_type = argv[1];
1029 		field_name = argv[2];
1030 		*consumed = 3;
1031 	} else {
1032 		field_name = argv[1];
1033 		*consumed = 2;
1034 	}
1035 
1036 	field = kzalloc(sizeof(*field), GFP_KERNEL);
1037 	if (!field)
1038 		return ERR_PTR(-ENOMEM);
1039 
1040 	len = strlen(field_name);
1041 	array = strchr(field_name, '[');
1042 	if (array)
1043 		len -= strlen(array);
1044 	else if (field_name[len - 1] == ';')
1045 		len--;
1046 
1047 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
1048 	if (!field->name) {
1049 		ret = -ENOMEM;
1050 		goto free;
1051 	}
1052 
1053 	if (field_type[0] == ';')
1054 		field_type++;
1055 	len = strlen(field_type) + 1;
1056 	if (array)
1057 		len += strlen(array);
1058 	if (prefix)
1059 		len += strlen(prefix);
1060 
1061 	field->type = kzalloc(len, GFP_KERNEL);
1062 	if (!field->type) {
1063 		ret = -ENOMEM;
1064 		goto free;
1065 	}
1066 	if (prefix)
1067 		strcat(field->type, prefix);
1068 	strcat(field->type, field_type);
1069 	if (array) {
1070 		strcat(field->type, array);
1071 		if (field->type[len - 1] == ';')
1072 			field->type[len - 1] = '\0';
1073 	}
1074 
1075 	field->size = synth_field_size(field->type);
1076 	if (!field->size) {
1077 		ret = -EINVAL;
1078 		goto free;
1079 	}
1080 
1081 	if (synth_field_is_string(field->type))
1082 		field->is_string = true;
1083 
1084 	field->is_signed = synth_field_signed(field->type);
1085 
1086  out:
1087 	return field;
1088  free:
1089 	free_synth_field(field);
1090 	field = ERR_PTR(ret);
1091 	goto out;
1092 }
1093 
1094 static void free_synth_tracepoint(struct tracepoint *tp)
1095 {
1096 	if (!tp)
1097 		return;
1098 
1099 	kfree(tp->name);
1100 	kfree(tp);
1101 }
1102 
1103 static struct tracepoint *alloc_synth_tracepoint(char *name)
1104 {
1105 	struct tracepoint *tp;
1106 
1107 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
1108 	if (!tp)
1109 		return ERR_PTR(-ENOMEM);
1110 
1111 	tp->name = kstrdup(name, GFP_KERNEL);
1112 	if (!tp->name) {
1113 		kfree(tp);
1114 		return ERR_PTR(-ENOMEM);
1115 	}
1116 
1117 	return tp;
1118 }
1119 
1120 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
1121 				    unsigned int var_ref_idx);
1122 
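/*
 * Invoke any probes registered on the synthetic event's tracepoint by
 * hand, an open-coded version of the probe loop the tracepoint macros
 * normally generate for statically defined trace events.
 */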
1123 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
1124 			       unsigned int var_ref_idx)
1125 {
1126 	struct tracepoint *tp = event->tp;
1127 
1128 	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
1129 		struct tracepoint_func *probe_func_ptr;
1130 		synth_probe_func_t probe_func;
1131 		void *__data;
1132 
1133 		if (!(cpu_online(raw_smp_processor_id())))
1134 			return;
1135 
1136 		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
1137 		if (probe_func_ptr) {
1138 			do {
1139 				probe_func = probe_func_ptr->func;
1140 				__data = probe_func_ptr->data;
1141 				probe_func(__data, var_ref_vals, var_ref_idx);
1142 			} while ((++probe_func_ptr)->func);
1143 		}
1144 	}
1145 }
1146 
1147 static struct synth_event *find_synth_event(const char *name)
1148 {
1149 	struct dyn_event *pos;
1150 	struct synth_event *event;
1151 
1152 	for_each_dyn_event(pos) {
1153 		if (!is_synth_event(pos))
1154 			continue;
1155 		event = to_synth_event(pos);
1156 		if (strcmp(event->name, name) == 0)
1157 			return event;
1158 	}
1159 
1160 	return NULL;
1161 }
1162 
1163 static int register_synth_event(struct synth_event *event)
1164 {
1165 	struct trace_event_call *call = &event->call;
1166 	int ret = 0;
1167 
1168 	event->call.class = &event->class;
1169 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
1170 	if (!event->class.system) {
1171 		ret = -ENOMEM;
1172 		goto out;
1173 	}
1174 
1175 	event->tp = alloc_synth_tracepoint(event->name);
1176 	if (IS_ERR(event->tp)) {
1177 		ret = PTR_ERR(event->tp);
1178 		event->tp = NULL;
1179 		goto out;
1180 	}
1181 
1182 	INIT_LIST_HEAD(&call->class->fields);
1183 	call->event.funcs = &synth_event_funcs;
1184 	call->class->define_fields = synth_event_define_fields;
1185 
1186 	ret = register_trace_event(&call->event);
1187 	if (!ret) {
1188 		ret = -ENODEV;
1189 		goto out;
1190 	}
1191 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
1192 	call->class->reg = trace_event_reg;
1193 	call->class->probe = trace_event_raw_event_synth;
1194 	call->data = event;
1195 	call->tp = event->tp;
1196 
1197 	ret = trace_add_event_call(call);
1198 	if (ret) {
1199 		pr_warn("Failed to register synthetic event: %s\n",
1200 			trace_event_name(call));
1201 		goto err;
1202 	}
1203 
1204 	ret = set_synth_event_print_fmt(call);
1205 	if (ret < 0) {
1206 		trace_remove_event_call(call);
1207 		goto err;
1208 	}
1209  out:
1210 	return ret;
1211  err:
1212 	unregister_trace_event(&call->event);
1213 	goto out;
1214 }
1215 
1216 static int unregister_synth_event(struct synth_event *event)
1217 {
1218 	struct trace_event_call *call = &event->call;
1219 	int ret;
1220 
1221 	ret = trace_remove_event_call(call);
1222 
1223 	return ret;
1224 }
1225 
1226 static void free_synth_event(struct synth_event *event)
1227 {
1228 	unsigned int i;
1229 
1230 	if (!event)
1231 		return;
1232 
1233 	for (i = 0; i < event->n_fields; i++)
1234 		free_synth_field(event->fields[i]);
1235 
1236 	kfree(event->fields);
1237 	kfree(event->name);
1238 	kfree(event->class.system);
1239 	free_synth_tracepoint(event->tp);
1240 	free_synth_event_print_fmt(&event->call);
1241 	kfree(event);
1242 }
1243 
1244 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
1245 					     struct synth_field **fields)
1246 {
1247 	struct synth_event *event;
1248 	unsigned int i;
1249 
1250 	event = kzalloc(sizeof(*event), GFP_KERNEL);
1251 	if (!event) {
1252 		event = ERR_PTR(-ENOMEM);
1253 		goto out;
1254 	}
1255 
1256 	event->name = kstrdup(name, GFP_KERNEL);
1257 	if (!event->name) {
1258 		kfree(event);
1259 		event = ERR_PTR(-ENOMEM);
1260 		goto out;
1261 	}
1262 
1263 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
1264 	if (!event->fields) {
1265 		free_synth_event(event);
1266 		event = ERR_PTR(-ENOMEM);
1267 		goto out;
1268 	}
1269 
1270 	dyn_event_init(&event->devent, &synth_event_ops);
1271 
1272 	for (i = 0; i < n_fields; i++)
1273 		event->fields[i] = fields[i];
1274 
1275 	event->n_fields = n_fields;
1276  out:
1277 	return event;
1278 }
1279 
1280 static void action_trace(struct hist_trigger_data *hist_data,
1281 			 struct tracing_map_elt *elt, void *rec,
1282 			 struct ring_buffer_event *rbe, void *key,
1283 			 struct action_data *data, u64 *var_ref_vals)
1284 {
1285 	struct synth_event *event = data->synth_event;
1286 
1287 	trace_synth(event, var_ref_vals, data->var_ref_idx);
1288 }
1289 
1290 struct hist_var_data {
1291 	struct list_head list;
1292 	struct hist_trigger_data *hist_data;
1293 };
1294 
1295 static int __create_synth_event(int argc, const char *name, const char **argv)
1296 {
1297 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1298 	struct synth_event *event = NULL;
1299 	int i, consumed = 0, n_fields = 0, ret = 0;
1300 
1301 	/*
1302 	 * Argument syntax:
1303 	 *  - Add synthetic event: <event_name> field[;field] ...
1304 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1305 	 *      where 'field' = type field_name
1306 	 */
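	/*
	 * For example, a definition written to the tracefs
	 * 'synthetic_events' file, such as
	 *
	 *   echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
	 *
	 * arrives here (after whitespace splitting) as name
	 * "wakeup_latency" with argv = { "u64", "lat;", "pid_t", "pid" }.
	 */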
1307 
1308 	if (name[0] == '\0' || argc < 1)
1309 		return -EINVAL;
1310 
1311 	mutex_lock(&event_mutex);
1312 
1313 	event = find_synth_event(name);
1314 	if (event) {
1315 		ret = -EEXIST;
1316 		goto out;
1317 	}
1318 
1319 	for (i = 0; i < argc - 1; i++) {
1320 		if (strcmp(argv[i], ";") == 0)
1321 			continue;
1322 		if (n_fields == SYNTH_FIELDS_MAX) {
1323 			ret = -EINVAL;
1324 			goto err;
1325 		}
1326 
1327 		field = parse_synth_field(argc - i, &argv[i], &consumed);
1328 		if (IS_ERR(field)) {
1329 			ret = PTR_ERR(field);
1330 			goto err;
1331 		}
1332 		fields[n_fields++] = field;
1333 		i += consumed - 1;
1334 	}
1335 
1336 	if (i < argc && strcmp(argv[i], ";") != 0) {
1337 		ret = -EINVAL;
1338 		goto err;
1339 	}
1340 
1341 	event = alloc_synth_event(name, n_fields, fields);
1342 	if (IS_ERR(event)) {
1343 		ret = PTR_ERR(event);
1344 		event = NULL;
1345 		goto err;
1346 	}
1347 	ret = register_synth_event(event);
1348 	if (!ret)
1349 		dyn_event_add(&event->devent);
1350 	else
1351 		free_synth_event(event);
1352  out:
1353 	mutex_unlock(&event_mutex);
1354 
1355 	return ret;
1356  err:
1357 	for (i = 0; i < n_fields; i++)
1358 		free_synth_field(fields[i]);
1359 
1360 	goto out;
1361 }
1362 
1363 static int create_or_delete_synth_event(int argc, char **argv)
1364 {
1365 	const char *name = argv[0];
1366 	struct synth_event *event = NULL;
1367 	int ret;
1368 
1369 	/* trace_run_command() ensures argc != 0 */
1370 	if (name[0] == '!') {
1371 		mutex_lock(&event_mutex);
1372 		event = find_synth_event(name + 1);
1373 		if (event) {
1374 			if (event->ref)
1375 				ret = -EBUSY;
1376 			else {
1377 				ret = unregister_synth_event(event);
1378 				if (!ret) {
1379 					dyn_event_remove(&event->devent);
1380 					free_synth_event(event);
1381 				}
1382 			}
1383 		} else
1384 			ret = -ENOENT;
1385 		mutex_unlock(&event_mutex);
1386 		return ret;
1387 	}
1388 
1389 	ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
1390 	return ret == -ECANCELED ? -EINVAL : ret;
1391 }
1392 
1393 int synth_event_run_command(const char *command)
1394 {
1395 	return trace_run_command(command, create_or_delete_synth_event);
1396 }
1397 
1398 static int synth_event_create(int argc, const char **argv)
1399 {
1400 	const char *name = argv[0];
1401 	int len;
1402 
1403 	if (name[0] != 's' || name[1] != ':')
1404 		return -ECANCELED;
1405 	name += 2;
1406 
1407 	/* This interface accepts a group name prefix */
1408 	if (strchr(name, '/')) {
1409 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
1410 		if (len == 0)
1411 			return -EINVAL;
1412 		name += len;
1413 	}
1414 	return __create_synth_event(argc - 1, name, argv + 1);
1415 }
1416 
1417 static int synth_event_release(struct dyn_event *ev)
1418 {
1419 	struct synth_event *event = to_synth_event(ev);
1420 	int ret;
1421 
1422 	if (event->ref)
1423 		return -EBUSY;
1424 
1425 	ret = unregister_synth_event(event);
1426 	if (ret)
1427 		return ret;
1428 
1429 	dyn_event_remove(ev);
1430 	free_synth_event(event);
1431 	return 0;
1432 }
1433 
1434 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
1435 {
1436 	struct synth_field *field;
1437 	unsigned int i;
1438 
1439 	seq_printf(m, "%s\t", event->name);
1440 
1441 	for (i = 0; i < event->n_fields; i++) {
1442 		field = event->fields[i];
1443 
1444 		/* field type and name */
1445 		seq_printf(m, "%s %s%s", field->type, field->name,
1446 			   i == event->n_fields - 1 ? "" : "; ");
1447 	}
1448 
1449 	seq_putc(m, '\n');
1450 
1451 	return 0;
1452 }
1453 
1454 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
1455 {
1456 	struct synth_event *event = to_synth_event(ev);
1457 
1458 	seq_printf(m, "s:%s/", event->class.system);
1459 
1460 	return __synth_event_show(m, event);
1461 }
1462 
1463 static int synth_events_seq_show(struct seq_file *m, void *v)
1464 {
1465 	struct dyn_event *ev = v;
1466 
1467 	if (!is_synth_event(ev))
1468 		return 0;
1469 
1470 	return __synth_event_show(m, to_synth_event(ev));
1471 }
1472 
1473 static const struct seq_operations synth_events_seq_op = {
1474 	.start	= dyn_event_seq_start,
1475 	.next	= dyn_event_seq_next,
1476 	.stop	= dyn_event_seq_stop,
1477 	.show	= synth_events_seq_show,
1478 };
1479 
1480 static int synth_events_open(struct inode *inode, struct file *file)
1481 {
1482 	int ret;
1483 
1484 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1485 	if (ret)
1486 		return ret;
1487 
1488 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1489 		ret = dyn_events_release_all(&synth_event_ops);
1490 		if (ret < 0)
1491 			return ret;
1492 	}
1493 
1494 	return seq_open(file, &synth_events_seq_op);
1495 }
1496 
1497 static ssize_t synth_events_write(struct file *file,
1498 				  const char __user *buffer,
1499 				  size_t count, loff_t *ppos)
1500 {
1501 	return trace_parse_run_command(file, buffer, count, ppos,
1502 				       create_or_delete_synth_event);
1503 }
1504 
1505 static const struct file_operations synth_events_fops = {
1506 	.open           = synth_events_open,
1507 	.write		= synth_events_write,
1508 	.read           = seq_read,
1509 	.llseek         = seq_lseek,
1510 	.release        = seq_release,
1511 };
1512 
1513 static u64 hist_field_timestamp(struct hist_field *hist_field,
1514 				struct tracing_map_elt *elt,
1515 				struct ring_buffer_event *rbe,
1516 				void *event)
1517 {
1518 	struct hist_trigger_data *hist_data = hist_field->hist_data;
1519 	struct trace_array *tr = hist_data->event_file->tr;
1520 
1521 	u64 ts = ring_buffer_event_time_stamp(rbe);
1522 
1523 	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
1524 		ts = ns2usecs(ts);
1525 
1526 	return ts;
1527 }
1528 
1529 static u64 hist_field_cpu(struct hist_field *hist_field,
1530 			  struct tracing_map_elt *elt,
1531 			  struct ring_buffer_event *rbe,
1532 			  void *event)
1533 {
1534 	int cpu = smp_processor_id();
1535 
1536 	return cpu;
1537 }
1538 
1539 /**
1540  * check_field_for_var_ref - Check if a VAR_REF field references a variable
1541  * @hist_field: The VAR_REF field to check
1542  * @var_data: The hist trigger that owns the variable
1543  * @var_idx: The trigger variable identifier
1544  *
1545  * Check the given VAR_REF field to see whether or not it references
1546  * the given variable associated with the given trigger.
1547  *
1548  * Return: The VAR_REF field if it does reference the variable, NULL if not
1549  */
1550 static struct hist_field *
1551 check_field_for_var_ref(struct hist_field *hist_field,
1552 			struct hist_trigger_data *var_data,
1553 			unsigned int var_idx)
1554 {
1555 	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
1556 
1557 	if (hist_field && hist_field->var.idx == var_idx &&
1558 	    hist_field->var.hist_data == var_data)
1559 		return hist_field;
1560 
1561 	return NULL;
1562 }
1563 
1564 /**
1565  * find_var_ref - Check if a trigger has a reference to a trigger variable
1566  * @hist_data: The hist trigger that might have a reference to the variable
1567  * @var_data: The hist trigger that owns the variable
1568  * @var_idx: The trigger variable identifier
1569  *
1570  * Check the list of var_refs[] on the first hist trigger to see
1571  * whether any of them are references to the variable on the second
1572  * trigger.
1573  *
1574  * Return: The VAR_REF field referencing the variable if so, NULL if not
1575  */
1576 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
1577 				       struct hist_trigger_data *var_data,
1578 				       unsigned int var_idx)
1579 {
1580 	struct hist_field *hist_field;
1581 	unsigned int i;
1582 
1583 	for (i = 0; i < hist_data->n_var_refs; i++) {
1584 		hist_field = hist_data->var_refs[i];
1585 		if (check_field_for_var_ref(hist_field, var_data, var_idx))
1586 			return hist_field;
1587 	}
1588 
1589 	return NULL;
1590 }
1591 
1592 /**
1593  * find_any_var_ref - Check if there is a reference to a given trigger variable
1594  * @hist_data: The hist trigger
1595  * @var_idx: The trigger variable identifier
1596  *
1597  * Check to see whether the given variable is currently referenced by
1598  * any other trigger.
1599  *
1600  * The trigger the variable is defined on is explicitly excluded - the
1601  * assumption being that a self-reference doesn't prevent a trigger
1602  * from being removed.
1603  *
1604  * Return: The VAR_REF field referencing the variable if so, NULL if not
1605  */
1606 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
1607 					   unsigned int var_idx)
1608 {
1609 	struct trace_array *tr = hist_data->event_file->tr;
1610 	struct hist_field *found = NULL;
1611 	struct hist_var_data *var_data;
1612 
1613 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1614 		if (var_data->hist_data == hist_data)
1615 			continue;
1616 		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
1617 		if (found)
1618 			break;
1619 	}
1620 
1621 	return found;
1622 }
1623 
1624 /**
1625  * check_var_refs - Check if there is a reference to any of trigger's variables
1626  * @hist_data: The hist trigger
1627  *
1628  * A trigger can define one or more variables.  If any one of them is
1629  * currently referenced by any other trigger, this function will
1630  * determine that.
1631 
1632  *
1633  * - if there are any references to a trigger's variables, it cannot.
1634  *
1635  * Return: True if there is a reference to any of trigger's variables
1636  */
1637 static bool check_var_refs(struct hist_trigger_data *hist_data)
1638 {
1639 	struct hist_field *field;
1640 	bool found = false;
1641 	int i;
1642 
1643 	for_each_hist_field(i, hist_data) {
1644 		field = hist_data->fields[i];
1645 		if (field && field->flags & HIST_FIELD_FL_VAR) {
1646 			if (find_any_var_ref(hist_data, field->var.idx)) {
1647 				found = true;
1648 				break;
1649 			}
1650 		}
1651 	}
1652 
1653 	return found;
1654 }
1655 
1656 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
1657 {
1658 	struct trace_array *tr = hist_data->event_file->tr;
1659 	struct hist_var_data *var_data, *found = NULL;
1660 
1661 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1662 		if (var_data->hist_data == hist_data) {
1663 			found = var_data;
1664 			break;
1665 		}
1666 	}
1667 
1668 	return found;
1669 }
1670 
1671 static bool field_has_hist_vars(struct hist_field *hist_field,
1672 				unsigned int level)
1673 {
1674 	int i;
1675 
1676 	if (level > 3)
1677 		return false;
1678 
1679 	if (!hist_field)
1680 		return false;
1681 
1682 	if (hist_field->flags & HIST_FIELD_FL_VAR ||
1683 	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
1684 		return true;
1685 
1686 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1687 		struct hist_field *operand;
1688 
1689 		operand = hist_field->operands[i];
1690 		if (field_has_hist_vars(operand, level + 1))
1691 			return true;
1692 	}
1693 
1694 	return false;
1695 }
1696 
1697 static bool has_hist_vars(struct hist_trigger_data *hist_data)
1698 {
1699 	struct hist_field *hist_field;
1700 	int i;
1701 
1702 	for_each_hist_field(i, hist_data) {
1703 		hist_field = hist_data->fields[i];
1704 		if (field_has_hist_vars(hist_field, 0))
1705 			return true;
1706 	}
1707 
1708 	return false;
1709 }
1710 
1711 static int save_hist_vars(struct hist_trigger_data *hist_data)
1712 {
1713 	struct trace_array *tr = hist_data->event_file->tr;
1714 	struct hist_var_data *var_data;
1715 
1716 	var_data = find_hist_vars(hist_data);
1717 	if (var_data)
1718 		return 0;
1719 
1720 	if (tracing_check_open_get_tr(tr))
1721 		return -ENODEV;
1722 
1723 	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
1724 	if (!var_data) {
1725 		trace_array_put(tr);
1726 		return -ENOMEM;
1727 	}
1728 
1729 	var_data->hist_data = hist_data;
1730 	list_add(&var_data->list, &tr->hist_vars);
1731 
1732 	return 0;
1733 }
1734 
1735 static void remove_hist_vars(struct hist_trigger_data *hist_data)
1736 {
1737 	struct trace_array *tr = hist_data->event_file->tr;
1738 	struct hist_var_data *var_data;
1739 
1740 	var_data = find_hist_vars(hist_data);
1741 	if (!var_data)
1742 		return;
1743 
1744 	if (WARN_ON(check_var_refs(hist_data)))
1745 		return;
1746 
1747 	list_del(&var_data->list);
1748 
1749 	kfree(var_data);
1750 
1751 	trace_array_put(tr);
1752 }
1753 
1754 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
1755 					 const char *var_name)
1756 {
1757 	struct hist_field *hist_field, *found = NULL;
1758 	int i;
1759 
1760 	for_each_hist_field(i, hist_data) {
1761 		hist_field = hist_data->fields[i];
1762 		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
1763 		    strcmp(hist_field->var.name, var_name) == 0) {
1764 			found = hist_field;
1765 			break;
1766 		}
1767 	}
1768 
1769 	return found;
1770 }
1771 
1772 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
1773 				   struct trace_event_file *file,
1774 				   const char *var_name)
1775 {
1776 	struct hist_trigger_data *test_data;
1777 	struct event_trigger_data *test;
1778 	struct hist_field *hist_field;
1779 
1780 	lockdep_assert_held(&event_mutex);
1781 
1782 	hist_field = find_var_field(hist_data, var_name);
1783 	if (hist_field)
1784 		return hist_field;
1785 
1786 	list_for_each_entry(test, &file->triggers, list) {
1787 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1788 			test_data = test->private_data;
1789 			hist_field = find_var_field(test_data, var_name);
1790 			if (hist_field)
1791 				return hist_field;
1792 		}
1793 	}
1794 
1795 	return NULL;
1796 }
1797 
1798 static struct trace_event_file *find_var_file(struct trace_array *tr,
1799 					      char *system,
1800 					      char *event_name,
1801 					      char *var_name)
1802 {
1803 	struct hist_trigger_data *var_hist_data;
1804 	struct hist_var_data *var_data;
1805 	struct trace_event_file *file, *found = NULL;
1806 
1807 	if (system)
1808 		return find_event_file(tr, system, event_name);
1809 
1810 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1811 		var_hist_data = var_data->hist_data;
1812 		file = var_hist_data->event_file;
1813 		if (file == found)
1814 			continue;
1815 
1816 		if (find_var_field(var_hist_data, var_name)) {
1817 			if (found) {
1818 				hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
1819 				return NULL;
1820 			}
1821 
1822 			found = file;
1823 		}
1824 	}
1825 
1826 	return found;
1827 }
1828 
1829 static struct hist_field *find_file_var(struct trace_event_file *file,
1830 					const char *var_name)
1831 {
1832 	struct hist_trigger_data *test_data;
1833 	struct event_trigger_data *test;
1834 	struct hist_field *hist_field;
1835 
1836 	lockdep_assert_held(&event_mutex);
1837 
1838 	list_for_each_entry(test, &file->triggers, list) {
1839 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1840 			test_data = test->private_data;
1841 			hist_field = find_var_field(test_data, var_name);
1842 			if (hist_field)
1843 				return hist_field;
1844 		}
1845 	}
1846 
1847 	return NULL;
1848 }
1849 
1850 static struct hist_field *
1851 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1852 {
1853 	struct trace_array *tr = hist_data->event_file->tr;
1854 	struct hist_field *hist_field, *found = NULL;
1855 	struct trace_event_file *file;
1856 	unsigned int i;
1857 
1858 	for (i = 0; i < hist_data->n_actions; i++) {
1859 		struct action_data *data = hist_data->actions[i];
1860 
1861 		if (data->handler == HANDLER_ONMATCH) {
1862 			char *system = data->match_data.event_system;
1863 			char *event_name = data->match_data.event;
1864 
1865 			file = find_var_file(tr, system, event_name, var_name);
1866 			if (!file)
1867 				continue;
1868 			hist_field = find_file_var(file, var_name);
1869 			if (hist_field) {
1870 				if (found) {
1871 					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
1872 						 errpos(var_name));
1873 					return ERR_PTR(-EINVAL);
1874 				}
1875 
1876 				found = hist_field;
1877 			}
1878 		}
1879 	}
1880 	return found;
1881 }
1882 
1883 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
1884 					 char *system,
1885 					 char *event_name,
1886 					 char *var_name)
1887 {
1888 	struct trace_array *tr = hist_data->event_file->tr;
1889 	struct hist_field *hist_field = NULL;
1890 	struct trace_event_file *file;
1891 
1892 	if (!system || !event_name) {
1893 		hist_field = find_match_var(hist_data, var_name);
1894 		if (IS_ERR(hist_field))
1895 			return NULL;
1896 		if (hist_field)
1897 			return hist_field;
1898 	}
1899 
1900 	file = find_var_file(tr, system, event_name, var_name);
1901 	if (!file)
1902 		return NULL;
1903 
1904 	hist_field = find_file_var(file, var_name);
1905 
1906 	return hist_field;
1907 }
1908 
1909 static u64 hist_field_var_ref(struct hist_field *hist_field,
1910 			      struct tracing_map_elt *elt,
1911 			      struct ring_buffer_event *rbe,
1912 			      void *event)
1913 {
1914 	struct hist_elt_data *elt_data;
1915 	u64 var_val = 0;
1916 
1917 	if (WARN_ON_ONCE(!elt))
1918 		return var_val;
1919 
1920 	elt_data = elt->private_data;
1921 	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1922 
1923 	return var_val;
1924 }
1925 
1926 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
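/*
 * Fill var_ref_vals[] with the current value of each variable
 * referenced by hist_data, read from the owning trigger's tracing_map
 * entry for @key.  With @self true, only references to this trigger's
 * own variables are resolved; with @self false, only references to
 * other triggers' variables are.  Returns false if any reference
 * cannot be resolved yet.
 */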
1927 			     u64 *var_ref_vals, bool self)
1928 {
1929 	struct hist_trigger_data *var_data;
1930 	struct tracing_map_elt *var_elt;
1931 	struct hist_field *hist_field;
1932 	unsigned int i, var_idx;
1933 	bool resolved = true;
1934 	u64 var_val = 0;
1935 
1936 	for (i = 0; i < hist_data->n_var_refs; i++) {
1937 		hist_field = hist_data->var_refs[i];
1938 		var_idx = hist_field->var.idx;
1939 		var_data = hist_field->var.hist_data;
1940 
1941 		if (var_data == NULL) {
1942 			resolved = false;
1943 			break;
1944 		}
1945 
1946 		if ((self && var_data != hist_data) ||
1947 		    (!self && var_data == hist_data))
1948 			continue;
1949 
1950 		var_elt = tracing_map_lookup(var_data->map, key);
1951 		if (!var_elt) {
1952 			resolved = false;
1953 			break;
1954 		}
1955 
1956 		if (!tracing_map_var_set(var_elt, var_idx)) {
1957 			resolved = false;
1958 			break;
1959 		}
1960 
1961 		if (self || !hist_field->read_once)
1962 			var_val = tracing_map_read_var(var_elt, var_idx);
1963 		else
1964 			var_val = tracing_map_read_var_once(var_elt, var_idx);
1965 
1966 		var_ref_vals[i] = var_val;
1967 	}
1968 
1969 	return resolved;
1970 }
1971 
1972 static const char *hist_field_name(struct hist_field *field,
1973 				   unsigned int level)
1974 {
1975 	const char *field_name = "";
1976 
1977 	if (level > 1)
1978 		return field_name;
1979 
1980 	if (field->field)
1981 		field_name = field->field->name;
1982 	else if (field->flags & HIST_FIELD_FL_LOG2 ||
1983 		 field->flags & HIST_FIELD_FL_ALIAS)
1984 		field_name = hist_field_name(field->operands[0], ++level);
1985 	else if (field->flags & HIST_FIELD_FL_CPU)
1986 		field_name = "cpu";
1987 	else if (field->flags & HIST_FIELD_FL_EXPR ||
1988 		 field->flags & HIST_FIELD_FL_VAR_REF) {
1989 		if (field->system) {
1990 			static char full_name[MAX_FILTER_STR_VAL];
1991 
1992 			strcat(full_name, field->system);
1993 			strcat(full_name, ".");
1994 			strcat(full_name, field->event_name);
1995 			strcat(full_name, ".");
1996 			strcat(full_name, field->name);
1997 			field_name = full_name;
1998 		} else
1999 			field_name = field->name;
2000 	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
2001 		field_name = "common_timestamp";
2002 
2003 	if (field_name == NULL)
2004 		field_name = "";
2005 
2006 	return field_name;
2007 }
2008 
2009 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
2010 {
2011 	hist_field_fn_t fn = NULL;
2012 
2013 	switch (field_size) {
2014 	case 8:
2015 		if (field_is_signed)
2016 			fn = hist_field_s64;
2017 		else
2018 			fn = hist_field_u64;
2019 		break;
2020 	case 4:
2021 		if (field_is_signed)
2022 			fn = hist_field_s32;
2023 		else
2024 			fn = hist_field_u32;
2025 		break;
2026 	case 2:
2027 		if (field_is_signed)
2028 			fn = hist_field_s16;
2029 		else
2030 			fn = hist_field_u16;
2031 		break;
2032 	case 1:
2033 		if (field_is_signed)
2034 			fn = hist_field_s8;
2035 		else
2036 			fn = hist_field_u8;
2037 		break;
2038 	}
2039 
2040 	return fn;
2041 }
2042 
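/*
 * Convert a 'size=N' trigger attribute into tracing_map bits: e.g. a
 * (hypothetical) 'size=2048' is rounded up to a power of two and
 * becomes ilog2(2048) = 11, which must lie within
 * [TRACING_MAP_BITS_MIN, TRACING_MAP_BITS_MAX] to be accepted.
 */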
2043 static int parse_map_size(char *str)
2044 {
2045 	unsigned long size, map_bits;
2046 	int ret;
2047 
2048 	ret = kstrtoul(str, 0, &size);
2049 	if (ret)
2050 		goto out;
2051 
2052 	map_bits = ilog2(roundup_pow_of_two(size));
2053 	if (map_bits < TRACING_MAP_BITS_MIN ||
2054 	    map_bits > TRACING_MAP_BITS_MAX)
2055 		ret = -EINVAL;
2056 	else
2057 		ret = map_bits;
2058  out:
2059 	return ret;
2060 }
2061 
2062 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
2063 {
2064 	unsigned int i;
2065 
2066 	if (!attrs)
2067 		return;
2068 
2069 	for (i = 0; i < attrs->n_assignments; i++)
2070 		kfree(attrs->assignment_str[i]);
2071 
2072 	for (i = 0; i < attrs->n_actions; i++)
2073 		kfree(attrs->action_str[i]);
2074 
2075 	kfree(attrs->name);
2076 	kfree(attrs->sort_key_str);
2077 	kfree(attrs->keys_str);
2078 	kfree(attrs->vals_str);
2079 	kfree(attrs->clock);
2080 	kfree(attrs);
2081 }
2082 
2083 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
2084 {
2085 	int ret = -EINVAL;
2086 
2087 	if (attrs->n_actions >= HIST_ACTIONS_MAX)
2088 		return ret;
2089 
2090 	if ((str_has_prefix(str, "onmatch(")) ||
2091 	    (str_has_prefix(str, "onmax(")) ||
2092 	    (str_has_prefix(str, "onchange("))) {
2093 		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
2094 		if (!attrs->action_str[attrs->n_actions]) {
2095 			ret = -ENOMEM;
2096 			return ret;
2097 		}
2098 		attrs->n_actions++;
2099 		ret = 0;
2100 	}
2101 	return ret;
2102 }
2103 
2104 static int parse_assignment(struct trace_array *tr,
2105 			    char *str, struct hist_trigger_attrs *attrs)
2106 {
2107 	int len, ret = 0;
2108 
2109 	if ((len = str_has_prefix(str, "key=")) ||
2110 	    (len = str_has_prefix(str, "keys="))) {
2111 		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
2112 		if (!attrs->keys_str) {
2113 			ret = -ENOMEM;
2114 			goto out;
2115 		}
2116 	} else if ((len = str_has_prefix(str, "val=")) ||
2117 		   (len = str_has_prefix(str, "vals=")) ||
2118 		   (len = str_has_prefix(str, "values="))) {
2119 		attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
2120 		if (!attrs->vals_str) {
2121 			ret = -ENOMEM;
2122 			goto out;
2123 		}
2124 	} else if ((len = str_has_prefix(str, "sort="))) {
2125 		attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
2126 		if (!attrs->sort_key_str) {
2127 			ret = -ENOMEM;
2128 			goto out;
2129 		}
2130 	} else if (str_has_prefix(str, "name=")) {
2131 		attrs->name = kstrdup(str, GFP_KERNEL);
2132 		if (!attrs->name) {
2133 			ret = -ENOMEM;
2134 			goto out;
2135 		}
2136 	} else if ((len = str_has_prefix(str, "clock="))) {
2137 		str += len;
2138 
2139 		str = strstrip(str);
2140 		attrs->clock = kstrdup(str, GFP_KERNEL);
2141 		if (!attrs->clock) {
2142 			ret = -ENOMEM;
2143 			goto out;
2144 		}
2145 	} else if ((len = str_has_prefix(str, "size="))) {
2146 		int map_bits = parse_map_size(str + len);
2147 
2148 		if (map_bits < 0) {
2149 			ret = map_bits;
2150 			goto out;
2151 		}
2152 		attrs->map_bits = map_bits;
2153 	} else {
2154 		char *assignment;
2155 
2156 		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
2157 			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
2158 			ret = -EINVAL;
2159 			goto out;
2160 		}
2161 
2162 		assignment = kstrdup(str, GFP_KERNEL);
2163 		if (!assignment) {
2164 			ret = -ENOMEM;
2165 			goto out;
2166 		}
2167 
2168 		attrs->assignment_str[attrs->n_assignments++] = assignment;
2169 	}
2170  out:
2171 	return ret;
2172 }
2173 
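/*
 * parse_hist_trigger_attrs() below splits the trigger string on ':' and
 * handles each piece: assignments go to parse_assignment(), the bare
 * keywords 'pause', 'cont'/'continue' and 'clear' set flags, and
 * anything else is tried as an action via parse_action().  An
 * illustrative (hypothetical) trigger:
 *
 *   # echo 'hist:keys=pid:vals=hitcount:sort=hitcount:size=2048' > \
 *         events/sched/sched_switch/trigger
 *
 * An assignment whose left-hand side isn't one of the recognized
 * keywords (key/keys, val/vals/values, sort, name, clock, size) is
 * treated as a variable definition and saved in assignment_str[].
 */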
2174 static struct hist_trigger_attrs *
2175 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
2176 {
2177 	struct hist_trigger_attrs *attrs;
2178 	int ret = 0;
2179 
2180 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
2181 	if (!attrs)
2182 		return ERR_PTR(-ENOMEM);
2183 
2184 	while (trigger_str) {
2185 		char *str = strsep(&trigger_str, ":");
2186 		char *rhs;
2187 
2188 		rhs = strchr(str, '=');
2189 		if (rhs) {
2190 			if (!strlen(++rhs)) {
2191 				ret = -EINVAL;
2192 				hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
2193 				goto free;
2194 			}
2195 			ret = parse_assignment(tr, str, attrs);
2196 			if (ret)
2197 				goto free;
2198 		} else if (strcmp(str, "pause") == 0)
2199 			attrs->pause = true;
2200 		else if ((strcmp(str, "cont") == 0) ||
2201 			 (strcmp(str, "continue") == 0))
2202 			attrs->cont = true;
2203 		else if (strcmp(str, "clear") == 0)
2204 			attrs->clear = true;
2205 		else {
2206 			ret = parse_action(str, attrs);
2207 			if (ret)
2208 				goto free;
2209 		}
2210 	}
2211 
2212 	if (!attrs->keys_str) {
2213 		ret = -EINVAL;
2214 		goto free;
2215 	}
2216 
2217 	if (!attrs->clock) {
2218 		attrs->clock = kstrdup("global", GFP_KERNEL);
2219 		if (!attrs->clock) {
2220 			ret = -ENOMEM;
2221 			goto free;
2222 		}
2223 	}
2224 
2225 	return attrs;
2226  free:
2227 	destroy_hist_trigger_attrs(attrs);
2228 
2229 	return ERR_PTR(ret);
2230 }
2231 
2232 static inline void save_comm(char *comm, struct task_struct *task)
2233 {
2234 	if (!task->pid) {
2235 		strcpy(comm, "<idle>");
2236 		return;
2237 	}
2238 
2239 	if (WARN_ON_ONCE(task->pid < 0)) {
2240 		strcpy(comm, "<XXX>");
2241 		return;
2242 	}
2243 
2244 	strncpy(comm, task->comm, TASK_COMM_LEN);
2245 }
2246 
2247 static void hist_elt_data_free(struct hist_elt_data *elt_data)
2248 {
2249 	unsigned int i;
2250 
2251 	for (i = 0; i < SYNTH_FIELDS_MAX; i++)
2252 		kfree(elt_data->field_var_str[i]);
2253 
2254 	kfree(elt_data->comm);
2255 	kfree(elt_data);
2256 }
2257 
2258 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
2259 {
2260 	struct hist_elt_data *elt_data = elt->private_data;
2261 
2262 	hist_elt_data_free(elt_data);
2263 }
2264 
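/*
 * Per-element private data: a comm buffer is allocated only if some key
 * uses the .execname modifier, and one STR_VAR_LEN_MAX string slot is
 * allocated per string-typed field/save variable so those values can
 * outlive the event record they were copied from.
 */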
2265 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
2266 {
2267 	struct hist_trigger_data *hist_data = elt->map->private_data;
2268 	unsigned int size = TASK_COMM_LEN;
2269 	struct hist_elt_data *elt_data;
2270 	struct hist_field *key_field;
2271 	unsigned int i, n_str;
2272 
2273 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
2274 	if (!elt_data)
2275 		return -ENOMEM;
2276 
2277 	for_each_hist_key_field(i, hist_data) {
2278 		key_field = hist_data->fields[i];
2279 
2280 		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
2281 			elt_data->comm = kzalloc(size, GFP_KERNEL);
2282 			if (!elt_data->comm) {
2283 				kfree(elt_data);
2284 				return -ENOMEM;
2285 			}
2286 			break;
2287 		}
2288 	}
2289 
2290 	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str;
2291 
2292 	size = STR_VAR_LEN_MAX;
2293 
2294 	for (i = 0; i < n_str; i++) {
2295 		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
2296 		if (!elt_data->field_var_str[i]) {
2297 			hist_elt_data_free(elt_data);
2298 			return -ENOMEM;
2299 		}
2300 	}
2301 
2302 	elt->private_data = elt_data;
2303 
2304 	return 0;
2305 }
2306 
2307 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
2308 {
2309 	struct hist_elt_data *elt_data = elt->private_data;
2310 
2311 	if (elt_data->comm)
2312 		save_comm(elt_data->comm, current);
2313 }
2314 
2315 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
2316 	.elt_alloc	= hist_trigger_elt_data_alloc,
2317 	.elt_free	= hist_trigger_elt_data_free,
2318 	.elt_init	= hist_trigger_elt_data_init,
2319 };
2320 
2321 static const char *get_hist_field_flags(struct hist_field *hist_field)
2322 {
2323 	const char *flags_str = NULL;
2324 
2325 	if (hist_field->flags & HIST_FIELD_FL_HEX)
2326 		flags_str = "hex";
2327 	else if (hist_field->flags & HIST_FIELD_FL_SYM)
2328 		flags_str = "sym";
2329 	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
2330 		flags_str = "sym-offset";
2331 	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
2332 		flags_str = "execname";
2333 	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
2334 		flags_str = "syscall";
2335 	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
2336 		flags_str = "log2";
2337 	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2338 		flags_str = "usecs";
2339 
2340 	return flags_str;
2341 }
2342 
2343 static void expr_field_str(struct hist_field *field, char *expr)
2344 {
2345 	if (field->flags & HIST_FIELD_FL_VAR_REF)
2346 		strcat(expr, "$");
2347 
2348 	strcat(expr, hist_field_name(field, 0));
2349 
2350 	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
2351 		const char *flags_str = get_hist_field_flags(field);
2352 
2353 		if (flags_str) {
2354 			strcat(expr, ".");
2355 			strcat(expr, flags_str);
2356 		}
2357 	}
2358 }
2359 
2360 static char *expr_str(struct hist_field *field, unsigned int level)
2361 {
2362 	char *expr;
2363 
2364 	if (level > 1)
2365 		return NULL;
2366 
2367 	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2368 	if (!expr)
2369 		return NULL;
2370 
2371 	if (!field->operands[0]) {
2372 		expr_field_str(field, expr);
2373 		return expr;
2374 	}
2375 
2376 	if (field->operator == FIELD_OP_UNARY_MINUS) {
2377 		char *subexpr;
2378 
2379 		strcat(expr, "-(");
2380 		subexpr = expr_str(field->operands[0], ++level);
2381 		if (!subexpr) {
2382 			kfree(expr);
2383 			return NULL;
2384 		}
2385 		strcat(expr, subexpr);
2386 		strcat(expr, ")");
2387 
2388 		kfree(subexpr);
2389 
2390 		return expr;
2391 	}
2392 
2393 	expr_field_str(field->operands[0], expr);
2394 
2395 	switch (field->operator) {
2396 	case FIELD_OP_MINUS:
2397 		strcat(expr, "-");
2398 		break;
2399 	case FIELD_OP_PLUS:
2400 		strcat(expr, "+");
2401 		break;
2402 	default:
2403 		kfree(expr);
2404 		return NULL;
2405 	}
2406 
2407 	expr_field_str(field->operands[1], expr);
2408 
2409 	return expr;
2410 }
2411 
2412 static int contains_operator(char *str)
2413 {
2414 	enum field_op_id field_op = FIELD_OP_NONE;
2415 	char *op;
2416 
2417 	op = strpbrk(str, "+-");
2418 	if (!op)
2419 		return FIELD_OP_NONE;
2420 
2421 	switch (*op) {
2422 	case '-':
2423 		if (*str == '-')
2424 			field_op = FIELD_OP_UNARY_MINUS;
2425 		else
2426 			field_op = FIELD_OP_MINUS;
2427 		break;
2428 	case '+':
2429 		field_op = FIELD_OP_PLUS;
2430 		break;
2431 	default:
2432 		break;
2433 	}
2434 
2435 	return field_op;
2436 }
2437 
2438 static void __destroy_hist_field(struct hist_field *hist_field)
2439 {
2440 	kfree(hist_field->var.name);
2441 	kfree(hist_field->name);
2442 	kfree(hist_field->type);
2443 
2444 	kfree(hist_field);
2445 }
2446 
2447 static void destroy_hist_field(struct hist_field *hist_field,
2448 			       unsigned int level)
2449 {
2450 	unsigned int i;
2451 
2452 	if (level > 3)
2453 		return;
2454 
2455 	if (!hist_field)
2456 		return;
2457 
2458 	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
2459 		return; /* var refs will be destroyed separately */
2460 
2461 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
2462 		destroy_hist_field(hist_field->operands[i], level + 1);
2463 
2464 	__destroy_hist_field(hist_field);
2465 }
2466 
2467 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
2468 					    struct ftrace_event_field *field,
2469 					    unsigned long flags,
2470 					    char *var_name)
2471 {
2472 	struct hist_field *hist_field;
2473 
2474 	if (field && is_function_field(field))
2475 		return NULL;
2476 
2477 	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2478 	if (!hist_field)
2479 		return NULL;
2480 
2481 	hist_field->hist_data = hist_data;
2482 
2483 	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
2484 		goto out; /* caller will populate */
2485 
2486 	if (flags & HIST_FIELD_FL_VAR_REF) {
2487 		hist_field->fn = hist_field_var_ref;
2488 		goto out;
2489 	}
2490 
2491 	if (flags & HIST_FIELD_FL_HITCOUNT) {
2492 		hist_field->fn = hist_field_counter;
2493 		hist_field->size = sizeof(u64);
2494 		hist_field->type = kstrdup("u64", GFP_KERNEL);
2495 		if (!hist_field->type)
2496 			goto free;
2497 		goto out;
2498 	}
2499 
2500 	if (flags & HIST_FIELD_FL_STACKTRACE) {
2501 		hist_field->fn = hist_field_none;
2502 		goto out;
2503 	}
2504 
2505 	if (flags & HIST_FIELD_FL_LOG2) {
2506 		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
2507 		hist_field->fn = hist_field_log2;
2508 		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
		if (!hist_field->operands[0])
			goto free;
2509 		hist_field->size = hist_field->operands[0]->size;
2510 		hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
2511 		if (!hist_field->type)
2512 			goto free;
2513 		goto out;
2514 	}
2515 
2516 	if (flags & HIST_FIELD_FL_TIMESTAMP) {
2517 		hist_field->fn = hist_field_timestamp;
2518 		hist_field->size = sizeof(u64);
2519 		hist_field->type = kstrdup("u64", GFP_KERNEL);
2520 		if (!hist_field->type)
2521 			goto free;
2522 		goto out;
2523 	}
2524 
2525 	if (flags & HIST_FIELD_FL_CPU) {
2526 		hist_field->fn = hist_field_cpu;
2527 		hist_field->size = sizeof(int);
2528 		hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
2529 		if (!hist_field->type)
2530 			goto free;
2531 		goto out;
2532 	}
2533 
2534 	if (WARN_ON_ONCE(!field))
2535 		goto out;
2536 
2537 	if (is_string_field(field)) {
2538 		flags |= HIST_FIELD_FL_STRING;
2539 
2540 		hist_field->size = MAX_FILTER_STR_VAL;
2541 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
2542 		if (!hist_field->type)
2543 			goto free;
2544 
2545 		if (field->filter_type == FILTER_STATIC_STRING)
2546 			hist_field->fn = hist_field_string;
2547 		else if (field->filter_type == FILTER_DYN_STRING)
2548 			hist_field->fn = hist_field_dynstring;
2549 		else
2550 			hist_field->fn = hist_field_pstring;
2551 	} else {
2552 		hist_field->size = field->size;
2553 		hist_field->is_signed = field->is_signed;
2554 		hist_field->type = kstrdup(field->type, GFP_KERNEL);
2555 		if (!hist_field->type)
2556 			goto free;
2557 
2558 		hist_field->fn = select_value_fn(field->size,
2559 						 field->is_signed);
2560 		if (!hist_field->fn) {
2561 			destroy_hist_field(hist_field, 0);
2562 			return NULL;
2563 		}
2564 	}
2565  out:
2566 	hist_field->field = field;
2567 	hist_field->flags = flags;
2568 
2569 	if (var_name) {
2570 		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
2571 		if (!hist_field->var.name)
2572 			goto free;
2573 	}
2574 
2575 	return hist_field;
2576  free:
2577 	destroy_hist_field(hist_field, 0);
2578 	return NULL;
2579 }
2580 
2581 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
2582 {
2583 	unsigned int i;
2584 
2585 	for (i = 0; i < HIST_FIELDS_MAX; i++) {
2586 		if (hist_data->fields[i]) {
2587 			destroy_hist_field(hist_data->fields[i], 0);
2588 			hist_data->fields[i] = NULL;
2589 		}
2590 	}
2591 
2592 	for (i = 0; i < hist_data->n_var_refs; i++) {
2593 		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
2594 		__destroy_hist_field(hist_data->var_refs[i]);
2595 		hist_data->var_refs[i] = NULL;
2596 	}
2597 }
2598 
2599 static int init_var_ref(struct hist_field *ref_field,
2600 			struct hist_field *var_field,
2601 			char *system, char *event_name)
2602 {
2603 	int err = 0;
2604 
2605 	ref_field->var.idx = var_field->var.idx;
2606 	ref_field->var.hist_data = var_field->hist_data;
2607 	ref_field->size = var_field->size;
2608 	ref_field->is_signed = var_field->is_signed;
2609 	ref_field->flags |= var_field->flags &
2610 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2611 
2612 	if (system) {
2613 		ref_field->system = kstrdup(system, GFP_KERNEL);
2614 		if (!ref_field->system)
2615 			return -ENOMEM;
2616 	}
2617 
2618 	if (event_name) {
2619 		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
2620 		if (!ref_field->event_name) {
2621 			err = -ENOMEM;
2622 			goto free;
2623 		}
2624 	}
2625 
2626 	if (var_field->var.name) {
2627 		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
2628 		if (!ref_field->name) {
2629 			err = -ENOMEM;
2630 			goto free;
2631 		}
2632 	} else if (var_field->name) {
2633 		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
2634 		if (!ref_field->name) {
2635 			err = -ENOMEM;
2636 			goto free;
2637 		}
2638 	}
2639 
2640 	ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
2641 	if (!ref_field->type) {
2642 		err = -ENOMEM;
2643 		goto free;
2644 	}
2645  out:
2646 	return err;
2647  free:
2648 	kfree(ref_field->system);
2649 	kfree(ref_field->event_name);
2650 	kfree(ref_field->name);
2651 
2652 	goto out;
2653 }
2654 
2655 /**
2656  * create_var_ref - Create a variable reference and attach it to trigger
2657  * @hist_data: The trigger that will be referencing the variable
2658  * @var_field: The VAR field to create a reference to
2659  * @system: The optional system string
2660  * @event_name: The optional event_name string
2661  *
2662  * Given a variable hist_field, create a VAR_REF hist_field that
2663  * represents a reference to it.
2664  *
2665  * This function also adds the reference to the trigger that
2666  * now references the variable.
2667  *
2668  * Return: The VAR_REF field if successful, NULL if not
2669  */
2670 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
2671 					 struct hist_field *var_field,
2672 					 char *system, char *event_name)
2673 {
2674 	unsigned long flags = HIST_FIELD_FL_VAR_REF;
2675 	struct hist_field *ref_field;
2676 
2677 	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
2678 	if (ref_field) {
2679 		if (init_var_ref(ref_field, var_field, system, event_name)) {
2680 			destroy_hist_field(ref_field, 0);
2681 			return NULL;
2682 		}
2683 
2684 		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
2685 		ref_field->var_ref_idx = hist_data->n_var_refs++;
2686 	}
2687 
2688 	return ref_field;
2689 }
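
/*
 * For example (hypothetical names): when a trigger's expression or
 * action uses '$wakeup_lat', create_var_ref() above builds a VAR_REF
 * field pointing at the defining trigger's variable slot and records it
 * in var_refs[]; resolve_var_refs() later fills the corresponding
 * var_ref_vals[] entry with the variable's current value.
 */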
2690 
2691 static bool is_var_ref(char *var_name)
2692 {
2693 	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
2694 		return false;
2695 
2696 	return true;
2697 }
2698 
2699 static char *field_name_from_var(struct hist_trigger_data *hist_data,
2700 				 char *var_name)
2701 {
2702 	char *name, *field;
2703 	unsigned int i;
2704 
2705 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
2706 		name = hist_data->attrs->var_defs.name[i];
2707 
2708 		if (strcmp(var_name, name) == 0) {
2709 			field = hist_data->attrs->var_defs.expr[i];
2710 			if (contains_operator(field) || is_var_ref(field))
2711 				continue;
2712 			return field;
2713 		}
2714 	}
2715 
2716 	return NULL;
2717 }
2718 
2719 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
2720 				 char *system, char *event_name,
2721 				 char *var_name)
2722 {
2723 	struct trace_event_call *call;
2724 
2725 	if (system && event_name) {
2726 		call = hist_data->event_file->event_call;
2727 
2728 		if (strcmp(system, call->class->system) != 0)
2729 			return NULL;
2730 
2731 		if (strcmp(event_name, trace_event_name(call)) != 0)
2732 			return NULL;
2733 	}
2734 
2735 	if (!!system != !!event_name)
2736 		return NULL;
2737 
2738 	if (!is_var_ref(var_name))
2739 		return NULL;
2740 
2741 	var_name++;
2742 
2743 	return field_name_from_var(hist_data, var_name);
2744 }
2745 
2746 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2747 					char *system, char *event_name,
2748 					char *var_name)
2749 {
2750 	struct hist_field *var_field = NULL, *ref_field = NULL;
2751 	struct trace_array *tr = hist_data->event_file->tr;
2752 
2753 	if (!is_var_ref(var_name))
2754 		return NULL;
2755 
2756 	var_name++;
2757 
2758 	var_field = find_event_var(hist_data, system, event_name, var_name);
2759 	if (var_field)
2760 		ref_field = create_var_ref(hist_data, var_field,
2761 					   system, event_name);
2762 
2763 	if (!ref_field)
2764 		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
2765 
2766 	return ref_field;
2767 }
2768 
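/*
 * Parse a 'field.modifier' atom, e.g. (hypothetically) 'bytes_req.log2'
 * or 'common_pid.execname'.  The modifier only sets a flag bit here; it
 * takes effect when the field value is later resolved or printed.
 */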
2769 static struct ftrace_event_field *
2770 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2771 	    char *field_str, unsigned long *flags)
2772 {
2773 	struct ftrace_event_field *field = NULL;
2774 	char *field_name, *modifier, *str;
2775 	struct trace_array *tr = file->tr;
2776 
2777 	modifier = str = kstrdup(field_str, GFP_KERNEL);
2778 	if (!modifier)
2779 		return ERR_PTR(-ENOMEM);
2780 
2781 	field_name = strsep(&modifier, ".");
2782 	if (modifier) {
2783 		if (strcmp(modifier, "hex") == 0)
2784 			*flags |= HIST_FIELD_FL_HEX;
2785 		else if (strcmp(modifier, "sym") == 0)
2786 			*flags |= HIST_FIELD_FL_SYM;
2787 		else if (strcmp(modifier, "sym-offset") == 0)
2788 			*flags |= HIST_FIELD_FL_SYM_OFFSET;
2789 		else if ((strcmp(modifier, "execname") == 0) &&
2790 			 (strcmp(field_name, "common_pid") == 0))
2791 			*flags |= HIST_FIELD_FL_EXECNAME;
2792 		else if (strcmp(modifier, "syscall") == 0)
2793 			*flags |= HIST_FIELD_FL_SYSCALL;
2794 		else if (strcmp(modifier, "log2") == 0)
2795 			*flags |= HIST_FIELD_FL_LOG2;
2796 		else if (strcmp(modifier, "usecs") == 0)
2797 			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2798 		else {
2799 			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
2800 			field = ERR_PTR(-EINVAL);
2801 			goto out;
2802 		}
2803 	}
2804 
2805 	if (strcmp(field_name, "common_timestamp") == 0) {
2806 		*flags |= HIST_FIELD_FL_TIMESTAMP;
2807 		hist_data->enable_timestamps = true;
2808 		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2809 			hist_data->attrs->ts_in_usecs = true;
2810 	} else if (strcmp(field_name, "cpu") == 0)
2811 		*flags |= HIST_FIELD_FL_CPU;
2812 	else {
2813 		field = trace_find_event_field(file->event_call, field_name);
2814 		if (!field || !field->size) {
2815 			hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
2816 			field = ERR_PTR(-EINVAL);
2817 			goto out;
2818 		}
2819 	}
2820  out:
2821 	kfree(str);
2822 
2823 	return field;
2824 }
2825 
2826 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2827 				       struct hist_field *var_ref,
2828 				       char *var_name)
2829 {
2830 	struct hist_field *alias = NULL;
2831 	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2832 
2833 	alias = create_hist_field(hist_data, NULL, flags, var_name);
2834 	if (!alias)
2835 		return NULL;
2836 
2837 	alias->fn = var_ref->fn;
2838 	alias->operands[0] = var_ref;
2839 
2840 	if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2841 		destroy_hist_field(alias, 0);
2842 		return NULL;
2843 	}
2844 
2845 	alias->var_ref_idx = var_ref->var_ref_idx;
2846 
2847 	return alias;
2848 }
2849 
2850 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2851 				     struct trace_event_file *file, char *str,
2852 				     unsigned long *flags, char *var_name)
2853 {
2854 	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2855 	struct ftrace_event_field *field = NULL;
2856 	struct hist_field *hist_field = NULL;
2857 	int ret = 0;
2858 
2859 	s = strchr(str, '.');
2860 	if (s) {
2861 		s = strchr(++s, '.');
2862 		if (s) {
2863 			ref_system = strsep(&str, ".");
2864 			if (!str) {
2865 				ret = -EINVAL;
2866 				goto out;
2867 			}
2868 			ref_event = strsep(&str, ".");
2869 			if (!str) {
2870 				ret = -EINVAL;
2871 				goto out;
2872 			}
2873 			ref_var = str;
2874 		}
2875 	}
2876 
2877 	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2878 	if (!s) {
2879 		hist_field = parse_var_ref(hist_data, ref_system,
2880 					   ref_event, ref_var);
2881 		if (hist_field) {
2882 			if (var_name) {
2883 				hist_field = create_alias(hist_data, hist_field, var_name);
2884 				if (!hist_field) {
2885 					ret = -ENOMEM;
2886 					goto out;
2887 				}
2888 			}
2889 			return hist_field;
2890 		}
2891 	} else
2892 		str = s;
2893 
2894 	field = parse_field(hist_data, file, str, flags);
2895 	if (IS_ERR(field)) {
2896 		ret = PTR_ERR(field);
2897 		goto out;
2898 	}
2899 
2900 	hist_field = create_hist_field(hist_data, field, *flags, var_name);
2901 	if (!hist_field) {
2902 		ret = -ENOMEM;
2903 		goto out;
2904 	}
2905 
2906 	return hist_field;
2907  out:
2908 	return ERR_PTR(ret);
2909 }
2910 
2911 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2912 				     struct trace_event_file *file,
2913 				     char *str, unsigned long flags,
2914 				     char *var_name, unsigned int level);
2915 
2916 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2917 				      struct trace_event_file *file,
2918 				      char *str, unsigned long flags,
2919 				      char *var_name, unsigned int level)
2920 {
2921 	struct hist_field *operand1, *expr = NULL;
2922 	unsigned long operand_flags;
2923 	int ret = 0;
2924 	char *s;
2925 
2926 	/* we support only -(xxx) i.e. explicit parens required */
2927 
2928 	if (level > 3) {
2929 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2930 		ret = -EINVAL;
2931 		goto free;
2932 	}
2933 
2934 	str++; /* skip leading '-' */
2935 
2936 	s = strchr(str, '(');
2937 	if (s)
2938 		str++;
2939 	else {
2940 		ret = -EINVAL;
2941 		goto free;
2942 	}
2943 
2944 	s = strrchr(str, ')');
2945 	if (s)
2946 		*s = '\0';
2947 	else {
2948 		ret = -EINVAL; /* no closing ')' */
2949 		goto free;
2950 	}
2951 
2952 	flags |= HIST_FIELD_FL_EXPR;
2953 	expr = create_hist_field(hist_data, NULL, flags, var_name);
2954 	if (!expr) {
2955 		ret = -ENOMEM;
2956 		goto free;
2957 	}
2958 
2959 	operand_flags = 0;
2960 	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2961 	if (IS_ERR(operand1)) {
2962 		ret = PTR_ERR(operand1);
2963 		goto free;
2964 	}
2965 
2966 	expr->flags |= operand1->flags &
2967 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2968 	expr->fn = hist_field_unary_minus;
2969 	expr->operands[0] = operand1;
2970 	expr->operator = FIELD_OP_UNARY_MINUS;
2971 	expr->name = expr_str(expr, 0);
2972 	expr->type = kstrdup(operand1->type, GFP_KERNEL);
2973 	if (!expr->type) {
2974 		ret = -ENOMEM;
2975 		goto free;
2976 	}
2977 
2978 	return expr;
2979  free:
2980 	destroy_hist_field(expr, 0);
2981 	return ERR_PTR(ret);
2982 }
2983 
2984 static int check_expr_operands(struct trace_array *tr,
2985 			       struct hist_field *operand1,
2986 			       struct hist_field *operand2)
2987 {
2988 	unsigned long operand1_flags = operand1->flags;
2989 	unsigned long operand2_flags = operand2->flags;
2990 
2991 	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
2992 	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
2993 		struct hist_field *var;
2994 
2995 		var = find_var_field(operand1->var.hist_data, operand1->name);
2996 		if (!var)
2997 			return -EINVAL;
2998 		operand1_flags = var->flags;
2999 	}
3000 
3001 	if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
3002 	    (operand2_flags & HIST_FIELD_FL_ALIAS)) {
3003 		struct hist_field *var;
3004 
3005 		var = find_var_field(operand2->var.hist_data, operand2->name);
3006 		if (!var)
3007 			return -EINVAL;
3008 		operand2_flags = var->flags;
3009 	}
3010 
3011 	if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
3012 	    (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
3013 		hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
3014 		return -EINVAL;
3015 	}
3016 
3017 	return 0;
3018 }
3019 
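/*
 * Recursively parse an expression string.  Only '+' and '-' are
 * supported, e.g. (hypothetically) '$ts1-$ts0', or
 * '-(common_timestamp-$ts0)' for unary minus, and nesting is capped at
 * three levels via the 'level' argument so the recursion stays bounded.
 */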
3020 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
3021 				     struct trace_event_file *file,
3022 				     char *str, unsigned long flags,
3023 				     char *var_name, unsigned int level)
3024 {
3025 	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
3026 	unsigned long operand_flags;
3027 	int field_op, ret = -EINVAL;
3028 	char *sep, *operand1_str;
3029 
3030 	if (level > 3) {
3031 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
3032 		return ERR_PTR(-EINVAL);
3033 	}
3034 
3035 	field_op = contains_operator(str);
3036 
3037 	if (field_op == FIELD_OP_NONE)
3038 		return parse_atom(hist_data, file, str, &flags, var_name);
3039 
3040 	if (field_op == FIELD_OP_UNARY_MINUS)
3041 		return parse_unary(hist_data, file, str, flags, var_name, ++level);
3042 
3043 	switch (field_op) {
3044 	case FIELD_OP_MINUS:
3045 		sep = "-";
3046 		break;
3047 	case FIELD_OP_PLUS:
3048 		sep = "+";
3049 		break;
3050 	default:
3051 		goto free;
3052 	}
3053 
3054 	operand1_str = strsep(&str, sep);
3055 	if (!operand1_str || !str)
3056 		goto free;
3057 
3058 	operand_flags = 0;
3059 	operand1 = parse_atom(hist_data, file, operand1_str,
3060 			      &operand_flags, NULL);
3061 	if (IS_ERR(operand1)) {
3062 		ret = PTR_ERR(operand1);
3063 		operand1 = NULL;
3064 		goto free;
3065 	}
3066 
3067 	/* rest of string could be another expression e.g. b+c in a+b+c */
3068 	operand_flags = 0;
3069 	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
3070 	if (IS_ERR(operand2)) {
3071 		ret = PTR_ERR(operand2);
3072 		operand2 = NULL;
3073 		goto free;
3074 	}
3075 
3076 	ret = check_expr_operands(file->tr, operand1, operand2);
3077 	if (ret)
3078 		goto free;
3079 
3080 	flags |= HIST_FIELD_FL_EXPR;
3081 
3082 	flags |= operand1->flags &
3083 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3084 
3085 	expr = create_hist_field(hist_data, NULL, flags, var_name);
3086 	if (!expr) {
3087 		ret = -ENOMEM;
3088 		goto free;
3089 	}
3090 
3091 	operand1->read_once = true;
3092 	operand2->read_once = true;
3093 
3094 	expr->operands[0] = operand1;
3095 	expr->operands[1] = operand2;

	/*
	 * The operands now belong to 'expr' and are freed with it, so
	 * clear the locals to avoid a double free on the error path.
	 */
	operand1 = NULL;
	operand2 = NULL;

3096 	expr->operator = field_op;
3097 	expr->name = expr_str(expr, 0);
3098 	expr->type = kstrdup(expr->operands[0]->type, GFP_KERNEL);
3099 	if (!expr->type) {
3100 		ret = -ENOMEM;
3101 		goto free;
3102 	}
3103 
3104 	switch (field_op) {
3105 	case FIELD_OP_MINUS:
3106 		expr->fn = hist_field_minus;
3107 		break;
3108 	case FIELD_OP_PLUS:
3109 		expr->fn = hist_field_plus;
3110 		break;
3111 	default:
3112 		ret = -EINVAL;
3113 		goto free;
3114 	}
3115 
3116 	return expr;
3117  free:
3118 	destroy_hist_field(operand1, 0);
3119 	destroy_hist_field(operand2, 0);
3120 	destroy_hist_field(expr, 0);
3121 
3122 	return ERR_PTR(ret);
3123 }
3124 
3125 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
3126 				 struct trace_event_file *file)
3127 {
3128 	struct event_trigger_data *test;
3129 
3130 	lockdep_assert_held(&event_mutex);
3131 
3132 	list_for_each_entry(test, &file->triggers, list) {
3133 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3134 			if (test->private_data == hist_data)
3135 				return test->filter_str;
3136 		}
3137 	}
3138 
3139 	return NULL;
3140 }
3141 
3142 static struct event_command trigger_hist_cmd;
3143 static int event_hist_trigger_func(struct event_command *cmd_ops,
3144 				   struct trace_event_file *file,
3145 				   char *glob, char *cmd, char *param);
3146 
3147 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
3148 			    struct hist_trigger_data *hist_data,
3149 			    unsigned int n_keys)
3150 {
3151 	struct hist_field *target_hist_field, *hist_field;
3152 	unsigned int n, i, j;
3153 
3154 	if (hist_data->n_fields - hist_data->n_vals != n_keys)
3155 		return false;
3156 
3157 	i = hist_data->n_vals;
3158 	j = target_hist_data->n_vals;
3159 
3160 	for (n = 0; n < n_keys; n++) {
3161 		hist_field = hist_data->fields[i + n];
3162 		target_hist_field = target_hist_data->fields[j + n];
3163 
3164 		if (strcmp(hist_field->type, target_hist_field->type) != 0)
3165 			return false;
3166 		if (hist_field->size != target_hist_field->size)
3167 			return false;
3168 		if (hist_field->is_signed != target_hist_field->is_signed)
3169 			return false;
3170 	}
3171 
3172 	return true;
3173 }
3174 
3175 static struct hist_trigger_data *
3176 find_compatible_hist(struct hist_trigger_data *target_hist_data,
3177 		     struct trace_event_file *file)
3178 {
3179 	struct hist_trigger_data *hist_data;
3180 	struct event_trigger_data *test;
3181 	unsigned int n_keys;
3182 
3183 	lockdep_assert_held(&event_mutex);
3184 
3185 	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
3186 
3187 	list_for_each_entry(test, &file->triggers, list) {
3188 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3189 			hist_data = test->private_data;
3190 
3191 			if (compatible_keys(target_hist_data, hist_data, n_keys))
3192 				return hist_data;
3193 		}
3194 	}
3195 
3196 	return NULL;
3197 }
3198 
3199 static struct trace_event_file *event_file(struct trace_array *tr,
3200 					   char *system, char *event_name)
3201 {
3202 	struct trace_event_file *file;
3203 
3204 	file = __find_event_file(tr, system, event_name);
3205 	if (!file)
3206 		return ERR_PTR(-EINVAL);
3207 
3208 	return file;
3209 }
3210 
3211 static struct hist_field *
3212 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
3213 			 char *system, char *event_name, char *field_name)
3214 {
3215 	struct hist_field *event_var;
3216 	char *synthetic_name;
3217 
3218 	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3219 	if (!synthetic_name)
3220 		return ERR_PTR(-ENOMEM);
3221 
3222 	strcpy(synthetic_name, "synthetic_");
3223 	strcat(synthetic_name, field_name);
3224 
3225 	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
3226 
3227 	kfree(synthetic_name);
3228 
3229 	return event_var;
3230 }
3231 
3232 /**
3233  * create_field_var_hist - Automatically create a histogram and var for a field
3234  * @target_hist_data: The target hist trigger
3235  * @subsys_name: Optional subsystem name
3236  * @event_name: Optional event name
3237  * @field_name: The name of the field (and the resulting variable)
3238  *
3239  * Hist trigger actions fetch data from variables, not directly from
3240  * events.  However, for convenience, users are allowed to directly
3241  * specify an event field in an action, which will be automatically
3242  * converted into a variable on their behalf.
3243  *
3244  * If a user specifies a field on an event other than the one the
3245  * histogram is currently being defined on (the target event
3246  * histogram), the only way that can be accomplished is if a new hist
3247  * trigger is created and the field variable defined on that.
3248  *
3249  * This function creates a new histogram compatible with the target
3250  * event (meaning a histogram with the same key as the target
3251  * histogram), and creates a variable for the specified field, but
3252  * with 'synthetic_' prepended to the variable name in order to avoid
3253  * collision with normal field variables.
3254  *
3255  * Return: The variable created for the field.
3256  */
3257 static struct hist_field *
3258 create_field_var_hist(struct hist_trigger_data *target_hist_data,
3259 		      char *subsys_name, char *event_name, char *field_name)
3260 {
3261 	struct trace_array *tr = target_hist_data->event_file->tr;
3262 	struct hist_field *event_var = ERR_PTR(-EINVAL);
3263 	struct hist_trigger_data *hist_data;
3264 	unsigned int i, n, first = true;
3265 	struct field_var_hist *var_hist;
3266 	struct trace_event_file *file;
3267 	struct hist_field *key_field;
3268 	char *saved_filter;
3269 	char *cmd;
3270 	int ret;
3271 
3272 	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
3273 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3274 		return ERR_PTR(-EINVAL);
3275 	}
3276 
3277 	file = event_file(tr, subsys_name, event_name);
3278 
3279 	if (IS_ERR(file)) {
3280 		hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
3281 		ret = PTR_ERR(file);
3282 		return ERR_PTR(ret);
3283 	}
3284 
3285 	/*
3286 	 * Look for a histogram compatible with target.  We'll use the
3287 	 * found histogram specification to create a new matching
3288 	 * histogram with our variable on it.  target_hist_data is not
3289 	 * yet a registered histogram so we can't use that.
3290 	 */
3291 	hist_data = find_compatible_hist(target_hist_data, file);
3292 	if (!hist_data) {
3293 		hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
3294 		return ERR_PTR(-EINVAL);
3295 	}
3296 
3297 	/* See if a synthetic field variable has already been created */
3298 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3299 					     event_name, field_name);
3300 	if (!IS_ERR_OR_NULL(event_var))
3301 		return event_var;
3302 
3303 	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
3304 	if (!var_hist)
3305 		return ERR_PTR(-ENOMEM);
3306 
3307 	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3308 	if (!cmd) {
3309 		kfree(var_hist);
3310 		return ERR_PTR(-ENOMEM);
3311 	}
3312 
3313 	/* Use the same keys as the compatible histogram */
3314 	strcat(cmd, "keys=");
3315 
3316 	for_each_hist_key_field(i, hist_data) {
3317 		key_field = hist_data->fields[i];
3318 		if (!first)
3319 			strcat(cmd, ",");
3320 		strcat(cmd, key_field->field->name);
3321 		first = false;
3322 	}
3323 
3324 	/* Create the synthetic field variable specification */
3325 	strcat(cmd, ":synthetic_");
3326 	strcat(cmd, field_name);
3327 	strcat(cmd, "=");
3328 	strcat(cmd, field_name);
3329 
3330 	/* Use the same filter as the compatible histogram */
3331 	saved_filter = find_trigger_filter(hist_data, file);
3332 	if (saved_filter) {
3333 		strcat(cmd, " if ");
3334 		strcat(cmd, saved_filter);
3335 	}
3336 
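	/*
	 * At this point cmd holds a complete trigger spec for the other
	 * event, for example (hypothetical names and filter):
	 *
	 *   keys=pid:synthetic_prio=prio if prio<100
	 *
	 * which is registered below exactly as if it had been written to
	 * that event's trigger file.
	 */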
3337 	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
3338 	if (!var_hist->cmd) {
3339 		kfree(cmd);
3340 		kfree(var_hist);
3341 		return ERR_PTR(-ENOMEM);
3342 	}
3343 
3344 	/* Save the compatible histogram information */
3345 	var_hist->hist_data = hist_data;
3346 
3347 	/* Create the new histogram with our variable */
3348 	ret = event_hist_trigger_func(&trigger_hist_cmd, file,
3349 				      "", "hist", cmd);
3350 	if (ret) {
3351 		kfree(cmd);
3352 		kfree(var_hist->cmd);
3353 		kfree(var_hist);
3354 		hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
3355 		return ERR_PTR(ret);
3356 	}
3357 
3358 	kfree(cmd);
3359 
3360 	/* If we can't find the variable, something went wrong */
3361 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3362 					     event_name, field_name);
3363 	if (IS_ERR_OR_NULL(event_var)) {
3364 		kfree(var_hist->cmd);
3365 		kfree(var_hist);
3366 		hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
3367 		return ERR_PTR(-EINVAL);
3368 	}
3369 
3370 	n = target_hist_data->n_field_var_hists;
3371 	target_hist_data->field_var_hists[n] = var_hist;
3372 	target_hist_data->n_field_var_hists++;
3373 
3374 	return event_var;
3375 }
3376 
3377 static struct hist_field *
3378 find_target_event_var(struct hist_trigger_data *hist_data,
3379 		      char *subsys_name, char *event_name, char *var_name)
3380 {
3381 	struct trace_event_file *file = hist_data->event_file;
3382 	struct hist_field *hist_field = NULL;
3383 
3384 	if (subsys_name) {
3385 		struct trace_event_call *call;
3386 
3387 		if (!event_name)
3388 			return NULL;
3389 
3390 		call = file->event_call;
3391 
3392 		if (strcmp(subsys_name, call->class->system) != 0)
3393 			return NULL;
3394 
3395 		if (strcmp(event_name, trace_event_name(call)) != 0)
3396 			return NULL;
3397 	}
3398 
3399 	hist_field = find_var_field(hist_data, var_name);
3400 
3401 	return hist_field;
3402 }
3403 
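/*
 * Copy the current value of each field variable into this map element's
 * variable slots.  String values are first copied into the element's
 * preallocated field_var_str[] storage (starting at
 * field_var_str_start) so they remain valid after the event record is
 * gone.
 */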
3404 static inline void __update_field_vars(struct tracing_map_elt *elt,
3405 				       struct ring_buffer_event *rbe,
3406 				       void *rec,
3407 				       struct field_var **field_vars,
3408 				       unsigned int n_field_vars,
3409 				       unsigned int field_var_str_start)
3410 {
3411 	struct hist_elt_data *elt_data = elt->private_data;
3412 	unsigned int i, j, var_idx;
3413 	u64 var_val;
3414 
3415 	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
3416 		struct field_var *field_var = field_vars[i];
3417 		struct hist_field *var = field_var->var;
3418 		struct hist_field *val = field_var->val;
3419 
3420 		var_val = val->fn(val, elt, rbe, rec);
3421 		var_idx = var->var.idx;
3422 
3423 		if (val->flags & HIST_FIELD_FL_STRING) {
3424 			char *str = elt_data->field_var_str[j++];
3425 			char *val_str = (char *)(uintptr_t)var_val;
3426 
3427 			strscpy(str, val_str, STR_VAR_LEN_MAX);
3428 			var_val = (u64)(uintptr_t)str;
3429 		}
3430 		tracing_map_set_var(elt, var_idx, var_val);
3431 	}
3432 }
3433 
3434 static void update_field_vars(struct hist_trigger_data *hist_data,
3435 			      struct tracing_map_elt *elt,
3436 			      struct ring_buffer_event *rbe,
3437 			      void *rec)
3438 {
3439 	__update_field_vars(elt, rbe, rec, hist_data->field_vars,
3440 			    hist_data->n_field_vars, 0);
3441 }
3442 
3443 static void save_track_data_vars(struct hist_trigger_data *hist_data,
3444 				 struct tracing_map_elt *elt, void *rec,
3445 				 struct ring_buffer_event *rbe, void *key,
3446 				 struct action_data *data, u64 *var_ref_vals)
3447 {
3448 	__update_field_vars(elt, rbe, rec, hist_data->save_vars,
3449 			    hist_data->n_save_vars, hist_data->n_field_var_str);
3450 }
3451 
3452 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
3453 				     struct trace_event_file *file,
3454 				     char *name, int size, const char *type)
3455 {
3456 	struct hist_field *var;
3457 	int idx;
3458 
3459 	if (find_var(hist_data, file, name) && !hist_data->remove) {
3460 		var = ERR_PTR(-EINVAL);
3461 		goto out;
3462 	}
3463 
3464 	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3465 	if (!var) {
3466 		var = ERR_PTR(-ENOMEM);
3467 		goto out;
3468 	}
3469 
3470 	idx = tracing_map_add_var(hist_data->map);
3471 	if (idx < 0) {
3472 		kfree(var);
3473 		var = ERR_PTR(-EINVAL);
3474 		goto out;
3475 	}
3476 
3477 	var->flags = HIST_FIELD_FL_VAR;
3478 	var->var.idx = idx;
3479 	var->var.hist_data = var->hist_data = hist_data;
3480 	var->size = size;
3481 	var->var.name = kstrdup(name, GFP_KERNEL);
3482 	var->type = kstrdup(type, GFP_KERNEL);
3483 	if (!var->var.name || !var->type) {
3484 		kfree(var->var.name);
3485 		kfree(var->type);
3486 		kfree(var);
3487 		var = ERR_PTR(-ENOMEM);
3488 	}
3489  out:
3490 	return var;
3491 }
3492 
3493 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3494 					  struct trace_event_file *file,
3495 					  char *field_name)
3496 {
3497 	struct hist_field *val = NULL, *var = NULL;
3498 	unsigned long flags = HIST_FIELD_FL_VAR;
3499 	struct trace_array *tr = file->tr;
3500 	struct field_var *field_var;
3501 	int ret = 0;
3502 
3503 	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3504 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3505 		ret = -EINVAL;
3506 		goto err;
3507 	}
3508 
3509 	val = parse_atom(hist_data, file, field_name, &flags, NULL);
3510 	if (IS_ERR(val)) {
3511 		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
3512 		ret = PTR_ERR(val);
3513 		goto err;
3514 	}
3515 
3516 	var = create_var(hist_data, file, field_name, val->size, val->type);
3517 	if (IS_ERR(var)) {
3518 		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
3519 		kfree(val);
3520 		ret = PTR_ERR(var);
3521 		goto err;
3522 	}
3523 
3524 	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
3525 	if (!field_var) {
3526 		kfree(val);
3527 		kfree(var);
3528 		ret = -ENOMEM;
3529 		goto err;
3530 	}
3531 
3532 	field_var->var = var;
3533 	field_var->val = val;
3534  out:
3535 	return field_var;
3536  err:
3537 	field_var = ERR_PTR(ret);
3538 	goto out;
3539 }
3540 
3541 /**
3542  * create_target_field_var - Automatically create a variable for a field
3543  * @target_hist_data: The target hist trigger
3544  * @subsys_name: Optional subsystem name
3545  * @event_name: Optional event name
3546  * @var_name: The name of the field (and the resulting variable)
3547  *
3548  * Hist trigger actions fetch data from variables, not directly from
3549  * events.  However, for convenience, users are allowed to directly
3550  * specify an event field in an action, which will be automatically
3551  * converted into a variable on their behalf.
3552  *
3553  * This function creates a field variable with the name var_name on
3554  * the hist trigger currently being defined on the target event.  If
3555  * subsys_name and event_name are specified, this function simply
3556  * verifies that they do in fact match the target event subsystem and
3557  * event name.
3558  *
3559  * Return: The variable created for the field.
3560  */
3561 static struct field_var *
3562 create_target_field_var(struct hist_trigger_data *target_hist_data,
3563 			char *subsys_name, char *event_name, char *var_name)
3564 {
3565 	struct trace_event_file *file = target_hist_data->event_file;
3566 
3567 	if (subsys_name) {
3568 		struct trace_event_call *call;
3569 
3570 		if (!event_name)
3571 			return NULL;
3572 
3573 		call = file->event_call;
3574 
3575 		if (strcmp(subsys_name, call->class->system) != 0)
3576 			return NULL;
3577 
3578 		if (strcmp(event_name, trace_event_name(call)) != 0)
3579 			return NULL;
3580 	}
3581 
3582 	return create_field_var(target_hist_data, file, var_name);
3583 }
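
/*
 * For example (hypothetical): an action param written as a bare field
 * name such as 'prio', rather than a variable reference like '$prio',
 * is converted by the function above into a 'prio' variable on the
 * trigger being defined, so the action can read it like any other
 * variable.
 */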
3584 
3585 static bool check_track_val_max(u64 track_val, u64 var_val)
3586 {
3587 	if (var_val <= track_val)
3588 		return false;
3589 
3590 	return true;
3591 }
3592 
3593 static bool check_track_val_changed(u64 track_val, u64 var_val)
3594 {
3595 	if (var_val == track_val)
3596 		return false;
3597 
3598 	return true;
3599 }
3600 
3601 static u64 get_track_val(struct hist_trigger_data *hist_data,
3602 			 struct tracing_map_elt *elt,
3603 			 struct action_data *data)
3604 {
3605 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
3606 	u64 track_val;
3607 
3608 	track_val = tracing_map_read_var(elt, track_var_idx);
3609 
3610 	return track_val;
3611 }
3612 
3613 static void save_track_val(struct hist_trigger_data *hist_data,
3614 			   struct tracing_map_elt *elt,
3615 			   struct action_data *data, u64 var_val)
3616 {
3617 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
3618 
3619 	tracing_map_set_var(elt, track_var_idx, var_val);
3620 }
3621 
3622 static void save_track_data(struct hist_trigger_data *hist_data,
3623 			    struct tracing_map_elt *elt, void *rec,
3624 			    struct ring_buffer_event *rbe, void *key,
3625 			    struct action_data *data, u64 *var_ref_vals)
3626 {
3627 	if (data->track_data.save_data)
3628 		data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3629 }
3630 
3631 static bool check_track_val(struct tracing_map_elt *elt,
3632 			    struct action_data *data,
3633 			    u64 var_val)
3634 {
3635 	struct hist_trigger_data *hist_data;
3636 	u64 track_val;
3637 
3638 	hist_data = data->track_data.track_var->hist_data;
3639 	track_val = get_track_val(hist_data, elt, data);
3640 
3641 	return data->track_data.check_val(track_val, var_val);
3642 }
3643 
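/*
 * Snapshot actions: when e.g. (hypothetically) 'onmax($lat).snapshot()'
 * fires, save_track_data_snapshot() hands the current map element and
 * key to tracing_snapshot_cond(), and cond_snapshot_update() then
 * decides under tr->max_lock whether the newly tracked value still
 * beats the one recorded in the instance's track_data before the
 * snapshot is actually taken.
 */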
3644 #ifdef CONFIG_TRACER_SNAPSHOT
3645 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3646 {
3647 	/* called with tr->max_lock held */
3648 	struct track_data *track_data = tr->cond_snapshot->cond_data;
3649 	struct hist_elt_data *elt_data, *track_elt_data;
3650 	struct snapshot_context *context = cond_data;
3651 	struct action_data *action;
3652 	u64 track_val;
3653 
3654 	if (!track_data)
3655 		return false;
3656 
3657 	action = track_data->action_data;
3658 
3659 	track_val = get_track_val(track_data->hist_data, context->elt,
3660 				  track_data->action_data);
3661 
3662 	if (!action->track_data.check_val(track_data->track_val, track_val))
3663 		return false;
3664 
3665 	track_data->track_val = track_val;
3666 	memcpy(track_data->key, context->key, track_data->key_len);
3667 
3668 	elt_data = context->elt->private_data;
3669 	track_elt_data = track_data->elt.private_data;
3670 	if (elt_data->comm)
3671 		strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
3672 
3673 	track_data->updated = true;
3674 
3675 	return true;
3676 }
3677 
3678 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3679 				     struct tracing_map_elt *elt, void *rec,
3680 				     struct ring_buffer_event *rbe, void *key,
3681 				     struct action_data *data,
3682 				     u64 *var_ref_vals)
3683 {
3684 	struct trace_event_file *file = hist_data->event_file;
3685 	struct snapshot_context context;
3686 
3687 	context.elt = elt;
3688 	context.key = key;
3689 
3690 	tracing_snapshot_cond(file->tr, &context);
3691 }
3692 
3693 static void hist_trigger_print_key(struct seq_file *m,
3694 				   struct hist_trigger_data *hist_data,
3695 				   void *key,
3696 				   struct tracing_map_elt *elt);
3697 
3698 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
3699 {
3700 	unsigned int i;
3701 
3702 	if (!hist_data->n_actions)
3703 		return NULL;
3704 
3705 	for (i = 0; i < hist_data->n_actions; i++) {
3706 		struct action_data *data = hist_data->actions[i];
3707 
3708 		if (data->action == ACTION_SNAPSHOT)
3709 			return data;
3710 	}
3711 
3712 	return NULL;
3713 }
3714 
3715 static void track_data_snapshot_print(struct seq_file *m,
3716 				      struct hist_trigger_data *hist_data)
3717 {
3718 	struct trace_event_file *file = hist_data->event_file;
3719 	struct track_data *track_data;
3720 	struct action_data *action;
3721 
3722 	track_data = tracing_cond_snapshot_data(file->tr);
3723 	if (!track_data)
3724 		return;
3725 
3726 	if (!track_data->updated)
3727 		return;
3728 
3729 	action = snapshot_action(hist_data);
3730 	if (!action)
3731 		return;
3732 
3733 	seq_puts(m, "\nSnapshot taken (see tracing/snapshot).  Details:\n");
3734 	seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
3735 		   action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
3736 		   action->track_data.var_str, track_data->track_val);
3737 
3738 	seq_puts(m, "\ttriggered by event with key: ");
3739 	hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
3740 	seq_putc(m, '\n');
3741 }
3742 #else
3743 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3744 {
3745 	return false;
3746 }
3747 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3748 				     struct tracing_map_elt *elt, void *rec,
3749 				     struct ring_buffer_event *rbe, void *key,
3750 				     struct action_data *data,
3751 				     u64 *var_ref_vals) {}
3752 static void track_data_snapshot_print(struct seq_file *m,
3753 				      struct hist_trigger_data *hist_data) {}
3754 #endif /* CONFIG_TRACER_SNAPSHOT */
3755 
3756 static void track_data_print(struct seq_file *m,
3757 			     struct hist_trigger_data *hist_data,
3758 			     struct tracing_map_elt *elt,
3759 			     struct action_data *data)
3760 {
3761 	u64 track_val = get_track_val(hist_data, elt, data);
3762 	unsigned int i, save_var_idx;
3763 
3764 	if (data->handler == HANDLER_ONMAX)
3765 		seq_printf(m, "\n\tmax: %10llu", track_val);
3766 	else if (data->handler == HANDLER_ONCHANGE)
3767 		seq_printf(m, "\n\tchanged: %10llu", track_val);
3768 
3769 	if (data->action == ACTION_SNAPSHOT)
3770 		return;
3771 
3772 	for (i = 0; i < hist_data->n_save_vars; i++) {
3773 		struct hist_field *save_val = hist_data->save_vars[i]->val;
3774 		struct hist_field *save_var = hist_data->save_vars[i]->var;
3775 		u64 val;
3776 
3777 		save_var_idx = save_var->var.idx;
3778 
3779 		val = tracing_map_read_var(elt, save_var_idx);
3780 
3781 		if (save_val->flags & HIST_FIELD_FL_STRING) {
3782 			seq_printf(m, "  %s: %-32s", save_var->var.name,
3783 				   (char *)(uintptr_t)(val));
3784 		} else
3785 			seq_printf(m, "  %s: %10llu", save_var->var.name, val);
3786 	}
3787 }
3788 
3789 static void ontrack_action(struct hist_trigger_data *hist_data,
3790 			   struct tracing_map_elt *elt, void *rec,
3791 			   struct ring_buffer_event *rbe, void *key,
3792 			   struct action_data *data, u64 *var_ref_vals)
3793 {
3794 	u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
3795 
3796 	if (check_track_val(elt, data, var_val)) {
3797 		save_track_val(hist_data, elt, data, var_val);
3798 		save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3799 	}
3800 }
3801 
3802 static void action_data_destroy(struct action_data *data)
3803 {
3804 	unsigned int i;
3805 
3806 	lockdep_assert_held(&event_mutex);
3807 
3808 	kfree(data->action_name);
3809 
3810 	for (i = 0; i < data->n_params; i++)
3811 		kfree(data->params[i]);
3812 
3813 	if (data->synth_event)
3814 		data->synth_event->ref--;
3815 
3816 	kfree(data->synth_event_name);
3817 
3818 	kfree(data);
3819 }
3820 
3821 static void track_data_destroy(struct hist_trigger_data *hist_data,
3822 			       struct action_data *data)
3823 {
3824 	struct trace_event_file *file = hist_data->event_file;
3825 
3826 	destroy_hist_field(data->track_data.track_var, 0);
3827 
3828 	if (data->action == ACTION_SNAPSHOT) {
3829 		struct track_data *track_data;
3830 
3831 		track_data = tracing_cond_snapshot_data(file->tr);
3832 		if (track_data && track_data->hist_data == hist_data) {
3833 			tracing_snapshot_cond_disable(file->tr);
3834 			track_data_free(track_data);
3835 		}
3836 	}
3837 
3838 	kfree(data->track_data.var_str);
3839 
3840 	action_data_destroy(data);
3841 }
3842 
3843 static int action_create(struct hist_trigger_data *hist_data,
3844 			 struct action_data *data);
3845 
3846 static int track_data_create(struct hist_trigger_data *hist_data,
3847 			     struct action_data *data)
3848 {
3849 	struct hist_field *var_field, *ref_field, *track_var = NULL;
3850 	struct trace_event_file *file = hist_data->event_file;
3851 	struct trace_array *tr = file->tr;
3852 	char *track_data_var_str;
3853 	int ret = 0;
3854 
3855 	track_data_var_str = data->track_data.var_str;
3856 	if (track_data_var_str[0] != '$') {
3857 		hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3858 		return -EINVAL;
3859 	}
3860 	track_data_var_str++;
3861 
3862 	var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3863 	if (!var_field) {
3864 		hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3865 		return -EINVAL;
3866 	}
3867 
3868 	ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3869 	if (!ref_field)
3870 		return -ENOMEM;
3871 
3872 	data->track_data.var_ref = ref_field;
3873 
3874 	if (data->handler == HANDLER_ONMAX)
3875 		track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3876 	if (IS_ERR(track_var)) {
3877 		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3878 		ret = PTR_ERR(track_var);
3879 		goto out;
3880 	}
3881 
3882 	if (data->handler == HANDLER_ONCHANGE)
3883 		track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
3884 	if (IS_ERR(track_var)) {
3885 		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3886 		ret = PTR_ERR(track_var);
3887 		goto out;
3888 	}
3889 	data->track_data.track_var = track_var;
3890 
3891 	ret = action_create(hist_data, data);
3892  out:
3893 	return ret;
3894 }
3895 
3896 static int parse_action_params(struct trace_array *tr, char *params,
3897 			       struct action_data *data)
3898 {
3899 	char *param, *saved_param;
3900 	bool first_param = true;
3901 	int ret = 0;
3902 
3903 	while (params) {
3904 		if (data->n_params >= SYNTH_FIELDS_MAX) {
3905 			hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
			ret = -EINVAL;
3906 			goto out;
3907 		}
3908 
3909 		param = strsep(&params, ",");
3910 		if (!param) {
3911 			hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
3912 			ret = -EINVAL;
3913 			goto out;
3914 		}
3915 
3916 		param = strstrip(param);
3917 		if (strlen(param) < 2) {
3918 			hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
3919 			ret = -EINVAL;
3920 			goto out;
3921 		}
3922 
3923 		saved_param = kstrdup(param, GFP_KERNEL);
3924 		if (!saved_param) {
3925 			ret = -ENOMEM;
3926 			goto out;
3927 		}
3928 
3929 		if (first_param && data->use_trace_keyword) {
3930 			data->synth_event_name = saved_param;
3931 			first_param = false;
3932 			continue;
3933 		}
3934 		first_param = false;
3935 
3936 		data->params[data->n_params++] = saved_param;
3937 	}
3938  out:
3939 	return ret;
3940 }
3941 
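/*
 * Parse the '.action(params)' part of a handler, e.g. (hypothetical
 * examples) 'onmax($wakeup_lat).save(pid,prio)',
 * 'onchange($v).snapshot()' or
 * 'onmatch(sched.sched_switch).trace(wakeup_latency,$wakeup_lat,pid)'.
 */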
3942 static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
3943 			enum handler_id handler)
3944 {
3945 	char *action_name;
3946 	int ret = 0;
3947 
3948 	strsep(&str, ".");
3949 	if (!str) {
3950 		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3951 		ret = -EINVAL;
3952 		goto out;
3953 	}
3954 
3955 	action_name = strsep(&str, "(");
3956 	if (!action_name || !str) {
3957 		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3958 		ret = -EINVAL;
3959 		goto out;
3960 	}
3961 
3962 	if (str_has_prefix(action_name, "save")) {
3963 		char *params = strsep(&str, ")");
3964 
3965 		if (!params) {
3966 			hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
3967 			ret = -EINVAL;
3968 			goto out;
3969 		}
3970 
3971 		ret = parse_action_params(tr, params, data);
3972 		if (ret)
3973 			goto out;
3974 
3975 		if (handler == HANDLER_ONMAX)
3976 			data->track_data.check_val = check_track_val_max;
3977 		else if (handler == HANDLER_ONCHANGE)
3978 			data->track_data.check_val = check_track_val_changed;
3979 		else {
3980 			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3981 			ret = -EINVAL;
3982 			goto out;
3983 		}
3984 
3985 		data->track_data.save_data = save_track_data_vars;
3986 		data->fn = ontrack_action;
3987 		data->action = ACTION_SAVE;
3988 	} else if (str_has_prefix(action_name, "snapshot")) {
3989 		char *params = strsep(&str, ")");
3990 
3991 		if (!str) {
3992 			hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
3993 			ret = -EINVAL;
3994 			goto out;
3995 		}
3996 
3997 		if (handler == HANDLER_ONMAX)
3998 			data->track_data.check_val = check_track_val_max;
3999 		else if (handler == HANDLER_ONCHANGE)
4000 			data->track_data.check_val = check_track_val_changed;
4001 		else {
4002 			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
4003 			ret = -EINVAL;
4004 			goto out;
4005 		}
4006 
4007 		data->track_data.save_data = save_track_data_snapshot;
4008 		data->fn = ontrack_action;
4009 		data->action = ACTION_SNAPSHOT;
4010 	} else {
4011 		char *params = strsep(&str, ")");
4012 
4013 		if (str_has_prefix(action_name, "trace"))
4014 			data->use_trace_keyword = true;
4015 
4016 		if (params) {
4017 			ret = parse_action_params(tr, params, data);
4018 			if (ret)
4019 				goto out;
4020 		}
4021 
4022 		if (handler == HANDLER_ONMAX)
4023 			data->track_data.check_val = check_track_val_max;
4024 		else if (handler == HANDLER_ONCHANGE)
4025 			data->track_data.check_val = check_track_val_changed;
4026 
4027 		if (handler != HANDLER_ONMATCH) {
4028 			data->track_data.save_data = action_trace;
4029 			data->fn = ontrack_action;
4030 		} else
4031 			data->fn = action_trace;
4032 
4033 		data->action = ACTION_TRACE;
4034 	}
4035 
4036 	data->action_name = kstrdup(action_name, GFP_KERNEL);
4037 	if (!data->action_name) {
4038 		ret = -ENOMEM;
4039 		goto out;
4040 	}
4041 
4042 	data->handler = handler;
4043  out:
4044 	return ret;
4045 }
4046 
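/*
 * Parse an onmax()/onchange() handler specification, e.g.
 * onmax($wakeup_lat).save(comm,prio): save the tracked variable string
 * and hand the trailing action off to action_parse().
 */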
4047 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
4048 					    char *str, enum handler_id handler)
4049 {
4050 	struct action_data *data;
4051 	int ret = -EINVAL;
4052 	char *var_str;
4053 
4054 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4055 	if (!data)
4056 		return ERR_PTR(-ENOMEM);
4057 
4058 	var_str = strsep(&str, ")");
4059 	if (!var_str || !str) {
4060 		ret = -EINVAL;
4061 		goto free;
4062 	}
4063 
4064 	data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
4065 	if (!data->track_data.var_str) {
4066 		ret = -ENOMEM;
4067 		goto free;
4068 	}
4069 
4070 	ret = action_parse(hist_data->event_file->tr, str, data, handler);
4071 	if (ret)
4072 		goto free;
4073  out:
4074 	return data;
4075  free:
4076 	track_data_destroy(hist_data, data);
4077 	data = ERR_PTR(ret);
4078 	goto out;
4079 }
4080 
4081 static void onmatch_destroy(struct action_data *data)
4082 {
4083 	kfree(data->match_data.event);
4084 	kfree(data->match_data.event_system);
4085 
4086 	action_data_destroy(data);
4087 }
4088 
4089 static void destroy_field_var(struct field_var *field_var)
4090 {
4091 	if (!field_var)
4092 		return;
4093 
4094 	destroy_hist_field(field_var->var, 0);
4095 	destroy_hist_field(field_var->val, 0);
4096 
4097 	kfree(field_var);
4098 }
4099 
4100 static void destroy_field_vars(struct hist_trigger_data *hist_data)
4101 {
4102 	unsigned int i;
4103 
4104 	for (i = 0; i < hist_data->n_field_vars; i++)
4105 		destroy_field_var(hist_data->field_vars[i]);
4106 }
4107 
4108 static void save_field_var(struct hist_trigger_data *hist_data,
4109 			   struct field_var *field_var)
4110 {
4111 	hist_data->field_vars[hist_data->n_field_vars++] = field_var;
4112 
4113 	if (field_var->val->flags & HIST_FIELD_FL_STRING)
4114 		hist_data->n_field_var_str++;
4115 }
4117 
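/*
 * Check that the hist field supplying synthetic event param 'field_pos'
 * is compatible with the corresponding synthetic event field: the types
 * must match, or failing that the size and signedness must.
 */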
4118 static int check_synth_field(struct synth_event *event,
4119 			     struct hist_field *hist_field,
4120 			     unsigned int field_pos)
4121 {
4122 	struct synth_field *field;
4123 
4124 	if (field_pos >= event->n_fields)
4125 		return -EINVAL;
4126 
4127 	field = event->fields[field_pos];
4128 
4129 	if (strcmp(field->type, hist_field->type) != 0) {
4130 		if (field->size != hist_field->size ||
4131 		    field->is_signed != hist_field->is_signed)
4132 			return -EINVAL;
4133 	}
4134 
4135 	return 0;
4136 }
4137 
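/*
 * Resolve a '$var' action param to an existing variable, looking first
 * on the target event and then on the specified (or, for onmatch, the
 * matched) event.
 */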
4138 static struct hist_field *
4139 trace_action_find_var(struct hist_trigger_data *hist_data,
4140 		      struct action_data *data,
4141 		      char *system, char *event, char *var)
4142 {
4143 	struct trace_array *tr = hist_data->event_file->tr;
4144 	struct hist_field *hist_field;
4145 
4146 	var++; /* skip '$' */
4147 
4148 	hist_field = find_target_event_var(hist_data, system, event, var);
4149 	if (!hist_field) {
4150 		if (!system && data->handler == HANDLER_ONMATCH) {
4151 			system = data->match_data.event_system;
4152 			event = data->match_data.event;
4153 		}
4154 
4155 		hist_field = find_event_var(hist_data, system, event, var);
4156 	}
4157 
4158 	if (!hist_field)
4159 		hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
4160 
4161 	return hist_field;
4162 }
4163 
4164 static struct hist_field *
4165 trace_action_create_field_var(struct hist_trigger_data *hist_data,
4166 			      struct action_data *data, char *system,
4167 			      char *event, char *var)
4168 {
4169 	struct hist_field *hist_field = NULL;
4170 	struct field_var *field_var;
4171 
4172 	/*
4173 	 * First try to create a field var on the target event (the
4174 	 * event currently being defined).  This will create a variable for
4175 	 * unqualified fields on the target event, or if qualified,
4176 	 * target fields that have qualified names matching the target.
4177 	 */
4178 	field_var = create_target_field_var(hist_data, system, event, var);
4179 
4180 	if (field_var && !IS_ERR(field_var)) {
4181 		save_field_var(hist_data, field_var);
4182 		hist_field = field_var->var;
4183 	} else {
4184 		field_var = NULL;
4185 		/*
4186 		 * If no explicit system.event is specified, default to
4187 		 * looking for fields on the onmatch(system.event.xxx)
4188 		 * event.
4189 		 */
4190 		if (!system && data->handler == HANDLER_ONMATCH) {
4191 			system = data->match_data.event_system;
4192 			event = data->match_data.event;
4193 		}
4194 
4195 		/*
4196 		 * At this point, we're looking at a field on another
4197 		 * event.  Because we can't modify a hist trigger on
4198 		 * another event to add a variable for a field, we need
4199 		 * to create a new trigger on that event and create the
4200 		 * variable at the same time.
4201 		 */
4202 		hist_field = create_field_var_hist(hist_data, system, event, var);
4203 		if (IS_ERR(hist_field))
4204 			goto free;
4205 	}
4206  out:
4207 	return hist_field;
4208  free:
4209 	destroy_field_var(field_var);
4210 	hist_field = NULL;
4211 	goto out;
4212 }
4213 
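/*
 * Create a synthetic-event-generating action: look up the synthetic
 * event, resolve each param to a variable reference or field variable,
 * and check it against the corresponding synthetic event field.  The
 * resulting variable references supply the values when the synthetic
 * event is generated.
 */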
4214 static int trace_action_create(struct hist_trigger_data *hist_data,
4215 			       struct action_data *data)
4216 {
4217 	struct trace_array *tr = hist_data->event_file->tr;
4218 	char *event_name, *param, *system = NULL;
4219 	struct hist_field *hist_field, *var_ref;
4220 	unsigned int i, var_ref_idx;
4221 	unsigned int field_pos = 0;
4222 	struct synth_event *event;
4223 	char *synth_event_name;
4224 	int ret = 0;
4225 
4226 	lockdep_assert_held(&event_mutex);
4227 
4228 	if (data->use_trace_keyword)
4229 		synth_event_name = data->synth_event_name;
4230 	else
4231 		synth_event_name = data->action_name;
4232 
4233 	event = find_synth_event(synth_event_name);
4234 	if (!event) {
4235 		hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
4236 		return -EINVAL;
4237 	}
4238 
4239 	event->ref++;
4240 
4241 	var_ref_idx = hist_data->n_var_refs;
4242 
4243 	for (i = 0; i < data->n_params; i++) {
4244 		char *p;
4245 
4246 		p = param = kstrdup(data->params[i], GFP_KERNEL);
4247 		if (!param) {
4248 			ret = -ENOMEM;
4249 			goto err;
4250 		}
4251 
4252 		system = strsep(&param, ".");
4253 		if (!param) {
4254 			param = (char *)system;
4255 			system = event_name = NULL;
4256 		} else {
4257 			event_name = strsep(&param, ".");
4258 			if (!param) {
4259 				kfree(p);
4260 				ret = -EINVAL;
4261 				goto err;
4262 			}
4263 		}
4264 
4265 		if (param[0] == '$')
4266 			hist_field = trace_action_find_var(hist_data, data,
4267 							   system, event_name,
4268 							   param);
4269 		else
4270 			hist_field = trace_action_create_field_var(hist_data,
4271 								   data,
4272 								   system,
4273 								   event_name,
4274 								   param);
4275 
4276 		if (!hist_field) {
4277 			kfree(p);
4278 			ret = -EINVAL;
4279 			goto err;
4280 		}
4281 
4282 		if (check_synth_field(event, hist_field, field_pos) == 0) {
4283 			var_ref = create_var_ref(hist_data, hist_field,
4284 						 system, event_name);
4285 			if (!var_ref) {
4286 				kfree(p);
4287 				ret = -ENOMEM;
4288 				goto err;
4289 			}
4290 
4291 			field_pos++;
4292 			kfree(p);
4293 			continue;
4294 		}
4295 
4296 		hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
4297 		kfree(p);
4298 		ret = -EINVAL;
4299 		goto err;
4300 	}
4301 
4302 	if (field_pos != event->n_fields) {
4303 		hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
4304 		ret = -EINVAL;
4305 		goto err;
4306 	}
4307 
4308 	data->synth_event = event;
4309 	data->var_ref_idx = var_ref_idx;
4310  out:
4311 	return ret;
4312  err:
4313 	event->ref--;
4314 
4315 	goto out;
4316 }
4317 
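/*
 * Create the parsed action: trace() actions are handed to
 * trace_action_create(), snapshot() actions set up a conditional
 * snapshot, and save() actions create a field variable per param
 * (at most one save() action is allowed per hist trigger).
 */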
4318 static int action_create(struct hist_trigger_data *hist_data,
4319 			 struct action_data *data)
4320 {
4321 	struct trace_event_file *file = hist_data->event_file;
4322 	struct trace_array *tr = file->tr;
4323 	struct track_data *track_data;
4324 	struct field_var *field_var;
4325 	unsigned int i;
4326 	char *param;
4327 	int ret = 0;
4328 
4329 	if (data->action == ACTION_TRACE)
4330 		return trace_action_create(hist_data, data);
4331 
4332 	if (data->action == ACTION_SNAPSHOT) {
4333 		track_data = track_data_alloc(hist_data->key_size, data, hist_data);
4334 		if (IS_ERR(track_data)) {
4335 			ret = PTR_ERR(track_data);
4336 			goto out;
4337 		}
4338 
4339 		ret = tracing_snapshot_cond_enable(file->tr, track_data,
4340 						   cond_snapshot_update);
4341 		if (ret)
4342 			track_data_free(track_data);
4343 
4344 		goto out;
4345 	}
4346 
4347 	if (data->action == ACTION_SAVE) {
4348 		if (hist_data->n_save_vars) {
4349 			ret = -EEXIST;
4350 			hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
4351 			goto out;
4352 		}
4353 
4354 		for (i = 0; i < data->n_params; i++) {
4355 			param = kstrdup(data->params[i], GFP_KERNEL);
4356 			if (!param) {
4357 				ret = -ENOMEM;
4358 				goto out;
4359 			}
4360 
4361 			field_var = create_target_field_var(hist_data, NULL, NULL, param);
4362 			if (IS_ERR(field_var)) {
4363 				hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
4364 					 errpos(param));
4365 				ret = PTR_ERR(field_var);
4366 				kfree(param);
4367 				goto out;
4368 			}
4369 
4370 			hist_data->save_vars[hist_data->n_save_vars++] = field_var;
4371 			if (field_var->val->flags & HIST_FIELD_FL_STRING)
4372 				hist_data->n_save_var_str++;
4373 			kfree(param);
4374 		}
4375 	}
4376  out:
4377 	return ret;
4378 }
4379 
4380 static int onmatch_create(struct hist_trigger_data *hist_data,
4381 			  struct action_data *data)
4382 {
4383 	return action_create(hist_data, data);
4384 }
4385 
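/*
 * Parse an onmatch() handler specification, e.g.
 * onmatch(sched.sched_waking).trace(wakeup_latency,$wakeup_lat,next_pid):
 * validate and save the matched system and event names, then hand the
 * trailing action off to action_parse().
 */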
4386 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
4387 {
4388 	char *match_event, *match_event_system;
4389 	struct action_data *data;
4390 	int ret = -EINVAL;
4391 
4392 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4393 	if (!data)
4394 		return ERR_PTR(-ENOMEM);
4395 
4396 	match_event = strsep(&str, ")");
4397 	if (!match_event || !str) {
4398 		hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
4399 		goto free;
4400 	}
4401 
4402 	match_event_system = strsep(&match_event, ".");
4403 	if (!match_event) {
4404 		hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
4405 		goto free;
4406 	}
4407 
4408 	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
4409 		hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
4410 		goto free;
4411 	}
4412 
4413 	data->match_data.event = kstrdup(match_event, GFP_KERNEL);
4414 	if (!data->match_data.event) {
4415 		ret = -ENOMEM;
4416 		goto free;
4417 	}
4418 
4419 	data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
4420 	if (!data->match_data.event_system) {
4421 		ret = -ENOMEM;
4422 		goto free;
4423 	}
4424 
4425 	ret = action_parse(tr, str, data, HANDLER_ONMATCH);
4426 	if (ret)
4427 		goto free;
4428  out:
4429 	return data;
4430  free:
4431 	onmatch_destroy(data);
4432 	data = ERR_PTR(ret);
4433 	goto out;
4434 }
4435 
4436 static int create_hitcount_val(struct hist_trigger_data *hist_data)
4437 {
4438 	hist_data->fields[HITCOUNT_IDX] =
4439 		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
4440 	if (!hist_data->fields[HITCOUNT_IDX])
4441 		return -ENOMEM;
4442 
4443 	hist_data->n_vals++;
4444 	hist_data->n_fields++;
4445 
4446 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
4447 		return -EINVAL;
4448 
4449 	return 0;
4450 }
4451 
4452 static int __create_val_field(struct hist_trigger_data *hist_data,
4453 			      unsigned int val_idx,
4454 			      struct trace_event_file *file,
4455 			      char *var_name, char *field_str,
4456 			      unsigned long flags)
4457 {
4458 	struct hist_field *hist_field;
4459 	int ret = 0;
4460 
4461 	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
4462 	if (IS_ERR(hist_field)) {
4463 		ret = PTR_ERR(hist_field);
4464 		goto out;
4465 	}
4466 
4467 	hist_data->fields[val_idx] = hist_field;
4468 
4469 	++hist_data->n_vals;
4470 	++hist_data->n_fields;
4471 
4472 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4473 		ret = -EINVAL;
4474  out:
4475 	return ret;
4476 }
4477 
4478 static int create_val_field(struct hist_trigger_data *hist_data,
4479 			    unsigned int val_idx,
4480 			    struct trace_event_file *file,
4481 			    char *field_str)
4482 {
4483 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
4484 		return -EINVAL;
4485 
4486 	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
4487 }
4488 
4489 static int create_var_field(struct hist_trigger_data *hist_data,
4490 			    unsigned int val_idx,
4491 			    struct trace_event_file *file,
4492 			    char *var_name, char *expr_str)
4493 {
4494 	struct trace_array *tr = hist_data->event_file->tr;
4495 	unsigned long flags = 0;
4496 
4497 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4498 		return -EINVAL;
4499 
4500 	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
4501 		hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
4502 		return -EINVAL;
4503 	}
4504 
4505 	flags |= HIST_FIELD_FL_VAR;
4506 	hist_data->n_vars++;
4507 	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
4508 		return -EINVAL;
4509 
4510 	return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
4511 }
4512 
4513 static int create_val_fields(struct hist_trigger_data *hist_data,
4514 			     struct trace_event_file *file)
4515 {
4516 	char *fields_str, *field_str;
4517 	unsigned int i, j = 1;
4518 	int ret;
4519 
4520 	ret = create_hitcount_val(hist_data);
4521 	if (ret)
4522 		goto out;
4523 
4524 	fields_str = hist_data->attrs->vals_str;
4525 	if (!fields_str)
4526 		goto out;
4527 
4528 	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
4529 		     j < TRACING_MAP_VALS_MAX; i++) {
4530 		field_str = strsep(&fields_str, ",");
4531 		if (!field_str)
4532 			break;
4533 
4534 		if (strcmp(field_str, "hitcount") == 0)
4535 			continue;
4536 
4537 		ret = create_val_field(hist_data, j++, file, field_str);
4538 		if (ret)
4539 			goto out;
4540 	}
4541 
4542 	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
4543 		ret = -EINVAL;
4544  out:
4545 	return ret;
4546 }
4547 
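/*
 * Create one key field from a 'keys=' entry.  'stacktrace' gets a
 * fixed-size stacktrace key; anything else is parsed as an expression
 * (variable references aren't allowed in keys).  Returns the
 * u64-aligned key size on success or a negative error.
 */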
4548 static int create_key_field(struct hist_trigger_data *hist_data,
4549 			    unsigned int key_idx,
4550 			    unsigned int key_offset,
4551 			    struct trace_event_file *file,
4552 			    char *field_str)
4553 {
4554 	struct trace_array *tr = hist_data->event_file->tr;
4555 	struct hist_field *hist_field = NULL;
4556 	unsigned long flags = 0;
4557 	unsigned int key_size;
4558 	int ret = 0;
4559 
4560 	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
4561 		return -EINVAL;
4562 
4563 	flags |= HIST_FIELD_FL_KEY;
4564 
4565 	if (strcmp(field_str, "stacktrace") == 0) {
4566 		flags |= HIST_FIELD_FL_STACKTRACE;
4567 		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
4568 		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
4569 	} else {
4570 		hist_field = parse_expr(hist_data, file, field_str, flags,
4571 					NULL, 0);
4572 		if (IS_ERR(hist_field)) {
4573 			ret = PTR_ERR(hist_field);
4574 			goto out;
4575 		}
4576 
4577 		if (field_has_hist_vars(hist_field, 0))	{
4578 			hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
4579 			destroy_hist_field(hist_field, 0);
4580 			ret = -EINVAL;
4581 			goto out;
4582 		}
4583 
4584 		key_size = hist_field->size;
4585 	}
4586 
4587 	hist_data->fields[key_idx] = hist_field;
4588 
4589 	key_size = ALIGN(key_size, sizeof(u64));
4590 	hist_data->fields[key_idx]->size = key_size;
4591 	hist_data->fields[key_idx]->offset = key_offset;
4592 
4593 	hist_data->key_size += key_size;
4594 
4595 	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
4596 		ret = -EINVAL;
4597 		goto out;
4598 	}
4599 
4600 	hist_data->n_keys++;
4601 	hist_data->n_fields++;
4602 
4603 	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
4604 		return -EINVAL;
4605 
4606 	ret = key_size;
4607  out:
4608 	return ret;
4609 }
4610 
4611 static int create_key_fields(struct hist_trigger_data *hist_data,
4612 			     struct trace_event_file *file)
4613 {
4614 	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
4615 	char *fields_str, *field_str;
4616 	int ret = -EINVAL;
4617 
4618 	fields_str = hist_data->attrs->keys_str;
4619 	if (!fields_str)
4620 		goto out;
4621 
4622 	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
4623 		field_str = strsep(&fields_str, ",");
4624 		if (!field_str)
4625 			break;
4626 		ret = create_key_field(hist_data, i, key_offset,
4627 				       file, field_str);
4628 		if (ret < 0)
4629 			goto out;
4630 		key_offset += ret;
4631 	}
4632 	if (fields_str) {
4633 		ret = -EINVAL;
4634 		goto out;
4635 	}
4636 	ret = 0;
4637  out:
4638 	return ret;
4639 }
4640 
4641 static int create_var_fields(struct hist_trigger_data *hist_data,
4642 			     struct trace_event_file *file)
4643 {
4644 	unsigned int i, j = hist_data->n_vals;
4645 	int ret = 0;
4646 
4647 	unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
4648 
4649 	for (i = 0; i < n_vars; i++) {
4650 		char *var_name = hist_data->attrs->var_defs.name[i];
4651 		char *expr = hist_data->attrs->var_defs.expr[i];
4652 
4653 		ret = create_var_field(hist_data, j++, file, var_name, expr);
4654 		if (ret)
4655 			goto out;
4656 	}
4657  out:
4658 	return ret;
4659 }
4660 
4661 static void free_var_defs(struct hist_trigger_data *hist_data)
4662 {
4663 	unsigned int i;
4664 
4665 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
4666 		kfree(hist_data->attrs->var_defs.name[i]);
4667 		kfree(hist_data->attrs->var_defs.expr[i]);
4668 	}
4669 
4670 	hist_data->attrs->var_defs.n_vars = 0;
4671 }
4672 
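/*
 * Split each 'var=expr' assignment string into a variable name and
 * expression, stashing copies in attrs->var_defs for
 * create_var_fields() to consume.
 */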
4673 static int parse_var_defs(struct hist_trigger_data *hist_data)
4674 {
4675 	struct trace_array *tr = hist_data->event_file->tr;
4676 	char *s, *str, *var_name, *field_str;
4677 	unsigned int i, j, n_vars = 0;
4678 	int ret = 0;
4679 
4680 	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
4681 		str = hist_data->attrs->assignment_str[i];
4682 		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
4683 			field_str = strsep(&str, ",");
4684 			if (!field_str)
4685 				break;
4686 
4687 			var_name = strsep(&field_str, "=");
4688 			if (!var_name || !field_str) {
4689 				hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
4690 					 errpos(var_name));
4691 				ret = -EINVAL;
4692 				goto free;
4693 			}
4694 
4695 			if (n_vars == TRACING_MAP_VARS_MAX) {
4696 				hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
4697 				ret = -EINVAL;
4698 				goto free;
4699 			}
4700 
4701 			s = kstrdup(var_name, GFP_KERNEL);
4702 			if (!s) {
4703 				ret = -ENOMEM;
4704 				goto free;
4705 			}
4706 			hist_data->attrs->var_defs.name[n_vars] = s;
4707 
4708 			s = kstrdup(field_str, GFP_KERNEL);
4709 			if (!s) {
4710 				kfree(hist_data->attrs->var_defs.name[n_vars]);
4711 				ret = -ENOMEM;
4712 				goto free;
4713 			}
4714 			hist_data->attrs->var_defs.expr[n_vars++] = s;
4715 
4716 			hist_data->attrs->var_defs.n_vars = n_vars;
4717 		}
4718 	}
4719 
4720 	return ret;
4721  free:
4722 	free_var_defs(hist_data);
4723 
4724 	return ret;
4725 }
4726 
4727 static int create_hist_fields(struct hist_trigger_data *hist_data,
4728 			      struct trace_event_file *file)
4729 {
4730 	int ret;
4731 
4732 	ret = parse_var_defs(hist_data);
4733 	if (ret)
4734 		goto out;
4735 
4736 	ret = create_val_fields(hist_data, file);
4737 	if (ret)
4738 		goto out;
4739 
4740 	ret = create_var_fields(hist_data, file);
4741 	if (ret)
4742 		goto out;
4743 
4744 	ret = create_key_fields(hist_data, file);
4745 	if (ret)
4746 		goto out;
4747  out:
4748 	free_var_defs(hist_data);
4749 
4750 	return ret;
4751 }
4752 
4753 static int is_descending(struct trace_array *tr, const char *str)
4754 {
4755 	if (!str)
4756 		return 0;
4757 
4758 	if (strcmp(str, "descending") == 0)
4759 		return 1;
4760 
4761 	if (strcmp(str, "ascending") == 0)
4762 		return 0;
4763 
4764 	hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str));
4765 
4766 	return -EINVAL;
4767 }
4768 
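/*
 * Translate the 'sort=' string into tracing_map sort keys.  'hitcount'
 * is the implicit default; other names must match one of the trigger's
 * non-variable fields, and each may carry an optional
 * .ascending/.descending modifier.
 */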
4769 static int create_sort_keys(struct hist_trigger_data *hist_data)
4770 {
4771 	struct trace_array *tr = hist_data->event_file->tr;
4772 	char *fields_str = hist_data->attrs->sort_key_str;
4773 	struct tracing_map_sort_key *sort_key;
4774 	int descending, ret = 0;
4775 	unsigned int i, j, k;
4776 
4777 	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
4778 
4779 	if (!fields_str)
4780 		goto out;
4781 
4782 	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4783 		struct hist_field *hist_field;
4784 		char *field_str, *field_name;
4785 		const char *test_name;
4786 
4787 		sort_key = &hist_data->sort_keys[i];
4788 
4789 		field_str = strsep(&fields_str, ",");
4790 		if (!field_str)
4791 			break;
4792 
4793 		if (!*field_str) {
4794 			ret = -EINVAL;
4795 			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
4796 			break;
4797 		}
4798 
4799 		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4800 			hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort="));
4801 			ret = -EINVAL;
4802 			break;
4803 		}
4804 
4805 		field_name = strsep(&field_str, ".");
4806 		if (!field_name || !*field_name) {
4807 			ret = -EINVAL;
4808 			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
4809 			break;
4810 		}
4811 
4812 		if (strcmp(field_name, "hitcount") == 0) {
4813 			descending = is_descending(tr, field_str);
4814 			if (descending < 0) {
4815 				ret = descending;
4816 				break;
4817 			}
4818 			sort_key->descending = descending;
4819 			continue;
4820 		}
4821 
4822 		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4823 			unsigned int idx;
4824 
4825 			hist_field = hist_data->fields[j];
4826 			if (hist_field->flags & HIST_FIELD_FL_VAR)
4827 				continue;
4828 
4829 			idx = k++;
4830 
4831 			test_name = hist_field_name(hist_field, 0);
4832 
4833 			if (strcmp(field_name, test_name) == 0) {
4834 				sort_key->field_idx = idx;
4835 				descending = is_descending(tr, field_str);
4836 				if (descending < 0) {
4837 					ret = descending;
4838 					goto out;
4839 				}
4840 				sort_key->descending = descending;
4841 				break;
4842 			}
4843 		}
4844 		if (j == hist_data->n_fields) {
4845 			ret = -EINVAL;
4846 			hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name));
4847 			break;
4848 		}
4849 	}
4850 
4851 	hist_data->n_sort_keys = i;
4852  out:
4853 	return ret;
4854 }
4855 
4856 static void destroy_actions(struct hist_trigger_data *hist_data)
4857 {
4858 	unsigned int i;
4859 
4860 	for (i = 0; i < hist_data->n_actions; i++) {
4861 		struct action_data *data = hist_data->actions[i];
4862 
4863 		if (data->handler == HANDLER_ONMATCH)
4864 			onmatch_destroy(data);
4865 		else if (data->handler == HANDLER_ONMAX ||
4866 			 data->handler == HANDLER_ONCHANGE)
4867 			track_data_destroy(hist_data, data);
4868 		else
4869 			kfree(data);
4870 	}
4871 }
4872 
4873 static int parse_actions(struct hist_trigger_data *hist_data)
4874 {
4875 	struct trace_array *tr = hist_data->event_file->tr;
4876 	struct action_data *data;
4877 	unsigned int i;
4878 	int ret = 0;
4879 	char *str;
4880 	int len;
4881 
4882 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4883 		str = hist_data->attrs->action_str[i];
4884 
4885 		if ((len = str_has_prefix(str, "onmatch("))) {
4886 			char *action_str = str + len;
4887 
4888 			data = onmatch_parse(tr, action_str);
4889 			if (IS_ERR(data)) {
4890 				ret = PTR_ERR(data);
4891 				break;
4892 			}
4893 		} else if ((len = str_has_prefix(str, "onmax("))) {
4894 			char *action_str = str + len;
4895 
4896 			data = track_data_parse(hist_data, action_str,
4897 						HANDLER_ONMAX);
4898 			if (IS_ERR(data)) {
4899 				ret = PTR_ERR(data);
4900 				break;
4901 			}
4902 		} else if ((len = str_has_prefix(str, "onchange("))) {
4903 			char *action_str = str + len;
4904 
4905 			data = track_data_parse(hist_data, action_str,
4906 						HANDLER_ONCHANGE);
4907 			if (IS_ERR(data)) {
4908 				ret = PTR_ERR(data);
4909 				break;
4910 			}
4911 		} else {
4912 			ret = -EINVAL;
4913 			break;
4914 		}
4915 
4916 		hist_data->actions[hist_data->n_actions++] = data;
4917 	}
4918 
4919 	return ret;
4920 }
4921 
4922 static int create_actions(struct hist_trigger_data *hist_data)
4923 {
4924 	struct action_data *data;
4925 	unsigned int i;
4926 	int ret = 0;
4927 
4928 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4929 		data = hist_data->actions[i];
4930 
4931 		if (data->handler == HANDLER_ONMATCH) {
4932 			ret = onmatch_create(hist_data, data);
4933 			if (ret)
4934 				break;
4935 		} else if (data->handler == HANDLER_ONMAX ||
4936 			   data->handler == HANDLER_ONCHANGE) {
4937 			ret = track_data_create(hist_data, data);
4938 			if (ret)
4939 				break;
4940 		} else {
4941 			ret = -EINVAL;
4942 			break;
4943 		}
4944 	}
4945 
4946 	return ret;
4947 }
4948 
4949 static void print_actions(struct seq_file *m,
4950 			  struct hist_trigger_data *hist_data,
4951 			  struct tracing_map_elt *elt)
4952 {
4953 	unsigned int i;
4954 
4955 	for (i = 0; i < hist_data->n_actions; i++) {
4956 		struct action_data *data = hist_data->actions[i];
4957 
4958 		if (data->action == ACTION_SNAPSHOT)
4959 			continue;
4960 
4961 		if (data->handler == HANDLER_ONMAX ||
4962 		    data->handler == HANDLER_ONCHANGE)
4963 			track_data_print(m, hist_data, elt, data);
4964 	}
4965 }
4966 
4967 static void print_action_spec(struct seq_file *m,
4968 			      struct hist_trigger_data *hist_data,
4969 			      struct action_data *data)
4970 {
4971 	unsigned int i;
4972 
4973 	if (data->action == ACTION_SAVE) {
4974 		for (i = 0; i < hist_data->n_save_vars; i++) {
4975 			seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
4976 			if (i < hist_data->n_save_vars - 1)
4977 				seq_puts(m, ",");
4978 		}
4979 	} else if (data->action == ACTION_TRACE) {
4980 		if (data->use_trace_keyword)
4981 			seq_printf(m, "%s", data->synth_event_name);
4982 		for (i = 0; i < data->n_params; i++) {
4983 			if (i || data->use_trace_keyword)
4984 				seq_puts(m, ",");
4985 			seq_printf(m, "%s", data->params[i]);
4986 		}
4987 	}
4988 }
4989 
4990 static void print_track_data_spec(struct seq_file *m,
4991 				  struct hist_trigger_data *hist_data,
4992 				  struct action_data *data)
4993 {
4994 	if (data->handler == HANDLER_ONMAX)
4995 		seq_puts(m, ":onmax(");
4996 	else if (data->handler == HANDLER_ONCHANGE)
4997 		seq_puts(m, ":onchange(");
4998 	seq_printf(m, "%s", data->track_data.var_str);
4999 	seq_printf(m, ").%s(", data->action_name);
5000 
5001 	print_action_spec(m, hist_data, data);
5002 
5003 	seq_puts(m, ")");
5004 }
5005 
5006 static void print_onmatch_spec(struct seq_file *m,
5007 			       struct hist_trigger_data *hist_data,
5008 			       struct action_data *data)
5009 {
5010 	seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
5011 		   data->match_data.event);
5012 
5013 	seq_printf(m, "%s(", data->action_name);
5014 
5015 	print_action_spec(m, hist_data, data);
5016 
5017 	seq_puts(m, ")");
5018 }
5019 
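/*
 * Return true if two hist triggers have equivalent action lists:
 * same handlers and actions, same params, and same action targets
 * (synthetic event, matched event or tracked variable).
 */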
5020 static bool actions_match(struct hist_trigger_data *hist_data,
5021 			  struct hist_trigger_data *hist_data_test)
5022 {
5023 	unsigned int i, j;
5024 
5025 	if (hist_data->n_actions != hist_data_test->n_actions)
5026 		return false;
5027 
5028 	for (i = 0; i < hist_data->n_actions; i++) {
5029 		struct action_data *data = hist_data->actions[i];
5030 		struct action_data *data_test = hist_data_test->actions[i];
5031 		char *action_name, *action_name_test;
5032 
5033 		if (data->handler != data_test->handler)
5034 			return false;
5035 		if (data->action != data_test->action)
5036 			return false;
5037 
5038 		if (data->n_params != data_test->n_params)
5039 			return false;
5040 
5041 		for (j = 0; j < data->n_params; j++) {
5042 			if (strcmp(data->params[j], data_test->params[j]) != 0)
5043 				return false;
5044 		}
5045 
5046 		if (data->use_trace_keyword)
5047 			action_name = data->synth_event_name;
5048 		else
5049 			action_name = data->action_name;
5050 
5051 		if (data_test->use_trace_keyword)
5052 			action_name_test = data_test->synth_event_name;
5053 		else
5054 			action_name_test = data_test->action_name;
5055 
5056 		if (strcmp(action_name, action_name_test) != 0)
5057 			return false;
5058 
5059 		if (data->handler == HANDLER_ONMATCH) {
5060 			if (strcmp(data->match_data.event_system,
5061 				   data_test->match_data.event_system) != 0)
5062 				return false;
5063 			if (strcmp(data->match_data.event,
5064 				   data_test->match_data.event) != 0)
5065 				return false;
5066 		} else if (data->handler == HANDLER_ONMAX ||
5067 			   data->handler == HANDLER_ONCHANGE) {
5068 			if (strcmp(data->track_data.var_str,
5069 				   data_test->track_data.var_str) != 0)
5070 				return false;
5071 		}
5072 	}
5073 
5074 	return true;
5075 }
5076 
5078 static void print_actions_spec(struct seq_file *m,
5079 			       struct hist_trigger_data *hist_data)
5080 {
5081 	unsigned int i;
5082 
5083 	for (i = 0; i < hist_data->n_actions; i++) {
5084 		struct action_data *data = hist_data->actions[i];
5085 
5086 		if (data->handler == HANDLER_ONMATCH)
5087 			print_onmatch_spec(m, hist_data, data);
5088 		else if (data->handler == HANDLER_ONMAX ||
5089 			 data->handler == HANDLER_ONCHANGE)
5090 			print_track_data_spec(m, hist_data, data);
5091 	}
5092 }
5093 
5094 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
5095 {
5096 	unsigned int i;
5097 
5098 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5099 		kfree(hist_data->field_var_hists[i]->cmd);
5100 		kfree(hist_data->field_var_hists[i]);
5101 	}
5102 }
5103 
5104 static void destroy_hist_data(struct hist_trigger_data *hist_data)
5105 {
5106 	if (!hist_data)
5107 		return;
5108 
5109 	destroy_hist_trigger_attrs(hist_data->attrs);
5110 	destroy_hist_fields(hist_data);
5111 	tracing_map_destroy(hist_data->map);
5112 
5113 	destroy_actions(hist_data);
5114 	destroy_field_vars(hist_data);
5115 	destroy_field_var_hists(hist_data);
5116 
5117 	kfree(hist_data);
5118 }
5119 
5120 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
5121 {
5122 	struct tracing_map *map = hist_data->map;
5123 	struct ftrace_event_field *field;
5124 	struct hist_field *hist_field;
5125 	int i, idx = 0;
5126 
5127 	for_each_hist_field(i, hist_data) {
5128 		hist_field = hist_data->fields[i];
5129 		if (hist_field->flags & HIST_FIELD_FL_KEY) {
5130 			tracing_map_cmp_fn_t cmp_fn;
5131 
5132 			field = hist_field->field;
5133 
5134 			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
5135 				cmp_fn = tracing_map_cmp_none;
5136 			else if (!field)
5137 				cmp_fn = tracing_map_cmp_num(hist_field->size,
5138 							     hist_field->is_signed);
5139 			else if (is_string_field(field))
5140 				cmp_fn = tracing_map_cmp_string;
5141 			else
5142 				cmp_fn = tracing_map_cmp_num(field->size,
5143 							     field->is_signed);
5144 			idx = tracing_map_add_key_field(map,
5145 							hist_field->offset,
5146 							cmp_fn);
5147 		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
5148 			idx = tracing_map_add_sum_field(map);
5149 
5150 		if (idx < 0)
5151 			return idx;
5152 
5153 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5154 			idx = tracing_map_add_var(map);
5155 			if (idx < 0)
5156 				return idx;
5157 			hist_field->var.idx = idx;
5158 			hist_field->var.hist_data = hist_data;
5159 		}
5160 	}
5161 
5162 	return 0;
5163 }
5164 
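/*
 * Allocate and populate the hist_trigger_data for a parsed trigger:
 * parse actions, create the val/var/key fields and sort keys, then
 * create the backing tracing_map and its fields.
 */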
5165 static struct hist_trigger_data *
5166 create_hist_data(unsigned int map_bits,
5167 		 struct hist_trigger_attrs *attrs,
5168 		 struct trace_event_file *file,
5169 		 bool remove)
5170 {
5171 	const struct tracing_map_ops *map_ops = NULL;
5172 	struct hist_trigger_data *hist_data;
5173 	int ret = 0;
5174 
5175 	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
5176 	if (!hist_data)
5177 		return ERR_PTR(-ENOMEM);
5178 
5179 	hist_data->attrs = attrs;
5180 	hist_data->remove = remove;
5181 	hist_data->event_file = file;
5182 
5183 	ret = parse_actions(hist_data);
5184 	if (ret)
5185 		goto free;
5186 
5187 	ret = create_hist_fields(hist_data, file);
5188 	if (ret)
5189 		goto free;
5190 
5191 	ret = create_sort_keys(hist_data);
5192 	if (ret)
5193 		goto free;
5194 
5195 	map_ops = &hist_trigger_elt_data_ops;
5196 
5197 	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
5198 					    map_ops, hist_data);
5199 	if (IS_ERR(hist_data->map)) {
5200 		ret = PTR_ERR(hist_data->map);
5201 		hist_data->map = NULL;
5202 		goto free;
5203 	}
5204 
5205 	ret = create_tracing_map_fields(hist_data);
5206 	if (ret)
5207 		goto free;
5208  out:
5209 	return hist_data;
5210  free:
5211 	hist_data->attrs = NULL;
5212 
5213 	destroy_hist_data(hist_data);
5214 
5215 	hist_data = ERR_PTR(ret);
5216 
5217 	goto out;
5218 }
5219 
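/*
 * Update a map element for the current event: evaluate each val field,
 * updating sums or setting variables as appropriate, set any variables
 * defined on key fields, and update this trigger's field variables.
 */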
5220 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
5221 				    struct tracing_map_elt *elt, void *rec,
5222 				    struct ring_buffer_event *rbe,
5223 				    u64 *var_ref_vals)
5224 {
5225 	struct hist_elt_data *elt_data;
5226 	struct hist_field *hist_field;
5227 	unsigned int i, var_idx;
5228 	u64 hist_val;
5229 
5230 	elt_data = elt->private_data;
5231 	elt_data->var_ref_vals = var_ref_vals;
5232 
5233 	for_each_hist_val_field(i, hist_data) {
5234 		hist_field = hist_data->fields[i];
5235 		hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5236 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5237 			var_idx = hist_field->var.idx;
5238 			tracing_map_set_var(elt, var_idx, hist_val);
5239 			continue;
5240 		}
5241 		tracing_map_update_sum(elt, i, hist_val);
5242 	}
5243 
5244 	for_each_hist_key_field(i, hist_data) {
5245 		hist_field = hist_data->fields[i];
5246 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5247 			hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5248 			var_idx = hist_field->var.idx;
5249 			tracing_map_set_var(elt, var_idx, hist_val);
5250 		}
5251 	}
5252 
5253 	update_field_vars(hist_data, elt, rbe, rec);
5254 }
5255 
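/*
 * Copy one key field's value into the compound key at the field's
 * offset; string keys are copied using their actual length and kept
 * NUL-terminated.
 */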
5256 static inline void add_to_key(char *compound_key, void *key,
5257 			      struct hist_field *key_field, void *rec)
5258 {
5259 	size_t size = key_field->size;
5260 
5261 	if (key_field->flags & HIST_FIELD_FL_STRING) {
5262 		struct ftrace_event_field *field;
5263 
5264 		field = key_field->field;
5265 		if (field->filter_type == FILTER_DYN_STRING)
5266 			size = *(u32 *)(rec + field->offset) >> 16;
5267 		else if (field->filter_type == FILTER_PTR_STRING)
5268 			size = strlen(key);
5269 		else if (field->filter_type == FILTER_STATIC_STRING)
5270 			size = field->size;
5271 
5272 		/* ensure NULL-termination */
5273 		if (size > key_field->size - 1)
5274 			size = key_field->size - 1;
5275 
5276 		strncpy(compound_key + key_field->offset, (char *)key, size);
5277 	} else
5278 		memcpy(compound_key + key_field->offset, key, size);
5279 }
5280 
5281 static void
5282 hist_trigger_actions(struct hist_trigger_data *hist_data,
5283 		     struct tracing_map_elt *elt, void *rec,
5284 		     struct ring_buffer_event *rbe, void *key,
5285 		     u64 *var_ref_vals)
5286 {
5287 	struct action_data *data;
5288 	unsigned int i;
5289 
5290 	for (i = 0; i < hist_data->n_actions; i++) {
5291 		data = hist_data->actions[i];
5292 		data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
5293 	}
5294 }
5295 
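/*
 * Per-event hot path for a hist trigger: assemble the (possibly
 * compound) key from the key fields, bail out if variables referenced
 * from other events aren't yet set, insert/update the tracing_map
 * element, and run any attached actions once all references resolve.
 */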
5296 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
5297 			       struct ring_buffer_event *rbe)
5298 {
5299 	struct hist_trigger_data *hist_data = data->private_data;
5300 	bool use_compound_key = (hist_data->n_keys > 1);
5301 	unsigned long entries[HIST_STACKTRACE_DEPTH];
5302 	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
5303 	char compound_key[HIST_KEY_SIZE_MAX];
5304 	struct tracing_map_elt *elt = NULL;
5305 	struct hist_field *key_field;
5306 	u64 field_contents;
5307 	void *key = NULL;
5308 	unsigned int i;
5309 
5310 	memset(compound_key, 0, hist_data->key_size);
5311 
5312 	for_each_hist_key_field(i, hist_data) {
5313 		key_field = hist_data->fields[i];
5314 
5315 		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5316 			memset(entries, 0, HIST_STACKTRACE_SIZE);
5317 			stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
5318 					 HIST_STACKTRACE_SKIP);
5319 			key = entries;
5320 		} else {
5321 			field_contents = key_field->fn(key_field, elt, rbe, rec);
5322 			if (key_field->flags & HIST_FIELD_FL_STRING) {
5323 				key = (void *)(unsigned long)field_contents;
5324 				use_compound_key = true;
5325 			} else
5326 				key = (void *)&field_contents;
5327 		}
5328 
5329 		if (use_compound_key)
5330 			add_to_key(compound_key, key, key_field, rec);
5331 	}
5332 
5333 	if (use_compound_key)
5334 		key = compound_key;
5335 
5336 	if (hist_data->n_var_refs &&
5337 	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
5338 		return;
5339 
5340 	elt = tracing_map_insert(hist_data->map, key);
5341 	if (!elt)
5342 		return;
5343 
5344 	hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
5345 
5346 	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
5347 		hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
5348 }
5349 
5350 static void hist_trigger_stacktrace_print(struct seq_file *m,
5351 					  unsigned long *stacktrace_entries,
5352 					  unsigned int max_entries)
5353 {
5354 	char str[KSYM_SYMBOL_LEN];
5355 	unsigned int spaces = 8;
5356 	unsigned int i;
5357 
5358 	for (i = 0; i < max_entries; i++) {
5359 		if (!stacktrace_entries[i])
5360 			return;
5361 
5362 		seq_printf(m, "%*c", 1 + spaces, ' ');
5363 		sprint_symbol(str, stacktrace_entries[i]);
5364 		seq_printf(m, "%s\n", str);
5365 	}
5366 }
5367 
5368 static void hist_trigger_print_key(struct seq_file *m,
5369 				   struct hist_trigger_data *hist_data,
5370 				   void *key,
5371 				   struct tracing_map_elt *elt)
5372 {
5373 	struct hist_field *key_field;
5374 	char str[KSYM_SYMBOL_LEN];
5375 	bool multiline = false;
5376 	const char *field_name;
5377 	unsigned int i;
5378 	u64 uval;
5379 
5380 	seq_puts(m, "{ ");
5381 
5382 	for_each_hist_key_field(i, hist_data) {
5383 		key_field = hist_data->fields[i];
5384 
5385 		if (i > hist_data->n_vals)
5386 			seq_puts(m, ", ");
5387 
5388 		field_name = hist_field_name(key_field, 0);
5389 
5390 		if (key_field->flags & HIST_FIELD_FL_HEX) {
5391 			uval = *(u64 *)(key + key_field->offset);
5392 			seq_printf(m, "%s: %llx", field_name, uval);
5393 		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
5394 			uval = *(u64 *)(key + key_field->offset);
5395 			sprint_symbol_no_offset(str, uval);
5396 			seq_printf(m, "%s: [%llx] %-45s", field_name,
5397 				   uval, str);
5398 		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
5399 			uval = *(u64 *)(key + key_field->offset);
5400 			sprint_symbol(str, uval);
5401 			seq_printf(m, "%s: [%llx] %-55s", field_name,
5402 				   uval, str);
5403 		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
5404 			struct hist_elt_data *elt_data = elt->private_data;
5405 			char *comm;
5406 
5407 			if (WARN_ON_ONCE(!elt_data))
5408 				return;
5409 
5410 			comm = elt_data->comm;
5411 
5412 			uval = *(u64 *)(key + key_field->offset);
5413 			seq_printf(m, "%s: %-16s[%10llu]", field_name,
5414 				   comm, uval);
5415 		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
5416 			const char *syscall_name;
5417 
5418 			uval = *(u64 *)(key + key_field->offset);
5419 			syscall_name = get_syscall_name(uval);
5420 			if (!syscall_name)
5421 				syscall_name = "unknown_syscall";
5422 
5423 			seq_printf(m, "%s: %-30s[%3llu]", field_name,
5424 				   syscall_name, uval);
5425 		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5426 			seq_puts(m, "stacktrace:\n");
5427 			hist_trigger_stacktrace_print(m,
5428 						      key + key_field->offset,
5429 						      HIST_STACKTRACE_DEPTH);
5430 			multiline = true;
5431 		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
5432 			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
5433 				   *(u64 *)(key + key_field->offset));
5434 		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
5435 			seq_printf(m, "%s: %-50s", field_name,
5436 				   (char *)(key + key_field->offset));
5437 		} else {
5438 			uval = *(u64 *)(key + key_field->offset);
5439 			seq_printf(m, "%s: %10llu", field_name, uval);
5440 		}
5441 	}
5442 
5443 	if (!multiline)
5444 		seq_puts(m, " ");
5445 
5446 	seq_puts(m, "}");
5447 }
5448 
5449 static void hist_trigger_entry_print(struct seq_file *m,
5450 				     struct hist_trigger_data *hist_data,
5451 				     void *key,
5452 				     struct tracing_map_elt *elt)
5453 {
5454 	const char *field_name;
5455 	unsigned int i;
5456 
5457 	hist_trigger_print_key(m, hist_data, key, elt);
5458 
5459 	seq_printf(m, " hitcount: %10llu",
5460 		   tracing_map_read_sum(elt, HITCOUNT_IDX));
5461 
5462 	for (i = 1; i < hist_data->n_vals; i++) {
5463 		field_name = hist_field_name(hist_data->fields[i], 0);
5464 
5465 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
5466 		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
5467 			continue;
5468 
5469 		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
5470 			seq_printf(m, "  %s: %10llx", field_name,
5471 				   tracing_map_read_sum(elt, i));
5472 		} else {
5473 			seq_printf(m, "  %s: %10llu", field_name,
5474 				   tracing_map_read_sum(elt, i));
5475 		}
5476 	}
5477 
5478 	print_actions(m, hist_data, elt);
5479 
5480 	seq_puts(m, "\n");
5481 }
5482 
5483 static int print_entries(struct seq_file *m,
5484 			 struct hist_trigger_data *hist_data)
5485 {
5486 	struct tracing_map_sort_entry **sort_entries = NULL;
5487 	struct tracing_map *map = hist_data->map;
5488 	int i, n_entries;
5489 
5490 	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
5491 					     hist_data->n_sort_keys,
5492 					     &sort_entries);
5493 	if (n_entries < 0)
5494 		return n_entries;
5495 
5496 	for (i = 0; i < n_entries; i++)
5497 		hist_trigger_entry_print(m, hist_data,
5498 					 sort_entries[i]->key,
5499 					 sort_entries[i]->elt);
5500 
5501 	tracing_map_destroy_sort_entries(sort_entries, n_entries);
5502 
5503 	return n_entries;
5504 }
5505 
5506 static void hist_trigger_show(struct seq_file *m,
5507 			      struct event_trigger_data *data, int n)
5508 {
5509 	struct hist_trigger_data *hist_data;
5510 	int n_entries;
5511 
5512 	if (n > 0)
5513 		seq_puts(m, "\n\n");
5514 
5515 	seq_puts(m, "# event histogram\n#\n# trigger info: ");
5516 	data->ops->print(m, data->ops, data);
5517 	seq_puts(m, "#\n\n");
5518 
5519 	hist_data = data->private_data;
5520 	n_entries = print_entries(m, hist_data);
5521 	if (n_entries < 0)
5522 		n_entries = 0;
5523 
5524 	track_data_snapshot_print(m, hist_data);
5525 
5526 	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
5527 		   (u64)atomic64_read(&hist_data->map->hits),
5528 		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
5529 }
5530 
5531 static int hist_show(struct seq_file *m, void *v)
5532 {
5533 	struct event_trigger_data *data;
5534 	struct trace_event_file *event_file;
5535 	int n = 0, ret = 0;
5536 
5537 	mutex_lock(&event_mutex);
5538 
5539 	event_file = event_file_data(m->private);
5540 	if (unlikely(!event_file)) {
5541 		ret = -ENODEV;
5542 		goto out_unlock;
5543 	}
5544 
5545 	list_for_each_entry(data, &event_file->triggers, list) {
5546 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5547 			hist_trigger_show(m, data, n++);
5548 	}
5549 
5550  out_unlock:
5551 	mutex_unlock(&event_mutex);
5552 
5553 	return ret;
5554 }
5555 
5556 static int event_hist_open(struct inode *inode, struct file *file)
5557 {
5558 	int ret;
5559 
5560 	ret = security_locked_down(LOCKDOWN_TRACEFS);
5561 	if (ret)
5562 		return ret;
5563 
5564 	return single_open(file, hist_show, file);
5565 }
5566 
5567 const struct file_operations event_hist_fops = {
5568 	.open = event_hist_open,
5569 	.read = seq_read,
5570 	.llseek = seq_lseek,
5571 	.release = single_release,
5572 };
5573 
5574 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
5575 {
5576 	const char *field_name = hist_field_name(hist_field, 0);
5577 
5578 	if (hist_field->var.name)
5579 		seq_printf(m, "%s=", hist_field->var.name);
5580 
5581 	if (hist_field->flags & HIST_FIELD_FL_CPU)
5582 		seq_puts(m, "cpu");
5583 	else if (field_name) {
5584 		if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
5585 		    hist_field->flags & HIST_FIELD_FL_ALIAS)
5586 			seq_putc(m, '$');
5587 		seq_printf(m, "%s", field_name);
5588 	} else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5589 		seq_puts(m, "common_timestamp");
5590 
5591 	if (hist_field->flags) {
5592 		if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5593 		    !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
5594 			const char *flags = get_hist_field_flags(hist_field);
5595 
5596 			if (flags)
5597 				seq_printf(m, ".%s", flags);
5598 		}
5599 	}
5600 }
5601 
5602 static int event_hist_trigger_print(struct seq_file *m,
5603 				    struct event_trigger_ops *ops,
5604 				    struct event_trigger_data *data)
5605 {
5606 	struct hist_trigger_data *hist_data = data->private_data;
5607 	struct hist_field *field;
5608 	bool have_var = false;
5609 	unsigned int i;
5610 
5611 	seq_puts(m, "hist:");
5612 
5613 	if (data->name)
5614 		seq_printf(m, "%s:", data->name);
5615 
5616 	seq_puts(m, "keys=");
5617 
5618 	for_each_hist_key_field(i, hist_data) {
5619 		field = hist_data->fields[i];
5620 
5621 		if (i > hist_data->n_vals)
5622 			seq_puts(m, ",");
5623 
5624 		if (field->flags & HIST_FIELD_FL_STACKTRACE)
5625 			seq_puts(m, "stacktrace");
5626 		else
5627 			hist_field_print(m, field);
5628 	}
5629 
5630 	seq_puts(m, ":vals=");
5631 
5632 	for_each_hist_val_field(i, hist_data) {
5633 		field = hist_data->fields[i];
5634 		if (field->flags & HIST_FIELD_FL_VAR) {
5635 			have_var = true;
5636 			continue;
5637 		}
5638 
5639 		if (i == HITCOUNT_IDX)
5640 			seq_puts(m, "hitcount");
5641 		else {
5642 			seq_puts(m, ",");
5643 			hist_field_print(m, field);
5644 		}
5645 	}
5646 
5647 	if (have_var) {
5648 		unsigned int n = 0;
5649 
5650 		seq_puts(m, ":");
5651 
5652 		for_each_hist_val_field(i, hist_data) {
5653 			field = hist_data->fields[i];
5654 
5655 			if (field->flags & HIST_FIELD_FL_VAR) {
5656 				if (n++)
5657 					seq_puts(m, ",");
5658 				hist_field_print(m, field);
5659 			}
5660 		}
5661 	}
5662 
5663 	seq_puts(m, ":sort=");
5664 
5665 	for (i = 0; i < hist_data->n_sort_keys; i++) {
5666 		struct tracing_map_sort_key *sort_key;
5667 		unsigned int idx, first_key_idx;
5668 
5669 		/* skip VAR vals */
5670 		first_key_idx = hist_data->n_vals - hist_data->n_vars;
5671 
5672 		sort_key = &hist_data->sort_keys[i];
5673 		idx = sort_key->field_idx;
5674 
5675 		if (WARN_ON(idx >= HIST_FIELDS_MAX))
5676 			return -EINVAL;
5677 
5678 		if (i > 0)
5679 			seq_puts(m, ",");
5680 
5681 		if (idx == HITCOUNT_IDX)
5682 			seq_puts(m, "hitcount");
5683 		else {
5684 			if (idx >= first_key_idx)
5685 				idx += hist_data->n_vars;
5686 			hist_field_print(m, hist_data->fields[idx]);
5687 		}
5688 
5689 		if (sort_key->descending)
5690 			seq_puts(m, ".descending");
5691 	}
5692 	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
5693 	if (hist_data->enable_timestamps)
5694 		seq_printf(m, ":clock=%s", hist_data->attrs->clock);
5695 
5696 	print_actions_spec(m, hist_data);
5697 
5698 	if (data->filter_str)
5699 		seq_printf(m, " if %s", data->filter_str);
5700 
5701 	if (data->paused)
5702 		seq_puts(m, " [paused]");
5703 	else
5704 		seq_puts(m, " [active]");
5705 
5706 	seq_putc(m, '\n');
5707 
5708 	return 0;
5709 }
5710 
5711 static int event_hist_trigger_init(struct event_trigger_ops *ops,
5712 				   struct event_trigger_data *data)
5713 {
5714 	struct hist_trigger_data *hist_data = data->private_data;
5715 
5716 	if (!data->ref && hist_data->attrs->name)
5717 		save_named_trigger(hist_data->attrs->name, data);
5718 
5719 	data->ref++;
5720 
5721 	return 0;
5722 }
5723 
5724 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
5725 {
5726 	struct trace_event_file *file;
5727 	unsigned int i;
5728 	char *cmd;
5729 	int ret;
5730 
5731 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5732 		file = hist_data->field_var_hists[i]->hist_data->event_file;
5733 		cmd = hist_data->field_var_hists[i]->cmd;
5734 		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
5735 					      "!hist", "hist", cmd);
5736 	}
5737 }
5738 
5739 static void event_hist_trigger_free(struct event_trigger_ops *ops,
5740 				    struct event_trigger_data *data)
5741 {
5742 	struct hist_trigger_data *hist_data = data->private_data;
5743 
5744 	if (WARN_ON_ONCE(data->ref <= 0))
5745 		return;
5746 
5747 	data->ref--;
5748 	if (!data->ref) {
5749 		if (data->name)
5750 			del_named_trigger(data);
5751 
5752 		trigger_data_free(data);
5753 
5754 		remove_hist_vars(hist_data);
5755 
5756 		unregister_field_var_hists(hist_data);
5757 
5758 		destroy_hist_data(hist_data);
5759 	}
5760 }
5761 
5762 static struct event_trigger_ops event_hist_trigger_ops = {
5763 	.func			= event_hist_trigger,
5764 	.print			= event_hist_trigger_print,
5765 	.init			= event_hist_trigger_init,
5766 	.free			= event_hist_trigger_free,
5767 };
5768 
5769 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
5770 					 struct event_trigger_data *data)
5771 {
5772 	data->ref++;
5773 
5774 	save_named_trigger(data->named_data->name, data);
5775 
5776 	event_hist_trigger_init(ops, data->named_data);
5777 
5778 	return 0;
5779 }
5780 
5781 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
5782 					  struct event_trigger_data *data)
5783 {
5784 	if (WARN_ON_ONCE(data->ref <= 0))
5785 		return;
5786 
5787 	event_hist_trigger_free(ops, data->named_data);
5788 
5789 	data->ref--;
5790 	if (!data->ref) {
5791 		del_named_trigger(data);
5792 		trigger_data_free(data);
5793 	}
5794 }
5795 
5796 static struct event_trigger_ops event_hist_trigger_named_ops = {
5797 	.func			= event_hist_trigger,
5798 	.print			= event_hist_trigger_print,
5799 	.init			= event_hist_trigger_named_init,
5800 	.free			= event_hist_trigger_named_free,
5801 };
5802 
5803 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
5804 							    char *param)
5805 {
5806 	return &event_hist_trigger_ops;
5807 }
5808 
5809 static void hist_clear(struct event_trigger_data *data)
5810 {
5811 	struct hist_trigger_data *hist_data = data->private_data;
5812 
5813 	if (data->name)
5814 		pause_named_trigger(data);
5815 
5816 	tracepoint_synchronize_unregister();
5817 
5818 	tracing_map_clear(hist_data->map);
5819 
5820 	if (data->name)
5821 		unpause_named_trigger(data);
5822 }
5823 
5824 static bool compatible_field(struct ftrace_event_field *field,
5825 			     struct ftrace_event_field *test_field)
5826 {
5827 	if (field == test_field)
5828 		return true;
5829 	if (field == NULL || test_field == NULL)
5830 		return false;
5831 	if (strcmp(field->name, test_field->name) != 0)
5832 		return false;
5833 	if (strcmp(field->type, test_field->type) != 0)
5834 		return false;
5835 	if (field->size != test_field->size)
5836 		return false;
5837 	if (field->is_signed != test_field->is_signed)
5838 		return false;
5839 
5840 	return true;
5841 }
5842 
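/*
 * Return true if two hist triggers are equivalent: same fields, sort
 * keys and actions, matching filters unless ignore_filter is set, and
 * consistent with any named trigger involved.
 */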
5843 static bool hist_trigger_match(struct event_trigger_data *data,
5844 			       struct event_trigger_data *data_test,
5845 			       struct event_trigger_data *named_data,
5846 			       bool ignore_filter)
5847 {
5848 	struct tracing_map_sort_key *sort_key, *sort_key_test;
5849 	struct hist_trigger_data *hist_data, *hist_data_test;
5850 	struct hist_field *key_field, *key_field_test;
5851 	unsigned int i;
5852 
5853 	if (named_data && (named_data != data_test) &&
5854 	    (named_data != data_test->named_data))
5855 		return false;
5856 
5857 	if (!named_data && is_named_trigger(data_test))
5858 		return false;
5859 
5860 	hist_data = data->private_data;
5861 	hist_data_test = data_test->private_data;
5862 
5863 	if (hist_data->n_vals != hist_data_test->n_vals ||
5864 	    hist_data->n_fields != hist_data_test->n_fields ||
5865 	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
5866 		return false;
5867 
5868 	if (!ignore_filter) {
5869 		if ((data->filter_str && !data_test->filter_str) ||
5870 		   (!data->filter_str && data_test->filter_str))
5871 			return false;
5872 	}
5873 
5874 	for_each_hist_field(i, hist_data) {
5875 		key_field = hist_data->fields[i];
5876 		key_field_test = hist_data_test->fields[i];
5877 
5878 		if (key_field->flags != key_field_test->flags)
5879 			return false;
5880 		if (!compatible_field(key_field->field, key_field_test->field))
5881 			return false;
5882 		if (key_field->offset != key_field_test->offset)
5883 			return false;
5884 		if (key_field->size != key_field_test->size)
5885 			return false;
5886 		if (key_field->is_signed != key_field_test->is_signed)
5887 			return false;
5888 		if (!!key_field->var.name != !!key_field_test->var.name)
5889 			return false;
5890 		if (key_field->var.name &&
5891 		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
5892 			return false;
5893 	}
5894 
5895 	for (i = 0; i < hist_data->n_sort_keys; i++) {
5896 		sort_key = &hist_data->sort_keys[i];
5897 		sort_key_test = &hist_data_test->sort_keys[i];
5898 
5899 		if (sort_key->field_idx != sort_key_test->field_idx ||
5900 		    sort_key->descending != sort_key_test->descending)
5901 			return false;
5902 	}
5903 
5904 	if (!ignore_filter && data->filter_str &&
5905 	    (strcmp(data->filter_str, data_test->filter_str) != 0))
5906 		return false;
5907 
5908 	if (!actions_match(hist_data, hist_data_test))
5909 		return false;
5910 
5911 	return true;
5912 }
5913 
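/*
 * Register a hist trigger on an event file.  A matching existing
 * trigger is paused/continued/cleared (or -EEXIST returned) rather
 * than duplicated, and named triggers share the named trigger's hist
 * data.  Returns the number of triggers registered on success.
 */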
5914 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5915 				 struct event_trigger_data *data,
5916 				 struct trace_event_file *file)
5917 {
5918 	struct hist_trigger_data *hist_data = data->private_data;
5919 	struct event_trigger_data *test, *named_data = NULL;
5920 	struct trace_array *tr = file->tr;
5921 	int ret = 0;
5922 
5923 	if (hist_data->attrs->name) {
5924 		named_data = find_named_trigger(hist_data->attrs->name);
5925 		if (named_data) {
5926 			if (!hist_trigger_match(data, named_data, named_data,
5927 						true)) {
5928 				hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
5929 				ret = -EINVAL;
5930 				goto out;
5931 			}
5932 		}
5933 	}
5934 
5935 	if (hist_data->attrs->name && !named_data)
5936 		goto new;
5937 
5938 	lockdep_assert_held(&event_mutex);
5939 
5940 	list_for_each_entry(test, &file->triggers, list) {
5941 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5942 			if (!hist_trigger_match(data, test, named_data, false))
5943 				continue;
5944 			if (hist_data->attrs->pause)
5945 				test->paused = true;
5946 			else if (hist_data->attrs->cont)
5947 				test->paused = false;
5948 			else if (hist_data->attrs->clear)
5949 				hist_clear(test);
5950 			else {
5951 				hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
5952 				ret = -EEXIST;
5953 			}
5954 			goto out;
5955 		}
5956 	}
5957  new:
5958 	if (hist_data->attrs->cont || hist_data->attrs->clear) {
5959 		hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
5960 		ret = -ENOENT;
5961 		goto out;
5962 	}
5963 
5964 	if (hist_data->attrs->pause)
5965 		data->paused = true;
5966 
5967 	if (named_data) {
5968 		data->private_data = named_data->private_data;
5969 		set_named_trigger_data(data, named_data);
5970 		data->ops = &event_hist_trigger_named_ops;
5971 	}
5972 
5973 	if (data->ops->init) {
5974 		ret = data->ops->init(data->ops, data);
5975 		if (ret < 0)
5976 			goto out;
5977 	}
5978 
5979 	if (hist_data->enable_timestamps) {
5980 		char *clock = hist_data->attrs->clock;
5981 
5982 		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
5983 		if (ret) {
5984 			hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
5985 			goto out;
5986 		}
5987 
5988 		tracing_set_time_stamp_abs(file->tr, true);
5989 	}
5990 
5991 	if (named_data)
5992 		destroy_hist_data(hist_data);
5993 
5994 	ret++;
5995  out:
5996 	return ret;
5997 }
5998 
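/*
 * Add @data to @file's trigger list and enable event triggering for
 * the file.  On failure the trigger is removed from the list again and
 * a negative value is returned.
 */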
5999 static int hist_trigger_enable(struct event_trigger_data *data,
6000 			       struct trace_event_file *file)
6001 {
6002 	int ret = 0;
6003 
6004 	list_add_tail_rcu(&data->list, &file->triggers);
6005 
6006 	update_cond_flag(file);
6007 
6008 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
6009 		list_del_rcu(&data->list);
6010 		update_cond_flag(file);
6011 		ret--;
6012 	}
6013 
6014 	return ret;
6015 }
6016 
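/*
 * Return true if a hist trigger equivalent to @data (as determined by
 * hist_trigger_match()) is already registered on @file.  Called with
 * event_mutex held.
 */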
6017 static bool have_hist_trigger_match(struct event_trigger_data *data,
6018 				    struct trace_event_file *file)
6019 {
6020 	struct hist_trigger_data *hist_data = data->private_data;
6021 	struct event_trigger_data *test, *named_data = NULL;
6022 	bool match = false;
6023 
6024 	lockdep_assert_held(&event_mutex);
6025 
6026 	if (hist_data->attrs->name)
6027 		named_data = find_named_trigger(hist_data->attrs->name);
6028 
6029 	list_for_each_entry(test, &file->triggers, list) {
6030 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6031 			if (hist_trigger_match(data, test, named_data, false)) {
6032 				match = true;
6033 				break;
6034 			}
6035 		}
6036 	}
6037 
6038 	return match;
6039 }
6040 
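/*
 * Return true if the registered hist trigger matching @data has
 * variables that are still referenced by other triggers, meaning it
 * can't be removed yet.  Called with event_mutex held.
 */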
6041 static bool hist_trigger_check_refs(struct event_trigger_data *data,
6042 				    struct trace_event_file *file)
6043 {
6044 	struct hist_trigger_data *hist_data = data->private_data;
6045 	struct event_trigger_data *test, *named_data = NULL;
6046 
6047 	lockdep_assert_held(&event_mutex);
6048 
6049 	if (hist_data->attrs->name)
6050 		named_data = find_named_trigger(hist_data->attrs->name);
6051 
6052 	list_for_each_entry(test, &file->triggers, list) {
6053 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6054 			if (!hist_trigger_match(data, test, named_data, false))
6055 				continue;
6056 			hist_data = test->private_data;
6057 			if (check_var_refs(hist_data))
6058 				return true;
6059 			break;
6060 		}
6061 	}
6062 
6063 	return false;
6064 }
6065 
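/*
 * Remove the hist trigger matching @data from @file's trigger list,
 * disable it for the event and free it.  If the trigger had enabled
 * absolute timestamps, they are switched back off for the trace array.
 * Called with event_mutex held.
 */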
6066 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
6067 				    struct event_trigger_data *data,
6068 				    struct trace_event_file *file)
6069 {
6070 	struct hist_trigger_data *hist_data = data->private_data;
6071 	struct event_trigger_data *test, *named_data = NULL;
6072 	bool unregistered = false;
6073 
6074 	lockdep_assert_held(&event_mutex);
6075 
6076 	if (hist_data->attrs->name)
6077 		named_data = find_named_trigger(hist_data->attrs->name);
6078 
6079 	list_for_each_entry(test, &file->triggers, list) {
6080 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6081 			if (!hist_trigger_match(data, test, named_data, false))
6082 				continue;
6083 			unregistered = true;
6084 			list_del_rcu(&test->list);
6085 			trace_event_trigger_enable_disable(file, 0);
6086 			update_cond_flag(file);
6087 			break;
6088 		}
6089 	}
6090 
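	/* 'test' is only valid here if a matching trigger was found and removed above */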
6091 	if (unregistered && test->ops->free)
6092 		test->ops->free(test->ops, test);
6093 
6094 	if (hist_data->enable_timestamps) {
6095 		if (!hist_data->remove || unregistered)
6096 			tracing_set_time_stamp_abs(file->tr, false);
6097 	}
6098 }
6099 
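/*
 * Return true if any hist trigger registered on @file has variables
 * that are still referenced elsewhere.  Called with event_mutex held.
 */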
6100 static bool hist_file_check_refs(struct trace_event_file *file)
6101 {
6102 	struct hist_trigger_data *hist_data;
6103 	struct event_trigger_data *test;
6104 
6105 	lockdep_assert_held(&event_mutex);
6106 
6107 	list_for_each_entry(test, &file->triggers, list) {
6108 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6109 			hist_data = test->private_data;
6110 			if (check_var_refs(hist_data))
6111 				return true;
6112 		}
6113 	}
6114 
6115 	return false;
6116 }
6117 
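/*
 * Remove and free every hist trigger registered on @file, unless any
 * of them still has variables referenced elsewhere.  If @file's event
 * is a synthetic event, the reference each trigger held on it is
 * dropped as well.
 */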
6118 static void hist_unreg_all(struct trace_event_file *file)
6119 {
6120 	struct event_trigger_data *test, *n;
6121 	struct hist_trigger_data *hist_data;
6122 	struct synth_event *se;
6123 	const char *se_name;
6124 
6125 	lockdep_assert_held(&event_mutex);
6126 
6127 	if (hist_file_check_refs(file))
6128 		return;
6129 
6130 	list_for_each_entry_safe(test, n, &file->triggers, list) {
6131 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6132 			hist_data = test->private_data;
6133 			list_del_rcu(&test->list);
6134 			trace_event_trigger_enable_disable(file, 0);
6135 
6136 			se_name = trace_event_name(file->event_call);
6137 			se = find_synth_event(se_name);
6138 			if (se)
6139 				se->ref--;
6140 
6141 			update_cond_flag(file);
6142 			if (hist_data->enable_timestamps)
6143 				tracing_set_time_stamp_abs(file->tr, false);
6144 			if (test->ops->free)
6145 				test->ops->free(test->ops, test);
6146 		}
6147 	}
6148 }
6149 
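/*
 * Parse and apply a hist trigger command written to @file's trigger
 * file.  @param has the form '<trigger>[ if <filter>]', e.g.:
 *
 *   echo 'hist:keys=<field>[:vals=<field>][:sort=<field>] [if <filter>]' \
 *	>> events/<subsys>/<event>/trigger
 *
 * A leading '!' in @glob removes a previously registered matching
 * trigger instead of creating a new one.
 */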
6150 static int event_hist_trigger_func(struct event_command *cmd_ops,
6151 				   struct trace_event_file *file,
6152 				   char *glob, char *cmd, char *param)
6153 {
6154 	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
6155 	struct event_trigger_data *trigger_data;
6156 	struct hist_trigger_attrs *attrs;
6157 	struct event_trigger_ops *trigger_ops;
6158 	struct hist_trigger_data *hist_data;
6159 	struct synth_event *se;
6160 	const char *se_name;
6161 	bool remove = false;
6162 	char *trigger, *p;
6163 	int ret = 0;
6164 
6165 	lockdep_assert_held(&event_mutex);
6166 
6167 	if (glob && strlen(glob)) {
6168 		hist_err_clear();
6169 		last_cmd_set(file, param);
6170 	}
6171 
6172 	if (!param)
6173 		return -EINVAL;
6174 
6175 	if (glob[0] == '!')
6176 		remove = true;
6177 
6178 	/*
6179 	 * separate the trigger from the filter (k:v [if filter])
6180 	 * allowing for whitespace in the trigger
6181 	 */
6182 	p = trigger = param;
6183 	do {
6184 		p = strstr(p, "if");
6185 		if (!p)
6186 			break;
6187 		if (p == param)
6188 			return -EINVAL;
6189 		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
6190 			p++;
6191 			continue;
6192 		}
6193 		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
6194 			return -EINVAL;
6195 		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
6196 			p++;
6197 			continue;
6198 		}
6199 		break;
6200 	} while (p);
6201 
6202 	if (!p)
6203 		param = NULL;
6204 	else {
6205 		*(p - 1) = '\0';
6206 		param = strstrip(p);
6207 		trigger = strstrip(trigger);
6208 	}
6209 
6210 	attrs = parse_hist_trigger_attrs(file->tr, trigger);
6211 	if (IS_ERR(attrs))
6212 		return PTR_ERR(attrs);
6213 
6214 	if (attrs->map_bits)
6215 		hist_trigger_bits = attrs->map_bits;
6216 
6217 	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
6218 	if (IS_ERR(hist_data)) {
6219 		destroy_hist_trigger_attrs(attrs);
6220 		return PTR_ERR(hist_data);
6221 	}
6222 
6223 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
6224 
6225 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
6226 	if (!trigger_data) {
6227 		ret = -ENOMEM;
6228 		goto out_free;
6229 	}
6230 
6231 	trigger_data->count = -1;
6232 	trigger_data->ops = trigger_ops;
6233 	trigger_data->cmd_ops = cmd_ops;
6234 
6235 	INIT_LIST_HEAD(&trigger_data->list);
6236 	RCU_INIT_POINTER(trigger_data->filter, NULL);
6237 
6238 	trigger_data->private_data = hist_data;
6239 
6240 	/* if param is non-empty, it's supposed to be a filter */
6241 	if (param && cmd_ops->set_filter) {
6242 		ret = cmd_ops->set_filter(param, trigger_data, file);
6243 		if (ret < 0)
6244 			goto out_free;
6245 	}
6246 
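	/* '!hist:...': remove the matching trigger unless its variables are still referenced */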
6247 	if (remove) {
6248 		if (!have_hist_trigger_match(trigger_data, file))
6249 			goto out_free;
6250 
6251 		if (hist_trigger_check_refs(trigger_data, file)) {
6252 			ret = -EBUSY;
6253 			goto out_free;
6254 		}
6255 
6256 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
6257 		se_name = trace_event_name(file->event_call);
6258 		se = find_synth_event(se_name);
6259 		if (se)
6260 			se->ref--;
6261 		ret = 0;
6262 		goto out_free;
6263 	}
6264 
6265 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
6266 	/*
6267 	 * On success, cmd_ops->reg() returns the number of triggers
6268 	 * registered; zero means nothing was registered.  Treat that as
6269 	 * a failure too, unless this was a pause, cont or clear request.
6270 	 */
6271 	if (!ret) {
6272 		if (!(attrs->pause || attrs->cont || attrs->clear))
6273 			ret = -ENOENT;
6274 		goto out_free;
6275 	} else if (ret < 0)
6276 		goto out_free;
6277 
6278 	if (get_named_trigger_data(trigger_data))
6279 		goto enable;
6280 
6281 	if (has_hist_vars(hist_data))
6282 		save_hist_vars(hist_data);
6283 
6284 	ret = create_actions(hist_data);
6285 	if (ret)
6286 		goto out_unreg;
6287 
6288 	ret = tracing_map_init(hist_data->map);
6289 	if (ret)
6290 		goto out_unreg;
6291 enable:
6292 	ret = hist_trigger_enable(trigger_data, file);
6293 	if (ret)
6294 		goto out_unreg;
6295 
6296 	se_name = trace_event_name(file->event_call);
6297 	se = find_synth_event(se_name);
6298 	if (se)
6299 		se->ref++;
6300 	/* Just return zero, not the number of registered triggers */
6301 	ret = 0;
6302  out:
6303 	if (ret == 0)
6304 		hist_err_clear();
6305 
6306 	return ret;
6307  out_unreg:
6308 	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
6309  out_free:
6310 	if (cmd_ops->set_filter)
6311 		cmd_ops->set_filter(NULL, trigger_data, NULL);
6312 
6313 	remove_hist_vars(hist_data);
6314 
6315 	kfree(trigger_data);
6316 
6317 	destroy_hist_data(hist_data);
6318 	goto out;
6319 }
6320 
6321 static struct event_command trigger_hist_cmd = {
6322 	.name			= "hist",
6323 	.trigger_type		= ETT_EVENT_HIST,
6324 	.flags			= EVENT_CMD_FL_NEEDS_REC,
6325 	.func			= event_hist_trigger_func,
6326 	.reg			= hist_register_trigger,
6327 	.unreg			= hist_unregister_trigger,
6328 	.unreg_all		= hist_unreg_all,
6329 	.get_trigger_ops	= event_hist_get_trigger_ops,
6330 	.set_filter		= set_trigger_filter,
6331 };
6332 
6333 __init int register_trigger_hist_cmd(void)
6334 {
6335 	int ret;
6336 
6337 	ret = register_event_command(&trigger_hist_cmd);
6338 	WARN_ON(ret < 0);
6339 
6340 	return ret;
6341 }
6342 
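/*
 * Trigger callback for enable_hist/disable_hist: unpause (enable) or
 * pause (disable) every hist trigger registered on the target event
 * file when this event fires.
 */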
6343 static void
6344 hist_enable_trigger(struct event_trigger_data *data, void *rec,
6345 		    struct ring_buffer_event *event)
6346 {
6347 	struct enable_trigger_data *enable_data = data->private_data;
6348 	struct event_trigger_data *test;
6349 
6350 	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
6351 				lockdep_is_held(&event_mutex)) {
6352 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6353 			if (enable_data->enable)
6354 				test->paused = false;
6355 			else
6356 				test->paused = true;
6357 		}
6358 	}
6359 }
6360 
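/*
 * Counted variant of the above: do nothing once the count reaches
 * zero, otherwise decrement it (-1 means no limit) and fire the
 * trigger.
 */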
6361 static void
6362 hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
6363 			  struct ring_buffer_event *event)
6364 {
6365 	if (!data->count)
6366 		return;
6367 
6368 	if (data->count != -1)
6369 		(data->count)--;
6370 
6371 	hist_enable_trigger(data, rec, event);
6372 }
6373 
6374 static struct event_trigger_ops hist_enable_trigger_ops = {
6375 	.func			= hist_enable_trigger,
6376 	.print			= event_enable_trigger_print,
6377 	.init			= event_trigger_init,
6378 	.free			= event_enable_trigger_free,
6379 };
6380 
6381 static struct event_trigger_ops hist_enable_count_trigger_ops = {
6382 	.func			= hist_enable_count_trigger,
6383 	.print			= event_enable_trigger_print,
6384 	.init			= event_trigger_init,
6385 	.free			= event_enable_trigger_free,
6386 };
6387 
6388 static struct event_trigger_ops hist_disable_trigger_ops = {
6389 	.func			= hist_enable_trigger,
6390 	.print			= event_enable_trigger_print,
6391 	.init			= event_trigger_init,
6392 	.free			= event_enable_trigger_free,
6393 };
6394 
6395 static struct event_trigger_ops hist_disable_count_trigger_ops = {
6396 	.func			= hist_enable_count_trigger,
6397 	.print			= event_enable_trigger_print,
6398 	.init			= event_trigger_init,
6399 	.free			= event_enable_trigger_free,
6400 };
6401 
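/*
 * Pick the trigger ops for an enable_hist or disable_hist command,
 * depending on whether a count parameter was supplied.
 */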
6402 static struct event_trigger_ops *
6403 hist_enable_get_trigger_ops(char *cmd, char *param)
6404 {
6405 	struct event_trigger_ops *ops;
6406 	bool enable;
6407 
6408 	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
6409 
6410 	if (enable)
6411 		ops = param ? &hist_enable_count_trigger_ops :
6412 			&hist_enable_trigger_ops;
6413 	else
6414 		ops = param ? &hist_disable_count_trigger_ops :
6415 			&hist_disable_trigger_ops;
6416 
6417 	return ops;
6418 }
6419 
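/*
 * Remove and free all enable_hist/disable_hist triggers registered on
 * @file.
 */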
6420 static void hist_enable_unreg_all(struct trace_event_file *file)
6421 {
6422 	struct event_trigger_data *test, *n;
6423 
6424 	list_for_each_entry_safe(test, n, &file->triggers, list) {
6425 		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
6426 			list_del_rcu(&test->list);
6427 			update_cond_flag(file);
6428 			trace_event_trigger_enable_disable(file, 0);
6429 			if (test->ops->free)
6430 				test->ops->free(test->ops, test);
6431 		}
6432 	}
6433 }
6434 
6435 static struct event_command trigger_hist_enable_cmd = {
6436 	.name			= ENABLE_HIST_STR,
6437 	.trigger_type		= ETT_HIST_ENABLE,
6438 	.func			= event_enable_trigger_func,
6439 	.reg			= event_enable_register_trigger,
6440 	.unreg			= event_enable_unregister_trigger,
6441 	.unreg_all		= hist_enable_unreg_all,
6442 	.get_trigger_ops	= hist_enable_get_trigger_ops,
6443 	.set_filter		= set_trigger_filter,
6444 };
6445 
6446 static struct event_command trigger_hist_disable_cmd = {
6447 	.name			= DISABLE_HIST_STR,
6448 	.trigger_type		= ETT_HIST_ENABLE,
6449 	.func			= event_enable_trigger_func,
6450 	.reg			= event_enable_register_trigger,
6451 	.unreg			= event_enable_unregister_trigger,
6452 	.unreg_all		= hist_enable_unreg_all,
6453 	.get_trigger_ops	= hist_enable_get_trigger_ops,
6454 	.set_filter		= set_trigger_filter,
6455 };
6456 
6457 static __init void unregister_trigger_hist_enable_disable_cmds(void)
6458 {
6459 	unregister_event_command(&trigger_hist_enable_cmd);
6460 	unregister_event_command(&trigger_hist_disable_cmd);
6461 }
6462 
6463 __init int register_trigger_hist_enable_disable_cmds(void)
6464 {
6465 	int ret;
6466 
6467 	ret = register_event_command(&trigger_hist_enable_cmd);
6468 	if (WARN_ON(ret < 0))
6469 		return ret;
6470 	ret = register_event_command(&trigger_hist_disable_cmd);
6471 	if (WARN_ON(ret < 0))
6472 		unregister_trigger_hist_enable_disable_cmds();
6473 
6474 	return ret;
6475 }
6476 
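/*
 * Register the synthetic event dyn_event ops and create the tracefs
 * 'synthetic_events' file used to define synthetic events.
 */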
6477 static __init int trace_events_hist_init(void)
6478 {
6479 	struct dentry *entry = NULL;
6480 	struct dentry *d_tracer;
6481 	int err = 0;
6482 
6483 	err = dyn_event_register(&synth_event_ops);
6484 	if (err) {
6485 		pr_warn("Could not register synth_event_ops\n");
6486 		return err;
6487 	}
6488 
6489 	d_tracer = tracing_init_dentry();
6490 	if (IS_ERR(d_tracer)) {
6491 		err = PTR_ERR(d_tracer);
6492 		goto err;
6493 	}
6494 
6495 	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
6496 				    NULL, &synth_events_fops);
6497 	if (!entry) {
6498 		err = -ENODEV;
6499 		goto err;
6500 	}
6501 
6502 	return err;
6503  err:
6504 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
6505 
6506 	return err;
6507 }
6508 
6509 fs_initcall(trace_events_hist_init);
6510