xref: /linux/kernel/trace/trace_events_hist.c (revision 06afb0f36106ecb839c5e2509905e68c1e2677de)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_hist - trace event hist triggers
4  *
5  * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 
21 #include "tracing_map.h"
22 #include "trace_synth.h"
23 
24 #define ERRORS								\
25 	C(NONE,			"No error"),				\
26 	C(DUPLICATE_VAR,	"Variable already defined"),		\
27 	C(VAR_NOT_UNIQUE,	"Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
28 	C(TOO_MANY_VARS,	"Too many variables defined"),		\
29 	C(MALFORMED_ASSIGNMENT,	"Malformed assignment"),		\
30 	C(NAMED_MISMATCH,	"Named hist trigger doesn't match existing named trigger (includes variables)"), \
31 	C(TRIGGER_EEXIST,	"Hist trigger already exists"),		\
32 	C(TRIGGER_ENOENT_CLEAR,	"Can't clear or continue a nonexistent hist trigger"), \
33 	C(SET_CLOCK_FAIL,	"Couldn't set trace_clock"),		\
34 	C(BAD_FIELD_MODIFIER,	"Invalid field modifier"),		\
35 	C(TOO_MANY_SUBEXPR,	"Too many subexpressions (3 max)"),	\
36 	C(TIMESTAMP_MISMATCH,	"Timestamp units in expression don't match"), \
37 	C(TOO_MANY_FIELD_VARS,	"Too many field variables defined"),	\
38 	C(EVENT_FILE_NOT_FOUND,	"Event file not found"),		\
39 	C(HIST_NOT_FOUND,	"Matching event histogram not found"),	\
40 	C(HIST_CREATE_FAIL,	"Couldn't create histogram for field"),	\
41 	C(SYNTH_VAR_NOT_FOUND,	"Couldn't find synthetic variable"),	\
42 	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"),	\
43 	C(SYNTH_TYPE_MISMATCH,	"Param type doesn't match synthetic event field type"), \
44 	C(SYNTH_COUNT_MISMATCH,	"Param count doesn't match synthetic event field count"), \
45 	C(FIELD_VAR_PARSE_FAIL,	"Couldn't parse field variable"),	\
46 	C(VAR_CREATE_FIND_FAIL,	"Couldn't create or find variable"),	\
47 	C(ONX_NOT_VAR,		"For onmax(x) or onchange(x), x must be a variable"), \
48 	C(ONX_VAR_NOT_FOUND,	"Couldn't find onmax or onchange variable"), \
49 	C(ONX_VAR_CREATE_FAIL,	"Couldn't create onmax or onchange variable"), \
50 	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"),	\
51 	C(TOO_MANY_PARAMS,	"Too many action params"),		\
52 	C(PARAM_NOT_FOUND,	"Couldn't find param"),			\
53 	C(INVALID_PARAM,	"Invalid action param"),		\
54 	C(ACTION_NOT_FOUND,	"No action found"),			\
55 	C(NO_SAVE_PARAMS,	"No params found for save()"),		\
56 	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
57 	C(ACTION_MISMATCH,	"Handler doesn't support action"),	\
58 	C(NO_CLOSING_PAREN,	"No closing paren found"),		\
59 	C(SUBSYS_NOT_FOUND,	"Missing subsystem"),			\
60 	C(INVALID_SUBSYS_EVENT,	"Invalid subsystem or event name"),	\
61 	C(INVALID_REF_KEY,	"Using variable references in keys not supported"), \
62 	C(VAR_NOT_FOUND,	"Couldn't find variable"),		\
63 	C(FIELD_NOT_FOUND,	"Couldn't find field"),			\
64 	C(EMPTY_ASSIGNMENT,	"Empty assignment"),			\
65 	C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),		\
66 	C(EMPTY_SORT_FIELD,	"Empty sort field"),			\
67 	C(TOO_MANY_SORT_FIELDS,	"Too many sort fields (Max = 2)"),	\
68 	C(INVALID_SORT_FIELD,	"Sort field must be a key or a val"),	\
69 	C(INVALID_STR_OPERAND,	"String type can not be an operand in expression"), \
70 	C(EXPECT_NUMBER,	"Expecting numeric literal"),		\
71 	C(UNARY_MINUS_SUBEXPR,	"Unary minus not supported in sub-expressions"), \
72 	C(DIVISION_BY_ZERO,	"Division by zero"),			\
73 	C(NEED_NOHC_VAL,	"Non-hitcount value is required for 'nohitcount'"),
74 
75 #undef C
76 #define C(a, b)		HIST_ERR_##a
77 
78 enum { ERRORS };
79 
80 #undef C
81 #define C(a, b)		b
82 
83 static const char *err_text[] = { ERRORS };
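/*
 * Note on the construction above (illustrative): ERRORS is an X-macro
 * list.  With C(a, b) defined as HIST_ERR_##a it expands into the
 * anonymous enum (HIST_ERR_NONE, HIST_ERR_DUPLICATE_VAR, ...), and with
 * C(a, b) defined as b it expands into the err_text[] string table, so
 * each HIST_ERR_* value indexes its own message, e.g.
 * err_text[HIST_ERR_DUPLICATE_VAR] == "Variable already defined".
 */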
84 
85 struct hist_field;
86 
87 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
88 				struct tracing_map_elt *elt,
89 				struct trace_buffer *buffer,
90 				struct ring_buffer_event *rbe,
91 				void *event);
92 
93 #define HIST_FIELD_OPERANDS_MAX	2
94 #define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
95 #define HIST_ACTIONS_MAX	8
96 #define HIST_CONST_DIGITS_MAX	21
97 #define HIST_DIV_SHIFT		20  /* For optimizing division by constants */
98 
99 enum field_op_id {
100 	FIELD_OP_NONE,
101 	FIELD_OP_PLUS,
102 	FIELD_OP_MINUS,
103 	FIELD_OP_UNARY_MINUS,
104 	FIELD_OP_DIV,
105 	FIELD_OP_MULT,
106 };
107 
108 enum hist_field_fn {
109 	HIST_FIELD_FN_NOP,
110 	HIST_FIELD_FN_VAR_REF,
111 	HIST_FIELD_FN_COUNTER,
112 	HIST_FIELD_FN_CONST,
113 	HIST_FIELD_FN_LOG2,
114 	HIST_FIELD_FN_BUCKET,
115 	HIST_FIELD_FN_TIMESTAMP,
116 	HIST_FIELD_FN_CPU,
117 	HIST_FIELD_FN_STRING,
118 	HIST_FIELD_FN_DYNSTRING,
119 	HIST_FIELD_FN_RELDYNSTRING,
120 	HIST_FIELD_FN_PSTRING,
121 	HIST_FIELD_FN_S64,
122 	HIST_FIELD_FN_U64,
123 	HIST_FIELD_FN_S32,
124 	HIST_FIELD_FN_U32,
125 	HIST_FIELD_FN_S16,
126 	HIST_FIELD_FN_U16,
127 	HIST_FIELD_FN_S8,
128 	HIST_FIELD_FN_U8,
129 	HIST_FIELD_FN_UMINUS,
130 	HIST_FIELD_FN_MINUS,
131 	HIST_FIELD_FN_PLUS,
132 	HIST_FIELD_FN_DIV,
133 	HIST_FIELD_FN_MULT,
134 	HIST_FIELD_FN_DIV_POWER2,
135 	HIST_FIELD_FN_DIV_NOT_POWER2,
136 	HIST_FIELD_FN_DIV_MULT_SHIFT,
137 	HIST_FIELD_FN_EXECNAME,
138 	HIST_FIELD_FN_STACK,
139 };
140 
141 /*
142  * A hist_var (histogram variable) contains variable information for
143  * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
144  * flag set.  A hist_var has a variable name, e.g. ts0, and is
145  * associated with a given histogram trigger, as specified by
146  * hist_data.  The hist_var idx is the unique index assigned to the
147  * variable by the hist trigger's tracing_map.  The idx is what is
148  * used to set a variable's value and, by a variable reference, to
149  * retrieve it.
150  */
151 struct hist_var {
152 	char				*name;
153 	struct hist_trigger_data	*hist_data;
154 	unsigned int			idx;
155 };
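/*
 * For example (illustrative, see Documentation/trace/histogram.rst): a
 * trigger such as
 *
 *	hist:keys=pid:ts0=common_timestamp.usecs
 *
 * defines a variable named ts0 on its event.  Another hist trigger can
 * then reference it as $ts0, or by its fully qualified name
 * (subsys.event.ts0) if the short name alone would be ambiguous.
 */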
156 
157 struct hist_field {
158 	struct ftrace_event_field	*field;
159 	unsigned long			flags;
160 	unsigned long			buckets;
161 	const char			*type;
162 	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
163 	struct hist_trigger_data	*hist_data;
164 	enum hist_field_fn		fn_num;
165 	unsigned int			ref;
166 	unsigned int			size;
167 	unsigned int			offset;
168 	unsigned int                    is_signed;
169 
170 	/*
171 	 * Variable fields contain variable-specific info in var.
172 	 */
173 	struct hist_var			var;
174 	enum field_op_id		operator;
175 	char				*system;
176 	char				*event_name;
177 
178 	/*
179 	 * The name field is used for EXPR and VAR_REF fields.  VAR
180 	 * fields contain the variable name in var.name.
181 	 */
182 	char				*name;
183 
184 	/*
185 	 * When a histogram trigger is hit, if it has any references
186 	 * to variables, the values of those variables are collected
187 	 * into a var_ref_vals array by resolve_var_refs().  The
188 	 * current value of each variable is read from the tracing_map
189 	 * using the hist field's hist_var.idx and entered into the
190 	 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
191 	 */
192 	unsigned int			var_ref_idx;
193 	bool                            read_once;
194 
195 	unsigned int			var_str_idx;
196 
197 	/* Numeric literals are represented as u64 */
198 	u64				constant;
199 	/* Used to optimize division by constants */
200 	u64				div_multiplier;
201 };
202 
203 static u64 hist_fn_call(struct hist_field *hist_field,
204 			struct tracing_map_elt *elt,
205 			struct trace_buffer *buffer,
206 			struct ring_buffer_event *rbe,
207 			void *event);
208 
209 static u64 hist_field_const(struct hist_field *field,
210 			   struct tracing_map_elt *elt,
211 			   struct trace_buffer *buffer,
212 			   struct ring_buffer_event *rbe,
213 			   void *event)
214 {
215 	return field->constant;
216 }
217 
218 static u64 hist_field_counter(struct hist_field *field,
219 			      struct tracing_map_elt *elt,
220 			      struct trace_buffer *buffer,
221 			      struct ring_buffer_event *rbe,
222 			      void *event)
223 {
224 	return 1;
225 }
226 
227 static u64 hist_field_string(struct hist_field *hist_field,
228 			     struct tracing_map_elt *elt,
229 			     struct trace_buffer *buffer,
230 			     struct ring_buffer_event *rbe,
231 			     void *event)
232 {
233 	char *addr = (char *)(event + hist_field->field->offset);
234 
235 	return (u64)(unsigned long)addr;
236 }
237 
238 static u64 hist_field_dynstring(struct hist_field *hist_field,
239 				struct tracing_map_elt *elt,
240 				struct trace_buffer *buffer,
241 				struct ring_buffer_event *rbe,
242 				void *event)
243 {
244 	u32 str_item = *(u32 *)(event + hist_field->field->offset);
245 	int str_loc = str_item & 0xffff;
246 	char *addr = (char *)(event + str_loc);
247 
248 	return (u64)(unsigned long)addr;
249 }
250 
251 static u64 hist_field_reldynstring(struct hist_field *hist_field,
252 				   struct tracing_map_elt *elt,
253 				   struct trace_buffer *buffer,
254 				   struct ring_buffer_event *rbe,
255 				   void *event)
256 {
257 	u32 *item = event + hist_field->field->offset;
258 	u32 str_item = *item;
259 	int str_loc = str_item & 0xffff;
260 	char *addr = (char *)&item[1] + str_loc;
261 
262 	return (u64)(unsigned long)addr;
263 }
264 
265 static u64 hist_field_pstring(struct hist_field *hist_field,
266 			      struct tracing_map_elt *elt,
267 			      struct trace_buffer *buffer,
268 			      struct ring_buffer_event *rbe,
269 			      void *event)
270 {
271 	char **addr = (char **)(event + hist_field->field->offset);
272 
273 	return (u64)(unsigned long)*addr;
274 }
275 
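/*
 * Worked example (illustrative) for the .log2 modifier handled below:
 * a value of 300 is rounded up to the next power of two (512), and
 * ilog2(512) == 9, so the entry is accounted to the "9" (2^9) bucket.
 */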
276 static u64 hist_field_log2(struct hist_field *hist_field,
277 			   struct tracing_map_elt *elt,
278 			   struct trace_buffer *buffer,
279 			   struct ring_buffer_event *rbe,
280 			   void *event)
281 {
282 	struct hist_field *operand = hist_field->operands[0];
283 
284 	u64 val = hist_fn_call(operand, elt, buffer, rbe, event);
285 
286 	return (u64) ilog2(roundup_pow_of_two(val));
287 }
288 
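/*
 * Worked example (illustrative) for the .buckets=N modifier handled
 * below: with buckets == 100, a value of 1234 maps to (1234 / 100) * 100
 * == 1200, i.e. all values in [1200, 1299] share one histogram entry.
 */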
289 static u64 hist_field_bucket(struct hist_field *hist_field,
290 			     struct tracing_map_elt *elt,
291 			     struct trace_buffer *buffer,
292 			     struct ring_buffer_event *rbe,
293 			     void *event)
294 {
295 	struct hist_field *operand = hist_field->operands[0];
296 	unsigned long buckets = hist_field->buckets;
297 
298 	u64 val = hist_fn_call(operand, elt, buffer, rbe, event);
299 
300 	if (WARN_ON_ONCE(!buckets))
301 		return val;
302 
303 	if (val >= LONG_MAX)
304 		val = div64_ul(val, buckets);
305 	else
306 		val = (u64)((unsigned long)val / buckets);
307 	return val * buckets;
308 }
309 
310 static u64 hist_field_plus(struct hist_field *hist_field,
311 			   struct tracing_map_elt *elt,
312 			   struct trace_buffer *buffer,
313 			   struct ring_buffer_event *rbe,
314 			   void *event)
315 {
316 	struct hist_field *operand1 = hist_field->operands[0];
317 	struct hist_field *operand2 = hist_field->operands[1];
318 
319 	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
320 	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
321 
322 	return val1 + val2;
323 }
324 
325 static u64 hist_field_minus(struct hist_field *hist_field,
326 			    struct tracing_map_elt *elt,
327 			    struct trace_buffer *buffer,
328 			    struct ring_buffer_event *rbe,
329 			    void *event)
330 {
331 	struct hist_field *operand1 = hist_field->operands[0];
332 	struct hist_field *operand2 = hist_field->operands[1];
333 
334 	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
335 	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
336 
337 	return val1 - val2;
338 }
339 
340 static u64 hist_field_div(struct hist_field *hist_field,
341 			   struct tracing_map_elt *elt,
342 			   struct trace_buffer *buffer,
343 			   struct ring_buffer_event *rbe,
344 			   void *event)
345 {
346 	struct hist_field *operand1 = hist_field->operands[0];
347 	struct hist_field *operand2 = hist_field->operands[1];
348 
349 	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
350 	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
351 
352 	/* Return -1 for the undefined case */
353 	if (!val2)
354 		return -1;
355 
356 	/* Use shift if the divisor is a power of 2 */
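	/*
	 * Illustrative: (val2 & (val2 - 1)) == 0 only when a single bit is
	 * set, e.g. 8 (0b1000) & 7 (0b0111) == 0, so __ffs64(8) == 3 and
	 * the division becomes val1 >> 3.  (val2 == 0 was handled above.)
	 */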
357 	if (!(val2 & (val2 - 1)))
358 		return val1 >> __ffs64(val2);
359 
360 	return div64_u64(val1, val2);
361 }
362 
363 static u64 div_by_power_of_two(struct hist_field *hist_field,
364 				struct tracing_map_elt *elt,
365 				struct trace_buffer *buffer,
366 				struct ring_buffer_event *rbe,
367 				void *event)
368 {
369 	struct hist_field *operand1 = hist_field->operands[0];
370 	struct hist_field *operand2 = hist_field->operands[1];
371 
372 	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
373 
374 	return val1 >> __ffs64(operand2->constant);
375 }
376 
377 static u64 div_by_not_power_of_two(struct hist_field *hist_field,
378 				struct tracing_map_elt *elt,
379 				struct trace_buffer *buffer,
380 				struct ring_buffer_event *rbe,
381 				void *event)
382 {
383 	struct hist_field *operand1 = hist_field->operands[0];
384 	struct hist_field *operand2 = hist_field->operands[1];
385 
386 	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
387 
388 	return div64_u64(val1, operand2->constant);
389 }
390 
391 static u64 div_by_mult_and_shift(struct hist_field *hist_field,
392 				struct tracing_map_elt *elt,
393 				struct trace_buffer *buffer,
394 				struct ring_buffer_event *rbe,
395 				void *event)
396 {
397 	struct hist_field *operand1 = hist_field->operands[0];
398 	struct hist_field *operand2 = hist_field->operands[1];
399 
400 	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
401 
402 	/*
403 	 * If the divisor is a constant, do a multiplication and shift instead.
404 	 *
405 	 * Choose Z = some power of 2. If Y <= Z, then:
406 	 *     X / Y = (X * (Z / Y)) / Z
407 	 *
408 	 * (Z / Y) is a constant (mult) which is calculated at parse time, so:
409 	 *     X / Y = (X * mult) / Z
410 	 *
411 	 * The division by Z can be replaced by a shift since Z is a power of 2:
412 	 *     X / Y = (X * mult) >> HIST_DIV_SHIFT
413 	 *
414 	 * As long as X < Z, the results will not be off by more than 1.
415 	 */
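	/*
	 * Worked example (illustrative) with HIST_DIV_SHIFT == 20, Y == 3:
	 * mult == (1 << 20) / 3 == 349525.  For X == 100,
	 * (100 * 349525 + ((1 << 20) - 1)) >> 20 == 34, versus the exact
	 * 100 / 3 == 33, i.e. off by at most one as noted above.
	 */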
416 	if (val1 < (1 << HIST_DIV_SHIFT)) {
417 		u64 mult = operand2->div_multiplier;
418 
419 		return (val1 * mult + ((1 << HIST_DIV_SHIFT) - 1)) >> HIST_DIV_SHIFT;
420 	}
421 
422 	return div64_u64(val1, operand2->constant);
423 }
424 
425 static u64 hist_field_mult(struct hist_field *hist_field,
426 			   struct tracing_map_elt *elt,
427 			   struct trace_buffer *buffer,
428 			   struct ring_buffer_event *rbe,
429 			   void *event)
430 {
431 	struct hist_field *operand1 = hist_field->operands[0];
432 	struct hist_field *operand2 = hist_field->operands[1];
433 
434 	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
435 	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
436 
437 	return val1 * val2;
438 }
439 
440 static u64 hist_field_unary_minus(struct hist_field *hist_field,
441 				  struct tracing_map_elt *elt,
442 				  struct trace_buffer *buffer,
443 				  struct ring_buffer_event *rbe,
444 				  void *event)
445 {
446 	struct hist_field *operand = hist_field->operands[0];
447 
448 	s64 sval = (s64)hist_fn_call(operand, elt, buffer, rbe, event);
449 	u64 val = (u64)-sval;
450 
451 	return val;
452 }
453 
454 #define DEFINE_HIST_FIELD_FN(type)					\
455 	static u64 hist_field_##type(struct hist_field *hist_field,	\
456 				     struct tracing_map_elt *elt,	\
457 				     struct trace_buffer *buffer,	\
458 				     struct ring_buffer_event *rbe,	\
459 				     void *event)			\
460 {									\
461 	type *addr = (type *)(event + hist_field->field->offset);	\
462 									\
463 	return (u64)(unsigned long)*addr;				\
464 }
465 
466 DEFINE_HIST_FIELD_FN(s64);
467 DEFINE_HIST_FIELD_FN(u64);
468 DEFINE_HIST_FIELD_FN(s32);
469 DEFINE_HIST_FIELD_FN(u32);
470 DEFINE_HIST_FIELD_FN(s16);
471 DEFINE_HIST_FIELD_FN(u16);
472 DEFINE_HIST_FIELD_FN(s8);
473 DEFINE_HIST_FIELD_FN(u8);
474 
475 #define for_each_hist_field(i, hist_data)	\
476 	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
477 
478 #define for_each_hist_val_field(i, hist_data)	\
479 	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
480 
481 #define for_each_hist_key_field(i, hist_data)	\
482 	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
483 
484 #define HITCOUNT_IDX		0
485 #define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
486 
487 enum hist_field_flags {
488 	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
489 	HIST_FIELD_FL_KEY		= 1 << 1,
490 	HIST_FIELD_FL_STRING		= 1 << 2,
491 	HIST_FIELD_FL_HEX		= 1 << 3,
492 	HIST_FIELD_FL_SYM		= 1 << 4,
493 	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
494 	HIST_FIELD_FL_EXECNAME		= 1 << 6,
495 	HIST_FIELD_FL_SYSCALL		= 1 << 7,
496 	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
497 	HIST_FIELD_FL_LOG2		= 1 << 9,
498 	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
499 	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
500 	HIST_FIELD_FL_VAR		= 1 << 12,
501 	HIST_FIELD_FL_EXPR		= 1 << 13,
502 	HIST_FIELD_FL_VAR_REF		= 1 << 14,
503 	HIST_FIELD_FL_CPU		= 1 << 15,
504 	HIST_FIELD_FL_ALIAS		= 1 << 16,
505 	HIST_FIELD_FL_BUCKET		= 1 << 17,
506 	HIST_FIELD_FL_CONST		= 1 << 18,
507 	HIST_FIELD_FL_PERCENT		= 1 << 19,
508 	HIST_FIELD_FL_GRAPH		= 1 << 20,
509 };
510 
511 struct var_defs {
512 	unsigned int	n_vars;
513 	char		*name[TRACING_MAP_VARS_MAX];
514 	char		*expr[TRACING_MAP_VARS_MAX];
515 };
516 
517 struct hist_trigger_attrs {
518 	char		*keys_str;
519 	char		*vals_str;
520 	char		*sort_key_str;
521 	char		*name;
522 	char		*clock;
523 	bool		pause;
524 	bool		cont;
525 	bool		clear;
526 	bool		ts_in_usecs;
527 	bool		no_hitcount;
528 	unsigned int	map_bits;
529 
530 	char		*assignment_str[TRACING_MAP_VARS_MAX];
531 	unsigned int	n_assignments;
532 
533 	char		*action_str[HIST_ACTIONS_MAX];
534 	unsigned int	n_actions;
535 
536 	struct var_defs	var_defs;
537 };
538 
539 struct field_var {
540 	struct hist_field	*var;
541 	struct hist_field	*val;
542 };
543 
544 struct field_var_hist {
545 	struct hist_trigger_data	*hist_data;
546 	char				*cmd;
547 };
548 
549 struct hist_trigger_data {
550 	struct hist_field               *fields[HIST_FIELDS_MAX];
551 	unsigned int			n_vals;
552 	unsigned int			n_keys;
553 	unsigned int			n_fields;
554 	unsigned int			n_vars;
555 	unsigned int			n_var_str;
556 	unsigned int			key_size;
557 	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
558 	unsigned int			n_sort_keys;
559 	struct trace_event_file		*event_file;
560 	struct hist_trigger_attrs	*attrs;
561 	struct tracing_map		*map;
562 	bool				enable_timestamps;
563 	bool				remove;
564 	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
565 	unsigned int			n_var_refs;
566 
567 	struct action_data		*actions[HIST_ACTIONS_MAX];
568 	unsigned int			n_actions;
569 
570 	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
571 	unsigned int			n_field_vars;
572 	unsigned int			n_field_var_str;
573 	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
574 	unsigned int			n_field_var_hists;
575 
576 	struct field_var		*save_vars[SYNTH_FIELDS_MAX];
577 	unsigned int			n_save_vars;
578 	unsigned int			n_save_var_str;
579 };
580 
581 struct action_data;
582 
583 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
584 			     struct tracing_map_elt *elt,
585 			     struct trace_buffer *buffer, void *rec,
586 			     struct ring_buffer_event *rbe, void *key,
587 			     struct action_data *data, u64 *var_ref_vals);
588 
589 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
590 
591 enum handler_id {
592 	HANDLER_ONMATCH = 1,
593 	HANDLER_ONMAX,
594 	HANDLER_ONCHANGE,
595 };
596 
597 enum action_id {
598 	ACTION_SAVE = 1,
599 	ACTION_TRACE,
600 	ACTION_SNAPSHOT,
601 };
602 
603 struct action_data {
604 	enum handler_id		handler;
605 	enum action_id		action;
606 	char			*action_name;
607 	action_fn_t		fn;
608 
609 	unsigned int		n_params;
610 	char			*params[SYNTH_FIELDS_MAX];
611 
612 	/*
613 	 * When a histogram trigger is hit, the values of any
614 	 * references to variables, including variables being passed
615 	 * as parameters to synthetic events, are collected into a
616 	 * var_ref_vals array.  This var_ref_idx array is an array of
617 	 * indices into the var_ref_vals array, one for each synthetic
618 	 * event param, and is passed to the synthetic event
619 	 * invocation.
620 	 */
621 	unsigned int		var_ref_idx[SYNTH_FIELDS_MAX];
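	/*
	 * For example (illustrative): with an action like
	 * onmatch(sched.sched_switch).trace(wakeup_latency,$wakeup_lat,$saved_pid),
	 * var_ref_idx[0] and var_ref_idx[1] give the positions of $wakeup_lat
	 * and $saved_pid within var_ref_vals when the synthetic event fires.
	 */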
622 	struct synth_event	*synth_event;
623 	bool			use_trace_keyword;
624 	char			*synth_event_name;
625 
626 	union {
627 		struct {
628 			char			*event;
629 			char			*event_system;
630 		} match_data;
631 
632 		struct {
633 			/*
634 			 * var_str contains the $-unstripped variable
635 			 * name referenced by var_ref, and is used when
636 			 * printing the action.  Because var_ref
637 			 * creation is deferred to create_actions(),
638 			 * we need a per-action way to save it until
639 			 * then, thus var_str.
640 			 */
641 			char			*var_str;
642 
643 			/*
644 			 * var_ref refers to the variable being
645 			 * tracked, e.g. onmax($var).
646 			 */
647 			struct hist_field	*var_ref;
648 
649 			/*
650 			 * track_var contains the 'invisible' tracking
651 			 * variable created to keep the current
652 			 * e.g. max value.
653 			 */
654 			struct hist_field	*track_var;
655 
656 			check_track_val_fn_t	check_val;
657 			action_fn_t		save_data;
658 		} track_data;
659 	};
660 };
661 
662 struct track_data {
663 	u64				track_val;
664 	bool				updated;
665 
666 	unsigned int			key_len;
667 	void				*key;
668 	struct tracing_map_elt		elt;
669 
670 	struct action_data		*action_data;
671 	struct hist_trigger_data	*hist_data;
672 };
673 
674 struct hist_elt_data {
675 	char *comm;
676 	u64 *var_ref_vals;
677 	char **field_var_str;
678 	int n_field_var_str;
679 };
680 
681 struct snapshot_context {
682 	struct tracing_map_elt	*elt;
683 	void			*key;
684 };
685 
686 /*
687  * Returns the specific division function to use if the divisor
688  * is constant. This avoids extra branches when the trigger is hit.
689  */
690 static enum hist_field_fn hist_field_get_div_fn(struct hist_field *divisor)
691 {
692 	u64 div = divisor->constant;
693 
694 	if (!(div & (div - 1)))
695 		return HIST_FIELD_FN_DIV_POWER2;
696 
697 	/* If the divisor is too large, do a regular division */
698 	if (div > (1 << HIST_DIV_SHIFT))
699 		return HIST_FIELD_FN_DIV_NOT_POWER2;
700 
701 	divisor->div_multiplier = div64_u64((u64)(1 << HIST_DIV_SHIFT), div);
702 	return HIST_FIELD_FN_DIV_MULT_SHIFT;
703 }
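/*
 * For instance (illustrative): a constant divisor of 8 selects
 * HIST_FIELD_FN_DIV_POWER2, a divisor of 3 selects
 * HIST_FIELD_FN_DIV_MULT_SHIFT with div_multiplier == (1 << 20) / 3 ==
 * 349525, and a divisor of 2000000 (> 1 << HIST_DIV_SHIFT) falls back to
 * HIST_FIELD_FN_DIV_NOT_POWER2.
 */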
704 
705 static void track_data_free(struct track_data *track_data)
706 {
707 	struct hist_elt_data *elt_data;
708 
709 	if (!track_data)
710 		return;
711 
712 	kfree(track_data->key);
713 
714 	elt_data = track_data->elt.private_data;
715 	if (elt_data) {
716 		kfree(elt_data->comm);
717 		kfree(elt_data);
718 	}
719 
720 	kfree(track_data);
721 }
722 
723 static struct track_data *track_data_alloc(unsigned int key_len,
724 					   struct action_data *action_data,
725 					   struct hist_trigger_data *hist_data)
726 {
727 	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
728 	struct hist_elt_data *elt_data;
729 
730 	if (!data)
731 		return ERR_PTR(-ENOMEM);
732 
733 	data->key = kzalloc(key_len, GFP_KERNEL);
734 	if (!data->key) {
735 		track_data_free(data);
736 		return ERR_PTR(-ENOMEM);
737 	}
738 
739 	data->key_len = key_len;
740 	data->action_data = action_data;
741 	data->hist_data = hist_data;
742 
743 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
744 	if (!elt_data) {
745 		track_data_free(data);
746 		return ERR_PTR(-ENOMEM);
747 	}
748 
749 	data->elt.private_data = elt_data;
750 
751 	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
752 	if (!elt_data->comm) {
753 		track_data_free(data);
754 		return ERR_PTR(-ENOMEM);
755 	}
756 
757 	return data;
758 }
759 
760 #define HIST_PREFIX "hist:"
761 
762 static char *last_cmd;
763 static char last_cmd_loc[MAX_FILTER_STR_VAL];
764 
765 static int errpos(char *str)
766 {
767 	if (!str || !last_cmd)
768 		return 0;
769 
770 	return err_pos(last_cmd, str);
771 }
772 
773 static void last_cmd_set(struct trace_event_file *file, char *str)
774 {
775 	const char *system = NULL, *name = NULL;
776 	struct trace_event_call *call;
777 
778 	if (!str)
779 		return;
780 
781 	kfree(last_cmd);
782 
783 	last_cmd = kasprintf(GFP_KERNEL, HIST_PREFIX "%s", str);
784 	if (!last_cmd)
785 		return;
786 
787 	if (file) {
788 		call = file->event_call;
789 		system = call->class->system;
790 		if (system) {
791 			name = trace_event_name(call);
792 			if (!name)
793 				system = NULL;
794 		}
795 	}
796 
797 	if (system)
798 		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, HIST_PREFIX "%s:%s", system, name);
799 }
800 
801 static void hist_err(struct trace_array *tr, u8 err_type, u16 err_pos)
802 {
803 	if (!last_cmd)
804 		return;
805 
806 	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
807 			err_type, err_pos);
808 }
809 
810 static void hist_err_clear(void)
811 {
812 	if (last_cmd)
813 		last_cmd[0] = '\0';
814 	last_cmd_loc[0] = '\0';
815 }
816 
817 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
818 				    unsigned int *var_ref_idx);
819 
820 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
821 			       unsigned int *var_ref_idx)
822 {
823 	struct tracepoint *tp = event->tp;
824 
825 	if (unlikely(static_key_enabled(&tp->key))) {
826 		struct tracepoint_func *probe_func_ptr;
827 		synth_probe_func_t probe_func;
828 		void *__data;
829 
830 		if (!(cpu_online(raw_smp_processor_id())))
831 			return;
832 
833 		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
834 		if (probe_func_ptr) {
835 			do {
836 				probe_func = probe_func_ptr->func;
837 				__data = probe_func_ptr->data;
838 				probe_func(__data, var_ref_vals, var_ref_idx);
839 			} while ((++probe_func_ptr)->func);
840 		}
841 	}
842 }
843 
844 static void action_trace(struct hist_trigger_data *hist_data,
845 			 struct tracing_map_elt *elt,
846 			 struct trace_buffer *buffer, void *rec,
847 			 struct ring_buffer_event *rbe, void *key,
848 			 struct action_data *data, u64 *var_ref_vals)
849 {
850 	struct synth_event *event = data->synth_event;
851 
852 	trace_synth(event, var_ref_vals, data->var_ref_idx);
853 }
854 
855 struct hist_var_data {
856 	struct list_head list;
857 	struct hist_trigger_data *hist_data;
858 };
859 
860 static u64 hist_field_timestamp(struct hist_field *hist_field,
861 				struct tracing_map_elt *elt,
862 				struct trace_buffer *buffer,
863 				struct ring_buffer_event *rbe,
864 				void *event)
865 {
866 	struct hist_trigger_data *hist_data = hist_field->hist_data;
867 	struct trace_array *tr = hist_data->event_file->tr;
868 
869 	u64 ts = ring_buffer_event_time_stamp(buffer, rbe);
870 
871 	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
872 		ts = ns2usecs(ts);
873 
874 	return ts;
875 }
876 
877 static u64 hist_field_cpu(struct hist_field *hist_field,
878 			  struct tracing_map_elt *elt,
879 			  struct trace_buffer *buffer,
880 			  struct ring_buffer_event *rbe,
881 			  void *event)
882 {
883 	int cpu = smp_processor_id();
884 
885 	return cpu;
886 }
887 
888 /**
889  * check_field_for_var_ref - Check if a VAR_REF field references a variable
890  * @hist_field: The VAR_REF field to check
891  * @var_data: The hist trigger that owns the variable
892  * @var_idx: The trigger variable identifier
893  *
894  * Check the given VAR_REF field to see whether or not it references
895  * the given variable associated with the given trigger.
896  *
897  * Return: The VAR_REF field if it does reference the variable, NULL if not
898  */
899 static struct hist_field *
900 check_field_for_var_ref(struct hist_field *hist_field,
901 			struct hist_trigger_data *var_data,
902 			unsigned int var_idx)
903 {
904 	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
905 
906 	if (hist_field && hist_field->var.idx == var_idx &&
907 	    hist_field->var.hist_data == var_data)
908 		return hist_field;
909 
910 	return NULL;
911 }
912 
913 /**
914  * find_var_ref - Check if a trigger has a reference to a trigger variable
915  * @hist_data: The hist trigger that might have a reference to the variable
916  * @var_data: The hist trigger that owns the variable
917  * @var_idx: The trigger variable identifier
918  *
919  * Check the list of var_refs[] on the first hist trigger to see
920  * whether any of them are references to the variable on the second
921  * trigger.
922  *
923  * Return: The VAR_REF field referencing the variable if so, NULL if not
924  */
925 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
926 				       struct hist_trigger_data *var_data,
927 				       unsigned int var_idx)
928 {
929 	struct hist_field *hist_field;
930 	unsigned int i;
931 
932 	for (i = 0; i < hist_data->n_var_refs; i++) {
933 		hist_field = hist_data->var_refs[i];
934 		if (check_field_for_var_ref(hist_field, var_data, var_idx))
935 			return hist_field;
936 	}
937 
938 	return NULL;
939 }
940 
941 /**
942  * find_any_var_ref - Check if there is a reference to a given trigger variable
943  * @hist_data: The hist trigger
944  * @var_idx: The trigger variable identifier
945  *
946  * Check to see whether the given variable is currently referenced by
947  * any other trigger.
948  *
949  * The trigger the variable is defined on is explicitly excluded - the
950  * assumption being that a self-reference doesn't prevent a trigger
951  * from being removed.
952  *
953  * Return: The VAR_REF field referencing the variable if so, NULL if not
954  */
955 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
956 					   unsigned int var_idx)
957 {
958 	struct trace_array *tr = hist_data->event_file->tr;
959 	struct hist_field *found = NULL;
960 	struct hist_var_data *var_data;
961 
962 	list_for_each_entry(var_data, &tr->hist_vars, list) {
963 		if (var_data->hist_data == hist_data)
964 			continue;
965 		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
966 		if (found)
967 			break;
968 	}
969 
970 	return found;
971 }
972 
973 /**
974  * check_var_refs - Check if there is a reference to any of trigger's variables
975  * @hist_data: The hist trigger
976  *
977  * A trigger can define one or more variables.  If any one of them is
978  * currently referenced by any other trigger, this function will
979  * determine that.
980  *
981  * Typically used to determine whether or not a trigger can be removed
982  * - if there are any references to a trigger's variables, it cannot.
983  *
984  * Return: True if there is a reference to any of trigger's variables
985  */
986 static bool check_var_refs(struct hist_trigger_data *hist_data)
987 {
988 	struct hist_field *field;
989 	bool found = false;
990 	int i;
991 
992 	for_each_hist_field(i, hist_data) {
993 		field = hist_data->fields[i];
994 		if (field && field->flags & HIST_FIELD_FL_VAR) {
995 			if (find_any_var_ref(hist_data, field->var.idx)) {
996 				found = true;
997 				break;
998 			}
999 		}
1000 	}
1001 
1002 	return found;
1003 }
1004 
1005 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
1006 {
1007 	struct trace_array *tr = hist_data->event_file->tr;
1008 	struct hist_var_data *var_data, *found = NULL;
1009 
1010 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1011 		if (var_data->hist_data == hist_data) {
1012 			found = var_data;
1013 			break;
1014 		}
1015 	}
1016 
1017 	return found;
1018 }
1019 
1020 static bool field_has_hist_vars(struct hist_field *hist_field,
1021 				unsigned int level)
1022 {
1023 	int i;
1024 
1025 	if (level > 3)
1026 		return false;
1027 
1028 	if (!hist_field)
1029 		return false;
1030 
1031 	if (hist_field->flags & HIST_FIELD_FL_VAR ||
1032 	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
1033 		return true;
1034 
1035 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1036 		struct hist_field *operand;
1037 
1038 		operand = hist_field->operands[i];
1039 		if (field_has_hist_vars(operand, level + 1))
1040 			return true;
1041 	}
1042 
1043 	return false;
1044 }
1045 
1046 static bool has_hist_vars(struct hist_trigger_data *hist_data)
1047 {
1048 	struct hist_field *hist_field;
1049 	int i;
1050 
1051 	for_each_hist_field(i, hist_data) {
1052 		hist_field = hist_data->fields[i];
1053 		if (field_has_hist_vars(hist_field, 0))
1054 			return true;
1055 	}
1056 
1057 	return false;
1058 }
1059 
1060 static int save_hist_vars(struct hist_trigger_data *hist_data)
1061 {
1062 	struct trace_array *tr = hist_data->event_file->tr;
1063 	struct hist_var_data *var_data;
1064 
1065 	var_data = find_hist_vars(hist_data);
1066 	if (var_data)
1067 		return 0;
1068 
1069 	if (tracing_check_open_get_tr(tr))
1070 		return -ENODEV;
1071 
1072 	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
1073 	if (!var_data) {
1074 		trace_array_put(tr);
1075 		return -ENOMEM;
1076 	}
1077 
1078 	var_data->hist_data = hist_data;
1079 	list_add(&var_data->list, &tr->hist_vars);
1080 
1081 	return 0;
1082 }
1083 
1084 static void remove_hist_vars(struct hist_trigger_data *hist_data)
1085 {
1086 	struct trace_array *tr = hist_data->event_file->tr;
1087 	struct hist_var_data *var_data;
1088 
1089 	var_data = find_hist_vars(hist_data);
1090 	if (!var_data)
1091 		return;
1092 
1093 	if (WARN_ON(check_var_refs(hist_data)))
1094 		return;
1095 
1096 	list_del(&var_data->list);
1097 
1098 	kfree(var_data);
1099 
1100 	trace_array_put(tr);
1101 }
1102 
1103 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
1104 					 const char *var_name)
1105 {
1106 	struct hist_field *hist_field, *found = NULL;
1107 	int i;
1108 
1109 	for_each_hist_field(i, hist_data) {
1110 		hist_field = hist_data->fields[i];
1111 		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
1112 		    strcmp(hist_field->var.name, var_name) == 0) {
1113 			found = hist_field;
1114 			break;
1115 		}
1116 	}
1117 
1118 	return found;
1119 }
1120 
1121 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
1122 				   struct trace_event_file *file,
1123 				   const char *var_name)
1124 {
1125 	struct hist_trigger_data *test_data;
1126 	struct event_trigger_data *test;
1127 	struct hist_field *hist_field;
1128 
1129 	lockdep_assert_held(&event_mutex);
1130 
1131 	hist_field = find_var_field(hist_data, var_name);
1132 	if (hist_field)
1133 		return hist_field;
1134 
1135 	list_for_each_entry(test, &file->triggers, list) {
1136 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1137 			test_data = test->private_data;
1138 			hist_field = find_var_field(test_data, var_name);
1139 			if (hist_field)
1140 				return hist_field;
1141 		}
1142 	}
1143 
1144 	return NULL;
1145 }
1146 
1147 static struct trace_event_file *find_var_file(struct trace_array *tr,
1148 					      char *system,
1149 					      char *event_name,
1150 					      char *var_name)
1151 {
1152 	struct hist_trigger_data *var_hist_data;
1153 	struct hist_var_data *var_data;
1154 	struct trace_event_file *file, *found = NULL;
1155 
1156 	if (system)
1157 		return find_event_file(tr, system, event_name);
1158 
1159 	list_for_each_entry(var_data, &tr->hist_vars, list) {
1160 		var_hist_data = var_data->hist_data;
1161 		file = var_hist_data->event_file;
1162 		if (file == found)
1163 			continue;
1164 
1165 		if (find_var_field(var_hist_data, var_name)) {
1166 			if (found) {
1167 				hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
1168 				return NULL;
1169 			}
1170 
1171 			found = file;
1172 		}
1173 	}
1174 
1175 	return found;
1176 }
1177 
1178 static struct hist_field *find_file_var(struct trace_event_file *file,
1179 					const char *var_name)
1180 {
1181 	struct hist_trigger_data *test_data;
1182 	struct event_trigger_data *test;
1183 	struct hist_field *hist_field;
1184 
1185 	lockdep_assert_held(&event_mutex);
1186 
1187 	list_for_each_entry(test, &file->triggers, list) {
1188 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1189 			test_data = test->private_data;
1190 			hist_field = find_var_field(test_data, var_name);
1191 			if (hist_field)
1192 				return hist_field;
1193 		}
1194 	}
1195 
1196 	return NULL;
1197 }
1198 
1199 static struct hist_field *
1200 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1201 {
1202 	struct trace_array *tr = hist_data->event_file->tr;
1203 	struct hist_field *hist_field, *found = NULL;
1204 	struct trace_event_file *file;
1205 	unsigned int i;
1206 
1207 	for (i = 0; i < hist_data->n_actions; i++) {
1208 		struct action_data *data = hist_data->actions[i];
1209 
1210 		if (data->handler == HANDLER_ONMATCH) {
1211 			char *system = data->match_data.event_system;
1212 			char *event_name = data->match_data.event;
1213 
1214 			file = find_var_file(tr, system, event_name, var_name);
1215 			if (!file)
1216 				continue;
1217 			hist_field = find_file_var(file, var_name);
1218 			if (hist_field) {
1219 				if (found) {
1220 					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
1221 						 errpos(var_name));
1222 					return ERR_PTR(-EINVAL);
1223 				}
1224 
1225 				found = hist_field;
1226 			}
1227 		}
1228 	}
1229 	return found;
1230 }
1231 
1232 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
1233 					 char *system,
1234 					 char *event_name,
1235 					 char *var_name)
1236 {
1237 	struct trace_array *tr = hist_data->event_file->tr;
1238 	struct hist_field *hist_field = NULL;
1239 	struct trace_event_file *file;
1240 
1241 	if (!system || !event_name) {
1242 		hist_field = find_match_var(hist_data, var_name);
1243 		if (IS_ERR(hist_field))
1244 			return NULL;
1245 		if (hist_field)
1246 			return hist_field;
1247 	}
1248 
1249 	file = find_var_file(tr, system, event_name, var_name);
1250 	if (!file)
1251 		return NULL;
1252 
1253 	hist_field = find_file_var(file, var_name);
1254 
1255 	return hist_field;
1256 }
1257 
1258 static u64 hist_field_var_ref(struct hist_field *hist_field,
1259 			      struct tracing_map_elt *elt,
1260 			      struct trace_buffer *buffer,
1261 			      struct ring_buffer_event *rbe,
1262 			      void *event)
1263 {
1264 	struct hist_elt_data *elt_data;
1265 	u64 var_val = 0;
1266 
1267 	if (WARN_ON_ONCE(!elt))
1268 		return var_val;
1269 
1270 	elt_data = elt->private_data;
1271 	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1272 
1273 	return var_val;
1274 }
1275 
1276 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
1277 			     u64 *var_ref_vals, bool self)
1278 {
1279 	struct hist_trigger_data *var_data;
1280 	struct tracing_map_elt *var_elt;
1281 	struct hist_field *hist_field;
1282 	unsigned int i, var_idx;
1283 	bool resolved = true;
1284 	u64 var_val = 0;
1285 
1286 	for (i = 0; i < hist_data->n_var_refs; i++) {
1287 		hist_field = hist_data->var_refs[i];
1288 		var_idx = hist_field->var.idx;
1289 		var_data = hist_field->var.hist_data;
1290 
1291 		if (var_data == NULL) {
1292 			resolved = false;
1293 			break;
1294 		}
1295 
1296 		if ((self && var_data != hist_data) ||
1297 		    (!self && var_data == hist_data))
1298 			continue;
1299 
1300 		var_elt = tracing_map_lookup(var_data->map, key);
1301 		if (!var_elt) {
1302 			resolved = false;
1303 			break;
1304 		}
1305 
1306 		if (!tracing_map_var_set(var_elt, var_idx)) {
1307 			resolved = false;
1308 			break;
1309 		}
1310 
1311 		if (self || !hist_field->read_once)
1312 			var_val = tracing_map_read_var(var_elt, var_idx);
1313 		else
1314 			var_val = tracing_map_read_var_once(var_elt, var_idx);
1315 
1316 		var_ref_vals[i] = var_val;
1317 	}
1318 
1319 	return resolved;
1320 }
1321 
1322 static const char *hist_field_name(struct hist_field *field,
1323 				   unsigned int level)
1324 {
1325 	const char *field_name = "";
1326 
1327 	if (WARN_ON_ONCE(!field))
1328 		return field_name;
1329 
1330 	if (level > 1)
1331 		return field_name;
1332 
1333 	if (field->field)
1334 		field_name = field->field->name;
1335 	else if (field->flags & HIST_FIELD_FL_LOG2 ||
1336 		 field->flags & HIST_FIELD_FL_ALIAS ||
1337 		 field->flags & HIST_FIELD_FL_BUCKET)
1338 		field_name = hist_field_name(field->operands[0], ++level);
1339 	else if (field->flags & HIST_FIELD_FL_CPU)
1340 		field_name = "common_cpu";
1341 	else if (field->flags & HIST_FIELD_FL_EXPR ||
1342 		 field->flags & HIST_FIELD_FL_VAR_REF) {
1343 		if (field->system) {
1344 			static char full_name[MAX_FILTER_STR_VAL];
1345 
1346 			strcat(full_name, field->system);
1347 			strcat(full_name, ".");
1348 			strcat(full_name, field->event_name);
1349 			strcat(full_name, ".");
1350 			strcat(full_name, field->name);
1351 			field_name = full_name;
1352 		} else
1353 			field_name = field->name;
1354 	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
1355 		field_name = "common_timestamp";
1356 	else if (field->flags & HIST_FIELD_FL_STACKTRACE) {
1357 		field_name = "common_stacktrace";
1358 	} else if (field->flags & HIST_FIELD_FL_HITCOUNT)
1359 		field_name = "hitcount";
1360 
1361 	if (field_name == NULL)
1362 		field_name = "";
1363 
1364 	return field_name;
1365 }
1366 
1367 static enum hist_field_fn select_value_fn(int field_size, int field_is_signed)
1368 {
1369 	switch (field_size) {
1370 	case 8:
1371 		if (field_is_signed)
1372 			return HIST_FIELD_FN_S64;
1373 		else
1374 			return HIST_FIELD_FN_U64;
1375 	case 4:
1376 		if (field_is_signed)
1377 			return HIST_FIELD_FN_S32;
1378 		else
1379 			return HIST_FIELD_FN_U32;
1380 	case 2:
1381 		if (field_is_signed)
1382 			return HIST_FIELD_FN_S16;
1383 		else
1384 			return HIST_FIELD_FN_U16;
1385 	case 1:
1386 		if (field_is_signed)
1387 			return HIST_FIELD_FN_S8;
1388 		else
1389 			return HIST_FIELD_FN_U8;
1390 	}
1391 
1392 	return HIST_FIELD_FN_NOP;
1393 }
1394 
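/*
 * Illustrative example for parse_map_size() below: "size=3000" gives
 * roundup_pow_of_two(3000) == 4096 and ilog2(4096) == 12, so the
 * tracing_map is sized at 12 bits (4096 entries), provided that falls
 * within TRACING_MAP_BITS_MIN..TRACING_MAP_BITS_MAX.
 */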
1395 static int parse_map_size(char *str)
1396 {
1397 	unsigned long size, map_bits;
1398 	int ret;
1399 
1400 	ret = kstrtoul(str, 0, &size);
1401 	if (ret)
1402 		goto out;
1403 
1404 	map_bits = ilog2(roundup_pow_of_two(size));
1405 	if (map_bits < TRACING_MAP_BITS_MIN ||
1406 	    map_bits > TRACING_MAP_BITS_MAX)
1407 		ret = -EINVAL;
1408 	else
1409 		ret = map_bits;
1410  out:
1411 	return ret;
1412 }
1413 
1414 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
1415 {
1416 	unsigned int i;
1417 
1418 	if (!attrs)
1419 		return;
1420 
1421 	for (i = 0; i < attrs->n_assignments; i++)
1422 		kfree(attrs->assignment_str[i]);
1423 
1424 	for (i = 0; i < attrs->n_actions; i++)
1425 		kfree(attrs->action_str[i]);
1426 
1427 	kfree(attrs->name);
1428 	kfree(attrs->sort_key_str);
1429 	kfree(attrs->keys_str);
1430 	kfree(attrs->vals_str);
1431 	kfree(attrs->clock);
1432 	kfree(attrs);
1433 }
1434 
1435 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
1436 {
1437 	int ret = -EINVAL;
1438 
1439 	if (attrs->n_actions >= HIST_ACTIONS_MAX)
1440 		return ret;
1441 
1442 	if ((str_has_prefix(str, "onmatch(")) ||
1443 	    (str_has_prefix(str, "onmax(")) ||
1444 	    (str_has_prefix(str, "onchange("))) {
1445 		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
1446 		if (!attrs->action_str[attrs->n_actions]) {
1447 			ret = -ENOMEM;
1448 			return ret;
1449 		}
1450 		attrs->n_actions++;
1451 		ret = 0;
1452 	}
1453 	return ret;
1454 }
1455 
1456 static int parse_assignment(struct trace_array *tr,
1457 			    char *str, struct hist_trigger_attrs *attrs)
1458 {
1459 	int len, ret = 0;
1460 
1461 	if ((len = str_has_prefix(str, "key=")) ||
1462 	    (len = str_has_prefix(str, "keys="))) {
1463 		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
1464 		if (!attrs->keys_str) {
1465 			ret = -ENOMEM;
1466 			goto out;
1467 		}
1468 	} else if ((len = str_has_prefix(str, "val=")) ||
1469 		   (len = str_has_prefix(str, "vals=")) ||
1470 		   (len = str_has_prefix(str, "values="))) {
1471 		attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
1472 		if (!attrs->vals_str) {
1473 			ret = -ENOMEM;
1474 			goto out;
1475 		}
1476 	} else if ((len = str_has_prefix(str, "sort="))) {
1477 		attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
1478 		if (!attrs->sort_key_str) {
1479 			ret = -ENOMEM;
1480 			goto out;
1481 		}
1482 	} else if (str_has_prefix(str, "name=")) {
1483 		attrs->name = kstrdup(str, GFP_KERNEL);
1484 		if (!attrs->name) {
1485 			ret = -ENOMEM;
1486 			goto out;
1487 		}
1488 	} else if ((len = str_has_prefix(str, "clock="))) {
1489 		str += len;
1490 
1491 		str = strstrip(str);
1492 		attrs->clock = kstrdup(str, GFP_KERNEL);
1493 		if (!attrs->clock) {
1494 			ret = -ENOMEM;
1495 			goto out;
1496 		}
1497 	} else if ((len = str_has_prefix(str, "size="))) {
1498 		int map_bits = parse_map_size(str + len);
1499 
1500 		if (map_bits < 0) {
1501 			ret = map_bits;
1502 			goto out;
1503 		}
1504 		attrs->map_bits = map_bits;
1505 	} else {
1506 		char *assignment;
1507 
1508 		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
1509 			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
1510 			ret = -EINVAL;
1511 			goto out;
1512 		}
1513 
1514 		assignment = kstrdup(str, GFP_KERNEL);
1515 		if (!assignment) {
1516 			ret = -ENOMEM;
1517 			goto out;
1518 		}
1519 
1520 		attrs->assignment_str[attrs->n_assignments++] = assignment;
1521 	}
1522  out:
1523 	return ret;
1524 }
1525 
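/*
 * Illustrative example (see Documentation/trace/histogram.rst): a trigger
 * string such as
 *
 *	keys=pid:vals=hitcount:sort=hitcount.descending:size=2048:pause
 *
 * is split on ':' below; "keys=", "vals=", "sort=" and "size=" are handled
 * by parse_assignment(), while bare keywords such as "pause", "cont",
 * "clear" and "nohitcount" simply set the corresponding attrs flags.
 */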
1526 static struct hist_trigger_attrs *
1527 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
1528 {
1529 	struct hist_trigger_attrs *attrs;
1530 	int ret = 0;
1531 
1532 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
1533 	if (!attrs)
1534 		return ERR_PTR(-ENOMEM);
1535 
1536 	while (trigger_str) {
1537 		char *str = strsep(&trigger_str, ":");
1538 		char *rhs;
1539 
1540 		rhs = strchr(str, '=');
1541 		if (rhs) {
1542 			if (!strlen(++rhs)) {
1543 				ret = -EINVAL;
1544 				hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
1545 				goto free;
1546 			}
1547 			ret = parse_assignment(tr, str, attrs);
1548 			if (ret)
1549 				goto free;
1550 		} else if (strcmp(str, "nohitcount") == 0 ||
1551 			   strcmp(str, "NOHC") == 0)
1552 			attrs->no_hitcount = true;
1553 		else if (strcmp(str, "pause") == 0)
1554 			attrs->pause = true;
1555 		else if ((strcmp(str, "cont") == 0) ||
1556 			 (strcmp(str, "continue") == 0))
1557 			attrs->cont = true;
1558 		else if (strcmp(str, "clear") == 0)
1559 			attrs->clear = true;
1560 		else {
1561 			ret = parse_action(str, attrs);
1562 			if (ret)
1563 				goto free;
1564 		}
1565 	}
1566 
1567 	if (!attrs->keys_str) {
1568 		ret = -EINVAL;
1569 		goto free;
1570 	}
1571 
1572 	if (!attrs->clock) {
1573 		attrs->clock = kstrdup("global", GFP_KERNEL);
1574 		if (!attrs->clock) {
1575 			ret = -ENOMEM;
1576 			goto free;
1577 		}
1578 	}
1579 
1580 	return attrs;
1581  free:
1582 	destroy_hist_trigger_attrs(attrs);
1583 
1584 	return ERR_PTR(ret);
1585 }
1586 
1587 static inline void save_comm(char *comm, struct task_struct *task)
1588 {
1589 	if (!task->pid) {
1590 		strcpy(comm, "<idle>");
1591 		return;
1592 	}
1593 
1594 	if (WARN_ON_ONCE(task->pid < 0)) {
1595 		strcpy(comm, "<XXX>");
1596 		return;
1597 	}
1598 
1599 	strscpy(comm, task->comm, TASK_COMM_LEN);
1600 }
1601 
1602 static void hist_elt_data_free(struct hist_elt_data *elt_data)
1603 {
1604 	unsigned int i;
1605 
1606 	for (i = 0; i < elt_data->n_field_var_str; i++)
1607 		kfree(elt_data->field_var_str[i]);
1608 
1609 	kfree(elt_data->field_var_str);
1610 
1611 	kfree(elt_data->comm);
1612 	kfree(elt_data);
1613 }
1614 
1615 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
1616 {
1617 	struct hist_elt_data *elt_data = elt->private_data;
1618 
1619 	hist_elt_data_free(elt_data);
1620 }
1621 
1622 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
1623 {
1624 	struct hist_trigger_data *hist_data = elt->map->private_data;
1625 	unsigned int size = TASK_COMM_LEN;
1626 	struct hist_elt_data *elt_data;
1627 	struct hist_field *hist_field;
1628 	unsigned int i, n_str;
1629 
1630 	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
1631 	if (!elt_data)
1632 		return -ENOMEM;
1633 
1634 	for_each_hist_field(i, hist_data) {
1635 		hist_field = hist_data->fields[i];
1636 
1637 		if (hist_field->flags & HIST_FIELD_FL_EXECNAME) {
1638 			elt_data->comm = kzalloc(size, GFP_KERNEL);
1639 			if (!elt_data->comm) {
1640 				kfree(elt_data);
1641 				return -ENOMEM;
1642 			}
1643 			break;
1644 		}
1645 	}
1646 
1647 	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str +
1648 		hist_data->n_var_str;
1649 	if (n_str > SYNTH_FIELDS_MAX) {
1650 		hist_elt_data_free(elt_data);
1651 		return -EINVAL;
1652 	}
1653 
1654 	BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1));
1655 
1656 	size = STR_VAR_LEN_MAX;
1657 
1658 	elt_data->field_var_str = kcalloc(n_str, sizeof(char *), GFP_KERNEL);
1659 	if (!elt_data->field_var_str) {
1660 		hist_elt_data_free(elt_data);
1661 		return -EINVAL;
1662 	}
1663 	elt_data->n_field_var_str = n_str;
1664 
1665 	for (i = 0; i < n_str; i++) {
1666 		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
1667 		if (!elt_data->field_var_str[i]) {
1668 			hist_elt_data_free(elt_data);
1669 			return -ENOMEM;
1670 		}
1671 	}
1672 
1673 	elt->private_data = elt_data;
1674 
1675 	return 0;
1676 }
1677 
1678 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
1679 {
1680 	struct hist_elt_data *elt_data = elt->private_data;
1681 
1682 	if (elt_data->comm)
1683 		save_comm(elt_data->comm, current);
1684 }
1685 
1686 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
1687 	.elt_alloc	= hist_trigger_elt_data_alloc,
1688 	.elt_free	= hist_trigger_elt_data_free,
1689 	.elt_init	= hist_trigger_elt_data_init,
1690 };
1691 
1692 static const char *get_hist_field_flags(struct hist_field *hist_field)
1693 {
1694 	const char *flags_str = NULL;
1695 
1696 	if (hist_field->flags & HIST_FIELD_FL_HEX)
1697 		flags_str = "hex";
1698 	else if (hist_field->flags & HIST_FIELD_FL_SYM)
1699 		flags_str = "sym";
1700 	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
1701 		flags_str = "sym-offset";
1702 	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
1703 		flags_str = "execname";
1704 	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
1705 		flags_str = "syscall";
1706 	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
1707 		flags_str = "log2";
1708 	else if (hist_field->flags & HIST_FIELD_FL_BUCKET)
1709 		flags_str = "buckets";
1710 	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
1711 		flags_str = "usecs";
1712 	else if (hist_field->flags & HIST_FIELD_FL_PERCENT)
1713 		flags_str = "percent";
1714 	else if (hist_field->flags & HIST_FIELD_FL_GRAPH)
1715 		flags_str = "graph";
1716 	else if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
1717 		flags_str = "stacktrace";
1718 
1719 	return flags_str;
1720 }
1721 
1722 static void expr_field_str(struct hist_field *field, char *expr)
1723 {
1724 	if (field->flags & HIST_FIELD_FL_VAR_REF)
1725 		strcat(expr, "$");
1726 	else if (field->flags & HIST_FIELD_FL_CONST) {
1727 		char str[HIST_CONST_DIGITS_MAX];
1728 
1729 		snprintf(str, HIST_CONST_DIGITS_MAX, "%llu", field->constant);
1730 		strcat(expr, str);
1731 	}
1732 
1733 	strcat(expr, hist_field_name(field, 0));
1734 
1735 	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
1736 		const char *flags_str = get_hist_field_flags(field);
1737 
1738 		if (flags_str) {
1739 			strcat(expr, ".");
1740 			strcat(expr, flags_str);
1741 		}
1742 	}
1743 }
1744 
1745 static char *expr_str(struct hist_field *field, unsigned int level)
1746 {
1747 	char *expr;
1748 
1749 	if (level > 1)
1750 		return NULL;
1751 
1752 	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
1753 	if (!expr)
1754 		return NULL;
1755 
1756 	if (!field->operands[0]) {
1757 		expr_field_str(field, expr);
1758 		return expr;
1759 	}
1760 
1761 	if (field->operator == FIELD_OP_UNARY_MINUS) {
1762 		char *subexpr;
1763 
1764 		strcat(expr, "-(");
1765 		subexpr = expr_str(field->operands[0], ++level);
1766 		if (!subexpr) {
1767 			kfree(expr);
1768 			return NULL;
1769 		}
1770 		strcat(expr, subexpr);
1771 		strcat(expr, ")");
1772 
1773 		kfree(subexpr);
1774 
1775 		return expr;
1776 	}
1777 
1778 	expr_field_str(field->operands[0], expr);
1779 
1780 	switch (field->operator) {
1781 	case FIELD_OP_MINUS:
1782 		strcat(expr, "-");
1783 		break;
1784 	case FIELD_OP_PLUS:
1785 		strcat(expr, "+");
1786 		break;
1787 	case FIELD_OP_DIV:
1788 		strcat(expr, "/");
1789 		break;
1790 	case FIELD_OP_MULT:
1791 		strcat(expr, "*");
1792 		break;
1793 	default:
1794 		kfree(expr);
1795 		return NULL;
1796 	}
1797 
1798 	expr_field_str(field->operands[1], expr);
1799 
1800 	return expr;
1801 }
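/*
 * Illustrative note (not part of the original source): expr_str() rebuilds
 * the user-visible expression from the parsed tree.  For a hypothetical
 * variable defined as 'wakeup_lat=common_timestamp.usecs-$ts0', the
 * reconstructed string is "common_timestamp.usecs-$ts0": the '$' prefix
 * comes from the VAR_REF branch of expr_field_str() and the ".usecs"
 * suffix from get_hist_field_flags().
 */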
1802 
1803 /*
1804  * If field_op != FIELD_OP_NONE, *sep points to the root operator
1805  * of the expression tree to be evaluated.
1806  */
1807 static int contains_operator(char *str, char **sep)
1808 {
1809 	enum field_op_id field_op = FIELD_OP_NONE;
1810 	char *minus_op, *plus_op, *div_op, *mult_op;
1811 
1812 
1813 	/*
1814 	 * Report the last occurrence of the operators first, so that the
1815 	 * expression is evaluated left to right. This is important since
1816 	 * subtraction and division are not associative.
1817 	 *
1818 	 *	e.g.
1819 	 *		64/8/4/2 is 1, i.e. 64/8/4/2 = ((64/8)/4)/2
1820 	 *		14-7-5-2 is 0, i.e. 14-7-5-2 = ((14-7)-5)-2
1821 	 */
1822 
1823 	/*
1824 	 * First, find lower precedence addition and subtraction
1825 	 * since the expression will be evaluated recursively.
1826 	 */
1827 	minus_op = strrchr(str, '-');
1828 	if (minus_op) {
1829 		/*
1830 		 * Unary minus is not supported in sub-expressions. If
1831 		 * present, it is always the next root operator.
1832 		 */
1833 		if (minus_op == str) {
1834 			field_op = FIELD_OP_UNARY_MINUS;
1835 			goto out;
1836 		}
1837 
1838 		field_op = FIELD_OP_MINUS;
1839 	}
1840 
1841 	plus_op = strrchr(str, '+');
1842 	if (plus_op || minus_op) {
1843 		/*
1844 		 * For operators of the same precedence, use the rightmost as the
1845 		 * root, so that the expression is evaluated left to right.
1846 		 */
1847 		if (plus_op > minus_op)
1848 			field_op = FIELD_OP_PLUS;
1849 		goto out;
1850 	}
1851 
1852 	/*
1853 	 * Multiplication and division have higher precedence than addition and
1854 	 * subtraction.
1855 	 */
1856 	div_op = strrchr(str, '/');
1857 	if (div_op)
1858 		field_op = FIELD_OP_DIV;
1859 
1860 	mult_op = strrchr(str, '*');
1861 	/*
1862 	 * For operators of the same precedence, use the rightmost as the
1863 	 * root, so that the expression is evaluated left to right.
1864 	 */
1865 	if (mult_op > div_op)
1866 		field_op = FIELD_OP_MULT;
1867 
1868 out:
1869 	if (sep) {
1870 		switch (field_op) {
1871 		case FIELD_OP_UNARY_MINUS:
1872 		case FIELD_OP_MINUS:
1873 			*sep = minus_op;
1874 			break;
1875 		case FIELD_OP_PLUS:
1876 			*sep = plus_op;
1877 			break;
1878 		case FIELD_OP_DIV:
1879 			*sep = div_op;
1880 			break;
1881 		case FIELD_OP_MULT:
1882 			*sep = mult_op;
1883 			break;
1884 		case FIELD_OP_NONE:
1885 		default:
1886 			*sep = NULL;
1887 			break;
1888 		}
1889 	}
1890 
1891 	return field_op;
1892 }
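/*
 * Illustrative example (not part of the original source): for the string
 * "a-b-c", strrchr() locates the rightmost '-', so *sep points at the
 * second minus and the caller splits the string into "a-b" and "c",
 * giving ((a-b)-c).  A '-' at the very start of the string is reported
 * as FIELD_OP_UNARY_MINUS instead.
 */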
1893 
1894 static void get_hist_field(struct hist_field *hist_field)
1895 {
1896 	hist_field->ref++;
1897 }
1898 
1899 static void __destroy_hist_field(struct hist_field *hist_field)
1900 {
1901 	if (--hist_field->ref > 1)
1902 		return;
1903 
1904 	kfree(hist_field->var.name);
1905 	kfree(hist_field->name);
1906 
1907 	/* Can likely be a const */
1908 	kfree_const(hist_field->type);
1909 
1910 	kfree(hist_field->system);
1911 	kfree(hist_field->event_name);
1912 
1913 	kfree(hist_field);
1914 }
1915 
1916 static void destroy_hist_field(struct hist_field *hist_field,
1917 			       unsigned int level)
1918 {
1919 	unsigned int i;
1920 
1921 	if (level > 3)
1922 		return;
1923 
1924 	if (!hist_field)
1925 		return;
1926 
1927 	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
1928 		return; /* var refs will be destroyed separately */
1929 
1930 	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
1931 		destroy_hist_field(hist_field->operands[i], level + 1);
1932 
1933 	__destroy_hist_field(hist_field);
1934 }
1935 
1936 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
1937 					    struct ftrace_event_field *field,
1938 					    unsigned long flags,
1939 					    char *var_name)
1940 {
1941 	struct hist_field *hist_field;
1942 
1943 	if (field && is_function_field(field))
1944 		return NULL;
1945 
1946 	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
1947 	if (!hist_field)
1948 		return NULL;
1949 
1950 	hist_field->ref = 1;
1951 
1952 	hist_field->hist_data = hist_data;
1953 
1954 	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
1955 		goto out; /* caller will populate */
1956 
1957 	if (flags & HIST_FIELD_FL_VAR_REF) {
1958 		hist_field->fn_num = HIST_FIELD_FN_VAR_REF;
1959 		goto out;
1960 	}
1961 
1962 	if (flags & HIST_FIELD_FL_HITCOUNT) {
1963 		hist_field->fn_num = HIST_FIELD_FN_COUNTER;
1964 		hist_field->size = sizeof(u64);
1965 		hist_field->type = "u64";
1966 		goto out;
1967 	}
1968 
1969 	if (flags & HIST_FIELD_FL_CONST) {
1970 		hist_field->fn_num = HIST_FIELD_FN_CONST;
1971 		hist_field->size = sizeof(u64);
1972 		hist_field->type = kstrdup("u64", GFP_KERNEL);
1973 		if (!hist_field->type)
1974 			goto free;
1975 		goto out;
1976 	}
1977 
1978 	if (flags & HIST_FIELD_FL_STACKTRACE) {
1979 		if (field)
1980 			hist_field->fn_num = HIST_FIELD_FN_STACK;
1981 		else
1982 			hist_field->fn_num = HIST_FIELD_FN_NOP;
1983 		hist_field->size = HIST_STACKTRACE_SIZE;
1984 		hist_field->type = kstrdup_const("unsigned long[]", GFP_KERNEL);
1985 		if (!hist_field->type)
1986 			goto free;
1987 		goto out;
1988 	}
1989 
1990 	if (flags & (HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET)) {
1991 		unsigned long fl = flags & ~(HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET);
1992 		hist_field->fn_num = flags & HIST_FIELD_FL_LOG2 ? HIST_FIELD_FN_LOG2 :
1993 			HIST_FIELD_FN_BUCKET;
1994 		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
1995 		if (!hist_field->operands[0])
1996 			goto free;
1997 		hist_field->size = hist_field->operands[0]->size;
1998 		hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL);
1999 		if (!hist_field->type)
2000 			goto free;
2001 		goto out;
2002 	}
2003 
2004 	if (flags & HIST_FIELD_FL_TIMESTAMP) {
2005 		hist_field->fn_num = HIST_FIELD_FN_TIMESTAMP;
2006 		hist_field->size = sizeof(u64);
2007 		hist_field->type = "u64";
2008 		goto out;
2009 	}
2010 
2011 	if (flags & HIST_FIELD_FL_CPU) {
2012 		hist_field->fn_num = HIST_FIELD_FN_CPU;
2013 		hist_field->size = sizeof(int);
2014 		hist_field->type = "unsigned int";
2015 		goto out;
2016 	}
2017 
2018 	if (WARN_ON_ONCE(!field))
2019 		goto out;
2020 
2021 	/* Pointers to strings are just pointers and dangerous to dereference */
2022 	if (is_string_field(field) &&
2023 	    (field->filter_type != FILTER_PTR_STRING)) {
2024 		flags |= HIST_FIELD_FL_STRING;
2025 
2026 		hist_field->size = MAX_FILTER_STR_VAL;
2027 		hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
2028 		if (!hist_field->type)
2029 			goto free;
2030 
2031 		if (field->filter_type == FILTER_STATIC_STRING) {
2032 			hist_field->fn_num = HIST_FIELD_FN_STRING;
2033 			hist_field->size = field->size;
2034 		} else if (field->filter_type == FILTER_DYN_STRING) {
2035 			hist_field->fn_num = HIST_FIELD_FN_DYNSTRING;
2036 		} else if (field->filter_type == FILTER_RDYN_STRING)
2037 			hist_field->fn_num = HIST_FIELD_FN_RELDYNSTRING;
2038 		else
2039 			hist_field->fn_num = HIST_FIELD_FN_PSTRING;
2040 	} else {
2041 		hist_field->size = field->size;
2042 		hist_field->is_signed = field->is_signed;
2043 		hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
2044 		if (!hist_field->type)
2045 			goto free;
2046 
2047 		hist_field->fn_num = select_value_fn(field->size,
2048 						     field->is_signed);
2049 		if (hist_field->fn_num == HIST_FIELD_FN_NOP) {
2050 			destroy_hist_field(hist_field, 0);
2051 			return NULL;
2052 		}
2053 	}
2054  out:
2055 	hist_field->field = field;
2056 	hist_field->flags = flags;
2057 
2058 	if (var_name) {
2059 		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
2060 		if (!hist_field->var.name)
2061 			goto free;
2062 	}
2063 
2064 	return hist_field;
2065  free:
2066 	destroy_hist_field(hist_field, 0);
2067 	return NULL;
2068 }
2069 
2070 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
2071 {
2072 	unsigned int i;
2073 
2074 	for (i = 0; i < HIST_FIELDS_MAX; i++) {
2075 		if (hist_data->fields[i]) {
2076 			destroy_hist_field(hist_data->fields[i], 0);
2077 			hist_data->fields[i] = NULL;
2078 		}
2079 	}
2080 
2081 	for (i = 0; i < hist_data->n_var_refs; i++) {
2082 		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
2083 		__destroy_hist_field(hist_data->var_refs[i]);
2084 		hist_data->var_refs[i] = NULL;
2085 	}
2086 }
2087 
2088 static int init_var_ref(struct hist_field *ref_field,
2089 			struct hist_field *var_field,
2090 			char *system, char *event_name)
2091 {
2092 	int err = 0;
2093 
2094 	ref_field->var.idx = var_field->var.idx;
2095 	ref_field->var.hist_data = var_field->hist_data;
2096 	ref_field->size = var_field->size;
2097 	ref_field->is_signed = var_field->is_signed;
2098 	ref_field->flags |= var_field->flags &
2099 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2100 
2101 	if (system) {
2102 		ref_field->system = kstrdup(system, GFP_KERNEL);
2103 		if (!ref_field->system)
2104 			return -ENOMEM;
2105 	}
2106 
2107 	if (event_name) {
2108 		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
2109 		if (!ref_field->event_name) {
2110 			err = -ENOMEM;
2111 			goto free;
2112 		}
2113 	}
2114 
2115 	if (var_field->var.name) {
2116 		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
2117 		if (!ref_field->name) {
2118 			err = -ENOMEM;
2119 			goto free;
2120 		}
2121 	} else if (var_field->name) {
2122 		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
2123 		if (!ref_field->name) {
2124 			err = -ENOMEM;
2125 			goto free;
2126 		}
2127 	}
2128 
2129 	ref_field->type = kstrdup_const(var_field->type, GFP_KERNEL);
2130 	if (!ref_field->type) {
2131 		err = -ENOMEM;
2132 		goto free;
2133 	}
2134  out:
2135 	return err;
2136  free:
2137 	kfree(ref_field->system);
2138 	ref_field->system = NULL;
2139 	kfree(ref_field->event_name);
2140 	ref_field->event_name = NULL;
2141 	kfree(ref_field->name);
2142 	ref_field->name = NULL;
2143 
2144 	goto out;
2145 }
2146 
2147 static int find_var_ref_idx(struct hist_trigger_data *hist_data,
2148 			    struct hist_field *var_field)
2149 {
2150 	struct hist_field *ref_field;
2151 	int i;
2152 
2153 	for (i = 0; i < hist_data->n_var_refs; i++) {
2154 		ref_field = hist_data->var_refs[i];
2155 		if (ref_field->var.idx == var_field->var.idx &&
2156 		    ref_field->var.hist_data == var_field->hist_data)
2157 			return i;
2158 	}
2159 
2160 	return -ENOENT;
2161 }
2162 
2163 /**
2164  * create_var_ref - Create a variable reference and attach it to trigger
2165  * @hist_data: The trigger that will be referencing the variable
2166  * @var_field: The VAR field to create a reference to
2167  * @system: The optional system string
2168  * @event_name: The optional event_name string
2169  *
2170  * Given a variable hist_field, create a VAR_REF hist_field that
2171  * represents a reference to it.
2172  *
2173  * This function also adds the reference to the trigger that
2174  * now references the variable.
2175  *
2176  * Return: The VAR_REF field if successful, NULL if not
2177  */
2178 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
2179 					 struct hist_field *var_field,
2180 					 char *system, char *event_name)
2181 {
2182 	unsigned long flags = HIST_FIELD_FL_VAR_REF;
2183 	struct hist_field *ref_field;
2184 	int i;
2185 
2186 	/* Check if the variable already exists */
2187 	for (i = 0; i < hist_data->n_var_refs; i++) {
2188 		ref_field = hist_data->var_refs[i];
2189 		if (ref_field->var.idx == var_field->var.idx &&
2190 		    ref_field->var.hist_data == var_field->hist_data) {
2191 			get_hist_field(ref_field);
2192 			return ref_field;
2193 		}
2194 	}
2195 	/* Sanity check to avoid an out-of-bounds write on 'hist_data->var_refs' */
2196 	if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
2197 		return NULL;
2198 	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
2199 	if (ref_field) {
2200 		if (init_var_ref(ref_field, var_field, system, event_name)) {
2201 			destroy_hist_field(ref_field, 0);
2202 			return NULL;
2203 		}
2204 
2205 		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
2206 		ref_field->var_ref_idx = hist_data->n_var_refs++;
2207 	}
2208 
2209 	return ref_field;
2210 }
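/*
 * Illustrative note (not part of the original source): references are
 * de-duplicated per trigger.  A second reference to the same variable
 * simply bumps the refcount of the existing VAR_REF field via
 * get_hist_field(); new references are capped at TRACING_MAP_VARS_MAX.
 */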
2211 
2212 static bool is_var_ref(char *var_name)
2213 {
2214 	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
2215 		return false;
2216 
2217 	return true;
2218 }
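/*
 * Illustrative examples (not part of the original source): "$ts0" is a
 * variable reference; "ts0" (no '$') and a bare "$" are not.
 */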
2219 
2220 static char *field_name_from_var(struct hist_trigger_data *hist_data,
2221 				 char *var_name)
2222 {
2223 	char *name, *field;
2224 	unsigned int i;
2225 
2226 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
2227 		name = hist_data->attrs->var_defs.name[i];
2228 
2229 		if (strcmp(var_name, name) == 0) {
2230 			field = hist_data->attrs->var_defs.expr[i];
2231 			if (contains_operator(field, NULL) || is_var_ref(field))
2232 				continue;
2233 			return field;
2234 		}
2235 	}
2236 
2237 	return NULL;
2238 }
2239 
2240 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
2241 				 char *system, char *event_name,
2242 				 char *var_name)
2243 {
2244 	struct trace_event_call *call;
2245 
2246 	if (system && event_name) {
2247 		call = hist_data->event_file->event_call;
2248 
2249 		if (strcmp(system, call->class->system) != 0)
2250 			return NULL;
2251 
2252 		if (strcmp(event_name, trace_event_name(call)) != 0)
2253 			return NULL;
2254 	}
2255 
2256 	if (!!system != !!event_name)
2257 		return NULL;
2258 
2259 	if (!is_var_ref(var_name))
2260 		return NULL;
2261 
2262 	var_name++;
2263 
2264 	return field_name_from_var(hist_data, var_name);
2265 }
2266 
2267 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2268 					char *system, char *event_name,
2269 					char *var_name)
2270 {
2271 	struct hist_field *var_field = NULL, *ref_field = NULL;
2272 	struct trace_array *tr = hist_data->event_file->tr;
2273 
2274 	if (!is_var_ref(var_name))
2275 		return NULL;
2276 
2277 	var_name++;
2278 
2279 	var_field = find_event_var(hist_data, system, event_name, var_name);
2280 	if (var_field)
2281 		ref_field = create_var_ref(hist_data, var_field,
2282 					   system, event_name);
2283 
2284 	if (!ref_field)
2285 		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
2286 
2287 	return ref_field;
2288 }
2289 
2290 static struct ftrace_event_field *
2291 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2292 	    char *field_str, unsigned long *flags, unsigned long *buckets)
2293 {
2294 	struct ftrace_event_field *field = NULL;
2295 	char *field_name, *modifier, *str;
2296 	struct trace_array *tr = file->tr;
2297 
2298 	modifier = str = kstrdup(field_str, GFP_KERNEL);
2299 	if (!modifier)
2300 		return ERR_PTR(-ENOMEM);
2301 
2302 	field_name = strsep(&modifier, ".");
2303 	if (modifier) {
2304 		if (strcmp(modifier, "hex") == 0)
2305 			*flags |= HIST_FIELD_FL_HEX;
2306 		else if (strcmp(modifier, "sym") == 0)
2307 			*flags |= HIST_FIELD_FL_SYM;
2308 		/*
2309 		 * 'sym-offset' occurrences in the trigger string are modified
2310 		 * to 'symXoffset' to simplify arithmetic expression parsing.
2311 		 */
2312 		else if (strcmp(modifier, "symXoffset") == 0)
2313 			*flags |= HIST_FIELD_FL_SYM_OFFSET;
2314 		else if ((strcmp(modifier, "execname") == 0) &&
2315 			 (strcmp(field_name, "common_pid") == 0))
2316 			*flags |= HIST_FIELD_FL_EXECNAME;
2317 		else if (strcmp(modifier, "syscall") == 0)
2318 			*flags |= HIST_FIELD_FL_SYSCALL;
2319 		else if (strcmp(modifier, "stacktrace") == 0)
2320 			*flags |= HIST_FIELD_FL_STACKTRACE;
2321 		else if (strcmp(modifier, "log2") == 0)
2322 			*flags |= HIST_FIELD_FL_LOG2;
2323 		else if (strcmp(modifier, "usecs") == 0)
2324 			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2325 		else if (strncmp(modifier, "bucket", 6) == 0) {
2326 			int ret;
2327 
2328 			modifier += 6;
2329 
2330 			if (*modifier == 's')
2331 				modifier++;
2332 			if (*modifier != '=')
2333 				goto error;
2334 			modifier++;
2335 			ret = kstrtoul(modifier, 0, buckets);
2336 			if (ret || !(*buckets))
2337 				goto error;
2338 			*flags |= HIST_FIELD_FL_BUCKET;
2339 		} else if (strncmp(modifier, "percent", 7) == 0) {
2340 			if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY))
2341 				goto error;
2342 			*flags |= HIST_FIELD_FL_PERCENT;
2343 		} else if (strncmp(modifier, "graph", 5) == 0) {
2344 			if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY))
2345 				goto error;
2346 			*flags |= HIST_FIELD_FL_GRAPH;
2347 		} else {
2348  error:
2349 			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
2350 			field = ERR_PTR(-EINVAL);
2351 			goto out;
2352 		}
2353 	}
2354 
2355 	if (strcmp(field_name, "common_timestamp") == 0) {
2356 		*flags |= HIST_FIELD_FL_TIMESTAMP;
2357 		hist_data->enable_timestamps = true;
2358 		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2359 			hist_data->attrs->ts_in_usecs = true;
2360 	} else if (strcmp(field_name, "common_stacktrace") == 0) {
2361 		*flags |= HIST_FIELD_FL_STACKTRACE;
2362 	} else if (strcmp(field_name, "common_cpu") == 0)
2363 		*flags |= HIST_FIELD_FL_CPU;
2364 	else if (strcmp(field_name, "hitcount") == 0)
2365 		*flags |= HIST_FIELD_FL_HITCOUNT;
2366 	else {
2367 		field = trace_find_event_field(file->event_call, field_name);
2368 		if (!field || !field->size) {
2369 			/*
2370 			 * For backward compatibility, if field_name
2371 			 * was "cpu" or "stacktrace", then we treat this
2372 			 * the same as common_cpu and common_stacktrace
2373 			 * respectively. This also works for "CPU", and
2374 			 * "STACKTRACE".
2375 			 */
2376 			if (field && field->filter_type == FILTER_CPU) {
2377 				*flags |= HIST_FIELD_FL_CPU;
2378 			} else if (field && field->filter_type == FILTER_STACKTRACE) {
2379 				*flags |= HIST_FIELD_FL_STACKTRACE;
2380 			} else {
2381 				hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
2382 					 errpos(field_name));
2383 				field = ERR_PTR(-EINVAL);
2384 				goto out;
2385 			}
2386 		}
2387 	}
2388  out:
2389 	kfree(str);
2390 
2391 	return field;
2392 }
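/*
 * Illustrative examples (not part of the original source, field names are
 * hypothetical): "common_pid.execname", "len.buckets=100" and
 * "common_timestamp.usecs" are all accepted here; "call_site.sym-offset"
 * arrives as "call_site.symXoffset" because of the rewrite mentioned in
 * the comment above.
 */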
2393 
2394 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2395 				       struct hist_field *var_ref,
2396 				       char *var_name)
2397 {
2398 	struct hist_field *alias = NULL;
2399 	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2400 
2401 	alias = create_hist_field(hist_data, NULL, flags, var_name);
2402 	if (!alias)
2403 		return NULL;
2404 
2405 	alias->fn_num = var_ref->fn_num;
2406 	alias->operands[0] = var_ref;
2407 
2408 	if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2409 		destroy_hist_field(alias, 0);
2410 		return NULL;
2411 	}
2412 
2413 	alias->var_ref_idx = var_ref->var_ref_idx;
2414 
2415 	return alias;
2416 }
2417 
2418 static struct hist_field *parse_const(struct hist_trigger_data *hist_data,
2419 				      char *str, char *var_name,
2420 				      unsigned long *flags)
2421 {
2422 	struct trace_array *tr = hist_data->event_file->tr;
2423 	struct hist_field *field = NULL;
2424 	u64 constant;
2425 
2426 	if (kstrtoull(str, 0, &constant)) {
2427 		hist_err(tr, HIST_ERR_EXPECT_NUMBER, errpos(str));
2428 		return NULL;
2429 	}
2430 
2431 	*flags |= HIST_FIELD_FL_CONST;
2432 	field = create_hist_field(hist_data, NULL, *flags, var_name);
2433 	if (!field)
2434 		return NULL;
2435 
2436 	field->constant = constant;
2437 
2438 	return field;
2439 }
2440 
2441 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2442 				     struct trace_event_file *file, char *str,
2443 				     unsigned long *flags, char *var_name)
2444 {
2445 	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2446 	struct ftrace_event_field *field = NULL;
2447 	struct hist_field *hist_field = NULL;
2448 	unsigned long buckets = 0;
2449 	int ret = 0;
2450 
2451 	if (isdigit(str[0])) {
2452 		hist_field = parse_const(hist_data, str, var_name, flags);
2453 		if (!hist_field) {
2454 			ret = -EINVAL;
2455 			goto out;
2456 		}
2457 		return hist_field;
2458 	}
2459 
2460 	s = strchr(str, '.');
2461 	if (s) {
2462 		s = strchr(++s, '.');
2463 		if (s) {
2464 			ref_system = strsep(&str, ".");
2465 			if (!str) {
2466 				ret = -EINVAL;
2467 				goto out;
2468 			}
2469 			ref_event = strsep(&str, ".");
2470 			if (!str) {
2471 				ret = -EINVAL;
2472 				goto out;
2473 			}
2474 			ref_var = str;
2475 		}
2476 	}
2477 
2478 	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2479 	if (!s) {
2480 		hist_field = parse_var_ref(hist_data, ref_system,
2481 					   ref_event, ref_var);
2482 		if (hist_field) {
2483 			if (var_name) {
2484 				hist_field = create_alias(hist_data, hist_field, var_name);
2485 				if (!hist_field) {
2486 					ret = -ENOMEM;
2487 					goto out;
2488 				}
2489 			}
2490 			return hist_field;
2491 		}
2492 	} else
2493 		str = s;
2494 
2495 	field = parse_field(hist_data, file, str, flags, &buckets);
2496 	if (IS_ERR(field)) {
2497 		ret = PTR_ERR(field);
2498 		goto out;
2499 	}
2500 
2501 	hist_field = create_hist_field(hist_data, field, *flags, var_name);
2502 	if (!hist_field) {
2503 		ret = -ENOMEM;
2504 		goto out;
2505 	}
2506 	hist_field->buckets = buckets;
2507 
2508 	return hist_field;
2509  out:
2510 	return ERR_PTR(ret);
2511 }
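/*
 * Illustrative note (not part of the original source): an atom is either a
 * numeric constant ("1000000"), a variable reference ("$ts0", possibly
 * qualified with a system and event name), or a plain event field with an
 * optional modifier ("prio", "common_pid.execname").
 */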
2512 
2513 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2514 				     struct trace_event_file *file,
2515 				     char *str, unsigned long flags,
2516 				     char *var_name, unsigned int *n_subexprs);
2517 
2518 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2519 				      struct trace_event_file *file,
2520 				      char *str, unsigned long flags,
2521 				      char *var_name, unsigned int *n_subexprs)
2522 {
2523 	struct hist_field *operand1, *expr = NULL;
2524 	unsigned long operand_flags;
2525 	int ret = 0;
2526 	char *s;
2527 
2528 	/* Unary minus operator, increment n_subexprs */
2529 	++*n_subexprs;
2530 
2531 	/* we support only -(xxx) i.e. explicit parens required */
2532 
2533 	if (*n_subexprs > 3) {
2534 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2535 		ret = -EINVAL;
2536 		goto free;
2537 	}
2538 
2539 	str++; /* skip leading '-' */
2540 
2541 	s = strchr(str, '(');
2542 	if (s)
2543 		str++;
2544 	else {
2545 		ret = -EINVAL;
2546 		goto free;
2547 	}
2548 
2549 	s = strrchr(str, ')');
2550 	if (s) {
2551 		 /* unary minus not supported in sub-expressions */
2552 		if (*(s+1) != '\0') {
2553 			hist_err(file->tr, HIST_ERR_UNARY_MINUS_SUBEXPR,
2554 				 errpos(str));
2555 			ret = -EINVAL;
2556 			goto free;
2557 		}
2558 		*s = '\0';
2559 	}
2560 	else {
2561 		ret = -EINVAL; /* no closing ')' */
2562 		goto free;
2563 	}
2564 
2565 	flags |= HIST_FIELD_FL_EXPR;
2566 	expr = create_hist_field(hist_data, NULL, flags, var_name);
2567 	if (!expr) {
2568 		ret = -ENOMEM;
2569 		goto free;
2570 	}
2571 
2572 	operand_flags = 0;
2573 	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs);
2574 	if (IS_ERR(operand1)) {
2575 		ret = PTR_ERR(operand1);
2576 		goto free;
2577 	}
2578 	if (operand1->flags & HIST_FIELD_FL_STRING) {
2579 		/* String type can not be the operand of unary operator. */
2580 		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
2581 		destroy_hist_field(operand1, 0);
2582 		ret = -EINVAL;
2583 		goto free;
2584 	}
2585 
2586 	expr->flags |= operand1->flags &
2587 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2588 	expr->fn_num = HIST_FIELD_FN_UMINUS;
2589 	expr->operands[0] = operand1;
2590 	expr->size = operand1->size;
2591 	expr->is_signed = operand1->is_signed;
2592 	expr->operator = FIELD_OP_UNARY_MINUS;
2593 	expr->name = expr_str(expr, 0);
2594 	expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
2595 	if (!expr->type) {
2596 		ret = -ENOMEM;
2597 		goto free;
2598 	}
2599 
2600 	return expr;
2601  free:
2602 	destroy_hist_field(expr, 0);
2603 	return ERR_PTR(ret);
2604 }
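/*
 * Illustrative examples (not part of the original source): "-($delta)" is
 * accepted; "-$delta" fails with -EINVAL because the parentheses are
 * mandatory, and "-($a)+$b" fails with HIST_ERR_UNARY_MINUS_SUBEXPR
 * because unary minus is only supported as the outermost operator.
 */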
2605 
2606 /*
2607  * If the operands are var refs, return pointers to the
2608  * variable(s) referenced in var1 and var2, else NULL.
2609  */
2610 static int check_expr_operands(struct trace_array *tr,
2611 			       struct hist_field *operand1,
2612 			       struct hist_field *operand2,
2613 			       struct hist_field **var1,
2614 			       struct hist_field **var2)
2615 {
2616 	unsigned long operand1_flags = operand1->flags;
2617 	unsigned long operand2_flags = operand2->flags;
2618 
2619 	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
2620 	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
2621 		struct hist_field *var;
2622 
2623 		var = find_var_field(operand1->var.hist_data, operand1->name);
2624 		if (!var)
2625 			return -EINVAL;
2626 		operand1_flags = var->flags;
2627 		*var1 = var;
2628 	}
2629 
2630 	if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
2631 	    (operand2_flags & HIST_FIELD_FL_ALIAS)) {
2632 		struct hist_field *var;
2633 
2634 		var = find_var_field(operand2->var.hist_data, operand2->name);
2635 		if (!var)
2636 			return -EINVAL;
2637 		operand2_flags = var->flags;
2638 		*var2 = var;
2639 	}
2640 
2641 	if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
2642 	    (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
2643 		hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
2644 		return -EINVAL;
2645 	}
2646 
2647 	return 0;
2648 }
2649 
2650 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2651 				     struct trace_event_file *file,
2652 				     char *str, unsigned long flags,
2653 				     char *var_name, unsigned int *n_subexprs)
2654 {
2655 	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
2656 	struct hist_field *var1 = NULL, *var2 = NULL;
2657 	unsigned long operand_flags, operand2_flags;
2658 	int field_op, ret = -EINVAL;
2659 	char *sep, *operand1_str;
2660 	enum hist_field_fn op_fn;
2661 	bool combine_consts;
2662 
2663 	if (*n_subexprs > 3) {
2664 		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2665 		return ERR_PTR(-EINVAL);
2666 	}
2667 
2668 	field_op = contains_operator(str, &sep);
2669 
2670 	if (field_op == FIELD_OP_NONE)
2671 		return parse_atom(hist_data, file, str, &flags, var_name);
2672 
2673 	if (field_op == FIELD_OP_UNARY_MINUS)
2674 		return parse_unary(hist_data, file, str, flags, var_name, n_subexprs);
2675 
2676 	/* Binary operator found, increment n_subexprs */
2677 	++*n_subexprs;
2678 
2679 	/* Split the expression string at the root operator */
2680 	if (!sep)
2681 		return ERR_PTR(-EINVAL);
2682 
2683 	*sep = '\0';
2684 	operand1_str = str;
2685 	str = sep+1;
2686 
2687 	/* Binary operator requires both operands */
2688 	if (*operand1_str == '\0' || *str == '\0')
2689 		return ERR_PTR(-EINVAL);
2690 
2691 	operand_flags = 0;
2692 
2693 	/* LHS of string is an expression e.g. a+b in a+b+c */
2694 	operand1 = parse_expr(hist_data, file, operand1_str, operand_flags, NULL, n_subexprs);
2695 	if (IS_ERR(operand1))
2696 		return ERR_CAST(operand1);
2697 
2698 	if (operand1->flags & HIST_FIELD_FL_STRING) {
2699 		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
2700 		ret = -EINVAL;
2701 		goto free_op1;
2702 	}
2703 
2704 	/* RHS of string is another expression e.g. c in a+b+c */
2705 	operand_flags = 0;
2706 	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs);
2707 	if (IS_ERR(operand2)) {
2708 		ret = PTR_ERR(operand2);
2709 		goto free_op1;
2710 	}
2711 	if (operand2->flags & HIST_FIELD_FL_STRING) {
2712 		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
2713 		ret = -EINVAL;
2714 		goto free_operands;
2715 	}
2716 
2717 	switch (field_op) {
2718 	case FIELD_OP_MINUS:
2719 		op_fn = HIST_FIELD_FN_MINUS;
2720 		break;
2721 	case FIELD_OP_PLUS:
2722 		op_fn = HIST_FIELD_FN_PLUS;
2723 		break;
2724 	case FIELD_OP_DIV:
2725 		op_fn = HIST_FIELD_FN_DIV;
2726 		break;
2727 	case FIELD_OP_MULT:
2728 		op_fn = HIST_FIELD_FN_MULT;
2729 		break;
2730 	default:
2731 		ret = -EINVAL;
2732 		goto free_operands;
2733 	}
2734 
2735 	ret = check_expr_operands(file->tr, operand1, operand2, &var1, &var2);
2736 	if (ret)
2737 		goto free_operands;
2738 
2739 	operand_flags = var1 ? var1->flags : operand1->flags;
2740 	operand2_flags = var2 ? var2->flags : operand2->flags;
2741 
2742 	/*
2743 	 * If both operands are constant, the expression can be
2744 	 * collapsed to a single constant.
2745 	 */
2746 	combine_consts = operand_flags & operand2_flags & HIST_FIELD_FL_CONST;
2747 
2748 	flags |= combine_consts ? HIST_FIELD_FL_CONST : HIST_FIELD_FL_EXPR;
2749 
2750 	flags |= operand1->flags &
2751 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2752 
2753 	expr = create_hist_field(hist_data, NULL, flags, var_name);
2754 	if (!expr) {
2755 		ret = -ENOMEM;
2756 		goto free_operands;
2757 	}
2758 
2759 	operand1->read_once = true;
2760 	operand2->read_once = true;
2761 
2762 	/* The operands are now owned and free'd by 'expr' */
2763 	expr->operands[0] = operand1;
2764 	expr->operands[1] = operand2;
2765 
2766 	if (field_op == FIELD_OP_DIV &&
2767 			operand2_flags & HIST_FIELD_FL_CONST) {
2768 		u64 divisor = var2 ? var2->constant : operand2->constant;
2769 
2770 		if (!divisor) {
2771 			hist_err(file->tr, HIST_ERR_DIVISION_BY_ZERO, errpos(str));
2772 			ret = -EDOM;
2773 			goto free_expr;
2774 		}
2775 
2776 		/*
2777 		 * Copy the divisor here so we don't have to look it up
2778 		 * later if this is a var ref
2779 		 */
2780 		operand2->constant = divisor;
2781 		op_fn = hist_field_get_div_fn(operand2);
2782 	}
2783 
2784 	expr->fn_num = op_fn;
2785 
2786 	if (combine_consts) {
2787 		if (var1)
2788 			expr->operands[0] = var1;
2789 		if (var2)
2790 			expr->operands[1] = var2;
2791 
2792 		expr->constant = hist_fn_call(expr, NULL, NULL, NULL, NULL);
2793 		expr->fn_num = HIST_FIELD_FN_CONST;
2794 
2795 		expr->operands[0] = NULL;
2796 		expr->operands[1] = NULL;
2797 
2798 		/*
2799 		 * var refs won't be destroyed immediately
2800 		 * See: destroy_hist_field()
2801 		 */
2802 		destroy_hist_field(operand2, 0);
2803 		destroy_hist_field(operand1, 0);
2804 
2805 		expr->name = expr_str(expr, 0);
2806 	} else {
2807 		/* The operand sizes should be the same, so just pick one */
2808 		expr->size = operand1->size;
2809 		expr->is_signed = operand1->is_signed;
2810 
2811 		expr->operator = field_op;
2812 		expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
2813 		if (!expr->type) {
2814 			ret = -ENOMEM;
2815 			goto free_expr;
2816 		}
2817 
2818 		expr->name = expr_str(expr, 0);
2819 	}
2820 
2821 	return expr;
2822 
2823 free_operands:
2824 	destroy_hist_field(operand2, 0);
2825 free_op1:
2826 	destroy_hist_field(operand1, 0);
2827 	return ERR_PTR(ret);
2828 
2829 free_expr:
2830 	destroy_hist_field(expr, 0);
2831 	return ERR_PTR(ret);
2832 }
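/*
 * Illustrative worked example (not part of the original source): for
 * "a+b-c" the root operator is the rightmost lower-precedence operator
 * ('-'), so the string is split into "a+b" and "c" and the left side is
 * parsed recursively, yielding ((a+b)-c).  If both operands turn out to
 * be constants, e.g. "2*1000000", the expression is folded into a single
 * HIST_FIELD_FN_CONST field; division by a constant zero is rejected
 * with -EDOM.
 */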
2833 
2834 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
2835 				 struct trace_event_file *file)
2836 {
2837 	struct event_trigger_data *test;
2838 
2839 	lockdep_assert_held(&event_mutex);
2840 
2841 	list_for_each_entry(test, &file->triggers, list) {
2842 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2843 			if (test->private_data == hist_data)
2844 				return test->filter_str;
2845 		}
2846 	}
2847 
2848 	return NULL;
2849 }
2850 
2851 static struct event_command trigger_hist_cmd;
2852 static int event_hist_trigger_parse(struct event_command *cmd_ops,
2853 				    struct trace_event_file *file,
2854 				    char *glob, char *cmd,
2855 				    char *param_and_filter);
2856 
2857 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
2858 			    struct hist_trigger_data *hist_data,
2859 			    unsigned int n_keys)
2860 {
2861 	struct hist_field *target_hist_field, *hist_field;
2862 	unsigned int n, i, j;
2863 
2864 	if (hist_data->n_fields - hist_data->n_vals != n_keys)
2865 		return false;
2866 
2867 	i = hist_data->n_vals;
2868 	j = target_hist_data->n_vals;
2869 
2870 	for (n = 0; n < n_keys; n++) {
2871 		hist_field = hist_data->fields[i + n];
2872 		target_hist_field = target_hist_data->fields[j + n];
2873 
2874 		if (strcmp(hist_field->type, target_hist_field->type) != 0)
2875 			return false;
2876 		if (hist_field->size != target_hist_field->size)
2877 			return false;
2878 		if (hist_field->is_signed != target_hist_field->is_signed)
2879 			return false;
2880 	}
2881 
2882 	return true;
2883 }
2884 
2885 static struct hist_trigger_data *
2886 find_compatible_hist(struct hist_trigger_data *target_hist_data,
2887 		     struct trace_event_file *file)
2888 {
2889 	struct hist_trigger_data *hist_data;
2890 	struct event_trigger_data *test;
2891 	unsigned int n_keys;
2892 
2893 	lockdep_assert_held(&event_mutex);
2894 
2895 	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
2896 
2897 	list_for_each_entry(test, &file->triggers, list) {
2898 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2899 			hist_data = test->private_data;
2900 
2901 			if (compatible_keys(target_hist_data, hist_data, n_keys))
2902 				return hist_data;
2903 		}
2904 	}
2905 
2906 	return NULL;
2907 }
2908 
2909 static struct trace_event_file *event_file(struct trace_array *tr,
2910 					   char *system, char *event_name)
2911 {
2912 	struct trace_event_file *file;
2913 
2914 	file = __find_event_file(tr, system, event_name);
2915 	if (!file)
2916 		return ERR_PTR(-EINVAL);
2917 
2918 	return file;
2919 }
2920 
2921 static struct hist_field *
2922 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
2923 			 char *system, char *event_name, char *field_name)
2924 {
2925 	struct hist_field *event_var;
2926 	char *synthetic_name;
2927 
2928 	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2929 	if (!synthetic_name)
2930 		return ERR_PTR(-ENOMEM);
2931 
2932 	strcpy(synthetic_name, "synthetic_");
2933 	strcat(synthetic_name, field_name);
2934 
2935 	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
2936 
2937 	kfree(synthetic_name);
2938 
2939 	return event_var;
2940 }
2941 
2942 /**
2943  * create_field_var_hist - Automatically create a histogram and var for a field
2944  * @target_hist_data: The target hist trigger
2945  * @subsys_name: Optional subsystem name
2946  * @event_name: Optional event name
2947  * @field_name: The name of the field (and the resulting variable)
2948  *
2949  * Hist trigger actions fetch data from variables, not directly from
2950  * events.  However, for convenience, users are allowed to directly
2951  * specify an event field in an action, which will be automatically
2952  * converted into a variable on their behalf.
2953  *
2954  * If a user specifies a field on an event that isn't the event the
2955  * histogram currently being defined (the target event histogram), the
2956  * only way that can be accomplished is if a new hist trigger is
2957  * created and the field variable defined on that.
2958  *
2959  * This function creates a new histogram compatible with the target
2960  * event (meaning a histogram with the same key as the target
2961  * histogram), and creates a variable for the specified field, but
2962  * with 'synthetic_' prepended to the variable name in order to avoid
2963  * collision with normal field variables.
2964  *
2965  * Return: The variable created for the field.
2966  */
2967 static struct hist_field *
2968 create_field_var_hist(struct hist_trigger_data *target_hist_data,
2969 		      char *subsys_name, char *event_name, char *field_name)
2970 {
2971 	struct trace_array *tr = target_hist_data->event_file->tr;
2972 	struct hist_trigger_data *hist_data;
2973 	unsigned int i, n, first = true;
2974 	struct field_var_hist *var_hist;
2975 	struct trace_event_file *file;
2976 	struct hist_field *key_field;
2977 	struct hist_field *event_var;
2978 	char *saved_filter;
2979 	char *cmd;
2980 	int ret;
2981 
2982 	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
2983 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
2984 		return ERR_PTR(-EINVAL);
2985 	}
2986 
2987 	file = event_file(tr, subsys_name, event_name);
2988 
2989 	if (IS_ERR(file)) {
2990 		hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
2991 		ret = PTR_ERR(file);
2992 		return ERR_PTR(ret);
2993 	}
2994 
2995 	/*
2996 	 * Look for a histogram compatible with target.  We'll use the
2997 	 * found histogram specification to create a new matching
2998 	 * histogram with our variable on it.  target_hist_data is not
2999 	 * yet a registered histogram so we can't use that.
3000 	 */
3001 	hist_data = find_compatible_hist(target_hist_data, file);
3002 	if (!hist_data) {
3003 		hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
3004 		return ERR_PTR(-EINVAL);
3005 	}
3006 
3007 	/* See if a synthetic field variable has already been created */
3008 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3009 					     event_name, field_name);
3010 	if (!IS_ERR_OR_NULL(event_var))
3011 		return event_var;
3012 
3013 	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
3014 	if (!var_hist)
3015 		return ERR_PTR(-ENOMEM);
3016 
3017 	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3018 	if (!cmd) {
3019 		kfree(var_hist);
3020 		return ERR_PTR(-ENOMEM);
3021 	}
3022 
3023 	/* Use the same keys as the compatible histogram */
3024 	strcat(cmd, "keys=");
3025 
3026 	for_each_hist_key_field(i, hist_data) {
3027 		key_field = hist_data->fields[i];
3028 		if (!first)
3029 			strcat(cmd, ",");
3030 		strcat(cmd, key_field->field->name);
3031 		first = false;
3032 	}
3033 
3034 	/* Create the synthetic field variable specification */
3035 	strcat(cmd, ":synthetic_");
3036 	strcat(cmd, field_name);
3037 	strcat(cmd, "=");
3038 	strcat(cmd, field_name);
3039 
3040 	/* Use the same filter as the compatible histogram */
3041 	saved_filter = find_trigger_filter(hist_data, file);
3042 	if (saved_filter) {
3043 		strcat(cmd, " if ");
3044 		strcat(cmd, saved_filter);
3045 	}
3046 
3047 	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
3048 	if (!var_hist->cmd) {
3049 		kfree(cmd);
3050 		kfree(var_hist);
3051 		return ERR_PTR(-ENOMEM);
3052 	}
3053 
3054 	/* Save the compatible histogram information */
3055 	var_hist->hist_data = hist_data;
3056 
3057 	/* Create the new histogram with our variable */
3058 	ret = event_hist_trigger_parse(&trigger_hist_cmd, file,
3059 				       "", "hist", cmd);
3060 	if (ret) {
3061 		kfree(cmd);
3062 		kfree(var_hist->cmd);
3063 		kfree(var_hist);
3064 		hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
3065 		return ERR_PTR(ret);
3066 	}
3067 
3068 	kfree(cmd);
3069 
3070 	/* If we can't find the variable, something went wrong */
3071 	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3072 					     event_name, field_name);
3073 	if (IS_ERR_OR_NULL(event_var)) {
3074 		kfree(var_hist->cmd);
3075 		kfree(var_hist);
3076 		hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
3077 		return ERR_PTR(-EINVAL);
3078 	}
3079 
3080 	n = target_hist_data->n_field_var_hists;
3081 	target_hist_data->field_var_hists[n] = var_hist;
3082 	target_hist_data->n_field_var_hists++;
3083 
3084 	return event_var;
3085 }
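/*
 * Illustrative note (not part of the original source): the command string
 * assembled above has the form
 *
 *   keys=<keys of the compatible hist>:synthetic_<field>=<field> [if <filter>]
 *
 * and is registered on the other event via event_hist_trigger_parse(), so
 * the resulting variable can later be looked up as "synthetic_<field>" by
 * find_synthetic_field_var().
 */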
3086 
3087 static struct hist_field *
3088 find_target_event_var(struct hist_trigger_data *hist_data,
3089 		      char *subsys_name, char *event_name, char *var_name)
3090 {
3091 	struct trace_event_file *file = hist_data->event_file;
3092 	struct hist_field *hist_field = NULL;
3093 
3094 	if (subsys_name) {
3095 		struct trace_event_call *call;
3096 
3097 		if (!event_name)
3098 			return NULL;
3099 
3100 		call = file->event_call;
3101 
3102 		if (strcmp(subsys_name, call->class->system) != 0)
3103 			return NULL;
3104 
3105 		if (strcmp(event_name, trace_event_name(call)) != 0)
3106 			return NULL;
3107 	}
3108 
3109 	hist_field = find_var_field(hist_data, var_name);
3110 
3111 	return hist_field;
3112 }
3113 
3114 static inline void __update_field_vars(struct tracing_map_elt *elt,
3115 				       struct trace_buffer *buffer,
3116 				       struct ring_buffer_event *rbe,
3117 				       void *rec,
3118 				       struct field_var **field_vars,
3119 				       unsigned int n_field_vars,
3120 				       unsigned int field_var_str_start)
3121 {
3122 	struct hist_elt_data *elt_data = elt->private_data;
3123 	unsigned int i, j, var_idx;
3124 	u64 var_val;
3125 
3126 	/* Make sure stacktrace can fit in the string variable length */
3127 	BUILD_BUG_ON((HIST_STACKTRACE_DEPTH + 1) * sizeof(long) >= STR_VAR_LEN_MAX);
3128 
3129 	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
3130 		struct field_var *field_var = field_vars[i];
3131 		struct hist_field *var = field_var->var;
3132 		struct hist_field *val = field_var->val;
3133 
3134 		var_val = hist_fn_call(val, elt, buffer, rbe, rec);
3135 		var_idx = var->var.idx;
3136 
3137 		if (val->flags & (HIST_FIELD_FL_STRING |
3138 				  HIST_FIELD_FL_STACKTRACE)) {
3139 			char *str = elt_data->field_var_str[j++];
3140 			char *val_str = (char *)(uintptr_t)var_val;
3141 			unsigned int size;
3142 
3143 			if (val->flags & HIST_FIELD_FL_STRING) {
3144 				size = min(val->size, STR_VAR_LEN_MAX);
3145 				strscpy(str, val_str, size);
3146 			} else {
3147 				char *stack_start = str + sizeof(unsigned long);
3148 				int e;
3149 
3150 				e = stack_trace_save((void *)stack_start,
3151 						     HIST_STACKTRACE_DEPTH,
3152 						     HIST_STACKTRACE_SKIP);
3153 				if (e < HIST_STACKTRACE_DEPTH - 1)
3154 					((unsigned long *)stack_start)[e] = 0;
3155 				*((unsigned long *)str) = e;
3156 			}
3157 			var_val = (u64)(uintptr_t)str;
3158 		}
3159 		tracing_map_set_var(elt, var_idx, var_val);
3160 	}
3161 }
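/*
 * Illustrative note (not part of the original source): numeric values are
 * stored directly in the tracing_map variable slot, string values are
 * copied into the element's field_var_str[] buffer (at most
 * STR_VAR_LEN_MAX bytes), and stacktraces store the number of saved
 * entries in the first unsigned long followed by the entries themselves.
 */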
3162 
3163 static void update_field_vars(struct hist_trigger_data *hist_data,
3164 			      struct tracing_map_elt *elt,
3165 			      struct trace_buffer *buffer,
3166 			      struct ring_buffer_event *rbe,
3167 			      void *rec)
3168 {
3169 	__update_field_vars(elt, buffer, rbe, rec, hist_data->field_vars,
3170 			    hist_data->n_field_vars, 0);
3171 }
3172 
3173 static void save_track_data_vars(struct hist_trigger_data *hist_data,
3174 				 struct tracing_map_elt *elt,
3175 				 struct trace_buffer *buffer,  void *rec,
3176 				 struct ring_buffer_event *rbe, void *key,
3177 				 struct action_data *data, u64 *var_ref_vals)
3178 {
3179 	__update_field_vars(elt, buffer, rbe, rec, hist_data->save_vars,
3180 			    hist_data->n_save_vars, hist_data->n_field_var_str);
3181 }
3182 
3183 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
3184 				     struct trace_event_file *file,
3185 				     char *name, int size, const char *type)
3186 {
3187 	struct hist_field *var;
3188 	int idx;
3189 
3190 	if (find_var(hist_data, file, name) && !hist_data->remove) {
3191 		var = ERR_PTR(-EINVAL);
3192 		goto out;
3193 	}
3194 
3195 	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3196 	if (!var) {
3197 		var = ERR_PTR(-ENOMEM);
3198 		goto out;
3199 	}
3200 
3201 	idx = tracing_map_add_var(hist_data->map);
3202 	if (idx < 0) {
3203 		kfree(var);
3204 		var = ERR_PTR(-EINVAL);
3205 		goto out;
3206 	}
3207 
3208 	var->ref = 1;
3209 	var->flags = HIST_FIELD_FL_VAR;
3210 	var->var.idx = idx;
3211 	var->var.hist_data = var->hist_data = hist_data;
3212 	var->size = size;
3213 	var->var.name = kstrdup(name, GFP_KERNEL);
3214 	var->type = kstrdup_const(type, GFP_KERNEL);
3215 	if (!var->var.name || !var->type) {
3216 		kfree_const(var->type);
3217 		kfree(var->var.name);
3218 		kfree(var);
3219 		var = ERR_PTR(-ENOMEM);
3220 	}
3221  out:
3222 	return var;
3223 }
3224 
3225 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3226 					  struct trace_event_file *file,
3227 					  char *field_name)
3228 {
3229 	struct hist_field *val = NULL, *var = NULL;
3230 	unsigned long flags = HIST_FIELD_FL_VAR;
3231 	struct trace_array *tr = file->tr;
3232 	struct field_var *field_var;
3233 	int ret = 0;
3234 
3235 	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3236 		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3237 		ret = -EINVAL;
3238 		goto err;
3239 	}
3240 
3241 	val = parse_atom(hist_data, file, field_name, &flags, NULL);
3242 	if (IS_ERR(val)) {
3243 		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
3244 		ret = PTR_ERR(val);
3245 		goto err;
3246 	}
3247 
3248 	var = create_var(hist_data, file, field_name, val->size, val->type);
3249 	if (IS_ERR(var)) {
3250 		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
3251 		kfree(val);
3252 		ret = PTR_ERR(var);
3253 		goto err;
3254 	}
3255 
3256 	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
3257 	if (!field_var) {
3258 		kfree(val);
3259 		kfree(var);
3260 		ret =  -ENOMEM;
3261 		goto err;
3262 	}
3263 
3264 	field_var->var = var;
3265 	field_var->val = val;
3266  out:
3267 	return field_var;
3268  err:
3269 	field_var = ERR_PTR(ret);
3270 	goto out;
3271 }
3272 
3273 /**
3274  * create_target_field_var - Automatically create a variable for a field
3275  * @target_hist_data: The target hist trigger
3276  * @subsys_name: Optional subsystem name
3277  * @event_name: Optional event name
3278  * @var_name: The name of the field (and the resulting variable)
3279  *
3280  * Hist trigger actions fetch data from variables, not directly from
3281  * events.  However, for convenience, users are allowed to directly
3282  * specify an event field in an action, which will be automatically
3283  * converted into a variable on their behalf.
3284  *
3285  * This function creates a field variable with the name var_name on
3286  * the hist trigger currently being defined on the target event.  If
3287  * subsys_name and event_name are specified, this function simply
3288  * verifies that they do in fact match the target event subsystem and
3289  * event name.
3290  *
3291  * Return: The variable created for the field.
3292  */
3293 static struct field_var *
3294 create_target_field_var(struct hist_trigger_data *target_hist_data,
3295 			char *subsys_name, char *event_name, char *var_name)
3296 {
3297 	struct trace_event_file *file = target_hist_data->event_file;
3298 
3299 	if (subsys_name) {
3300 		struct trace_event_call *call;
3301 
3302 		if (!event_name)
3303 			return NULL;
3304 
3305 		call = file->event_call;
3306 
3307 		if (strcmp(subsys_name, call->class->system) != 0)
3308 			return NULL;
3309 
3310 		if (strcmp(event_name, trace_event_name(call)) != 0)
3311 			return NULL;
3312 	}
3313 
3314 	return create_field_var(target_hist_data, file, var_name);
3315 }
3316 
3317 static bool check_track_val_max(u64 track_val, u64 var_val)
3318 {
3319 	if (var_val <= track_val)
3320 		return false;
3321 
3322 	return true;
3323 }
3324 
3325 static bool check_track_val_changed(u64 track_val, u64 var_val)
3326 {
3327 	if (var_val == track_val)
3328 		return false;
3329 
3330 	return true;
3331 }
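/*
 * Illustrative note (not part of the original source): these two helpers
 * implement the onmax() and onchange() comparisons: onmax() fires only
 * when the new value is strictly greater than the tracked maximum,
 * onchange() whenever the value differs from the previously saved one.
 */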
3332 
3333 static u64 get_track_val(struct hist_trigger_data *hist_data,
3334 			 struct tracing_map_elt *elt,
3335 			 struct action_data *data)
3336 {
3337 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
3338 	u64 track_val;
3339 
3340 	track_val = tracing_map_read_var(elt, track_var_idx);
3341 
3342 	return track_val;
3343 }
3344 
3345 static void save_track_val(struct hist_trigger_data *hist_data,
3346 			   struct tracing_map_elt *elt,
3347 			   struct action_data *data, u64 var_val)
3348 {
3349 	unsigned int track_var_idx = data->track_data.track_var->var.idx;
3350 
3351 	tracing_map_set_var(elt, track_var_idx, var_val);
3352 }
3353 
3354 static void save_track_data(struct hist_trigger_data *hist_data,
3355 			    struct tracing_map_elt *elt,
3356 			    struct trace_buffer *buffer, void *rec,
3357 			    struct ring_buffer_event *rbe, void *key,
3358 			    struct action_data *data, u64 *var_ref_vals)
3359 {
3360 	if (data->track_data.save_data)
3361 		data->track_data.save_data(hist_data, elt, buffer, rec, rbe,
3362 					   key, data, var_ref_vals);
3363 }
3364 
3365 static bool check_track_val(struct tracing_map_elt *elt,
3366 			    struct action_data *data,
3367 			    u64 var_val)
3368 {
3369 	struct hist_trigger_data *hist_data;
3370 	u64 track_val;
3371 
3372 	hist_data = data->track_data.track_var->hist_data;
3373 	track_val = get_track_val(hist_data, elt, data);
3374 
3375 	return data->track_data.check_val(track_val, var_val);
3376 }
3377 
3378 #ifdef CONFIG_TRACER_SNAPSHOT
3379 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3380 {
3381 	/* called with tr->max_lock held */
3382 	struct track_data *track_data = tr->cond_snapshot->cond_data;
3383 	struct hist_elt_data *elt_data, *track_elt_data;
3384 	struct snapshot_context *context = cond_data;
3385 	struct action_data *action;
3386 	u64 track_val;
3387 
3388 	if (!track_data)
3389 		return false;
3390 
3391 	action = track_data->action_data;
3392 
3393 	track_val = get_track_val(track_data->hist_data, context->elt,
3394 				  track_data->action_data);
3395 
3396 	if (!action->track_data.check_val(track_data->track_val, track_val))
3397 		return false;
3398 
3399 	track_data->track_val = track_val;
3400 	memcpy(track_data->key, context->key, track_data->key_len);
3401 
3402 	elt_data = context->elt->private_data;
3403 	track_elt_data = track_data->elt.private_data;
3404 	if (elt_data->comm)
3405 		strscpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
3406 
3407 	track_data->updated = true;
3408 
3409 	return true;
3410 }
3411 
3412 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3413 				     struct tracing_map_elt *elt,
3414 				     struct trace_buffer *buffer, void *rec,
3415 				     struct ring_buffer_event *rbe, void *key,
3416 				     struct action_data *data,
3417 				     u64 *var_ref_vals)
3418 {
3419 	struct trace_event_file *file = hist_data->event_file;
3420 	struct snapshot_context context;
3421 
3422 	context.elt = elt;
3423 	context.key = key;
3424 
3425 	tracing_snapshot_cond(file->tr, &context);
3426 }
3427 
3428 static void hist_trigger_print_key(struct seq_file *m,
3429 				   struct hist_trigger_data *hist_data,
3430 				   void *key,
3431 				   struct tracing_map_elt *elt);
3432 
3433 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
3434 {
3435 	unsigned int i;
3436 
3437 	if (!hist_data->n_actions)
3438 		return NULL;
3439 
3440 	for (i = 0; i < hist_data->n_actions; i++) {
3441 		struct action_data *data = hist_data->actions[i];
3442 
3443 		if (data->action == ACTION_SNAPSHOT)
3444 			return data;
3445 	}
3446 
3447 	return NULL;
3448 }
3449 
3450 static void track_data_snapshot_print(struct seq_file *m,
3451 				      struct hist_trigger_data *hist_data)
3452 {
3453 	struct trace_event_file *file = hist_data->event_file;
3454 	struct track_data *track_data;
3455 	struct action_data *action;
3456 
3457 	track_data = tracing_cond_snapshot_data(file->tr);
3458 	if (!track_data)
3459 		return;
3460 
3461 	if (!track_data->updated)
3462 		return;
3463 
3464 	action = snapshot_action(hist_data);
3465 	if (!action)
3466 		return;
3467 
3468 	seq_puts(m, "\nSnapshot taken (see tracing/snapshot).  Details:\n");
3469 	seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
3470 		   action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
3471 		   action->track_data.var_str, track_data->track_val);
3472 
3473 	seq_puts(m, "\ttriggered by event with key: ");
3474 	hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
3475 	seq_putc(m, '\n');
3476 }
3477 #else
3478 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3479 {
3480 	return false;
3481 }
3482 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3483 				     struct tracing_map_elt *elt,
3484 				     struct trace_buffer *buffer, void *rec,
3485 				     struct ring_buffer_event *rbe, void *key,
3486 				     struct action_data *data,
3487 				     u64 *var_ref_vals) {}
3488 static void track_data_snapshot_print(struct seq_file *m,
3489 				      struct hist_trigger_data *hist_data) {}
3490 #endif /* CONFIG_TRACER_SNAPSHOT */
3491 
3492 static void track_data_print(struct seq_file *m,
3493 			     struct hist_trigger_data *hist_data,
3494 			     struct tracing_map_elt *elt,
3495 			     struct action_data *data)
3496 {
3497 	u64 track_val = get_track_val(hist_data, elt, data);
3498 	unsigned int i, save_var_idx;
3499 
3500 	if (data->handler == HANDLER_ONMAX)
3501 		seq_printf(m, "\n\tmax: %10llu", track_val);
3502 	else if (data->handler == HANDLER_ONCHANGE)
3503 		seq_printf(m, "\n\tchanged: %10llu", track_val);
3504 
3505 	if (data->action == ACTION_SNAPSHOT)
3506 		return;
3507 
3508 	for (i = 0; i < hist_data->n_save_vars; i++) {
3509 		struct hist_field *save_val = hist_data->save_vars[i]->val;
3510 		struct hist_field *save_var = hist_data->save_vars[i]->var;
3511 		u64 val;
3512 
3513 		save_var_idx = save_var->var.idx;
3514 
3515 		val = tracing_map_read_var(elt, save_var_idx);
3516 
3517 		if (save_val->flags & HIST_FIELD_FL_STRING) {
3518 			seq_printf(m, "  %s: %-32s", save_var->var.name,
3519 				   (char *)(uintptr_t)(val));
3520 		} else
3521 			seq_printf(m, "  %s: %10llu", save_var->var.name, val);
3522 	}
3523 }
3524 
3525 static void ontrack_action(struct hist_trigger_data *hist_data,
3526 			   struct tracing_map_elt *elt,
3527 			   struct trace_buffer *buffer, void *rec,
3528 			   struct ring_buffer_event *rbe, void *key,
3529 			   struct action_data *data, u64 *var_ref_vals)
3530 {
3531 	u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
3532 
3533 	if (check_track_val(elt, data, var_val)) {
3534 		save_track_val(hist_data, elt, data, var_val);
3535 		save_track_data(hist_data, elt, buffer, rec, rbe,
3536 				key, data, var_ref_vals);
3537 	}
3538 }
3539 
3540 static void action_data_destroy(struct action_data *data)
3541 {
3542 	unsigned int i;
3543 
3544 	lockdep_assert_held(&event_mutex);
3545 
3546 	kfree(data->action_name);
3547 
3548 	for (i = 0; i < data->n_params; i++)
3549 		kfree(data->params[i]);
3550 
3551 	if (data->synth_event)
3552 		data->synth_event->ref--;
3553 
3554 	kfree(data->synth_event_name);
3555 
3556 	kfree(data);
3557 }
3558 
3559 static void track_data_destroy(struct hist_trigger_data *hist_data,
3560 			       struct action_data *data)
3561 {
3562 	struct trace_event_file *file = hist_data->event_file;
3563 
3564 	destroy_hist_field(data->track_data.track_var, 0);
3565 
3566 	if (data->action == ACTION_SNAPSHOT) {
3567 		struct track_data *track_data;
3568 
3569 		track_data = tracing_cond_snapshot_data(file->tr);
3570 		if (track_data && track_data->hist_data == hist_data) {
3571 			tracing_snapshot_cond_disable(file->tr);
3572 			track_data_free(track_data);
3573 		}
3574 	}
3575 
3576 	kfree(data->track_data.var_str);
3577 
3578 	action_data_destroy(data);
3579 }
3580 
3581 static int action_create(struct hist_trigger_data *hist_data,
3582 			 struct action_data *data);
3583 
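/*
 * Set up the tracking state for an onmax()/onchange() handler, e.g.
 * onmax($wakeup_lat).save(pid,prio): resolve the tracked variable,
 * create a reference to it and a hidden __max/__change variable to
 * hold the tracked value, then create the action itself.
 */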
3584 static int track_data_create(struct hist_trigger_data *hist_data,
3585 			     struct action_data *data)
3586 {
3587 	struct hist_field *var_field, *ref_field, *track_var = NULL;
3588 	struct trace_event_file *file = hist_data->event_file;
3589 	struct trace_array *tr = file->tr;
3590 	char *track_data_var_str;
3591 	int ret = 0;
3592 
3593 	track_data_var_str = data->track_data.var_str;
3594 	if (track_data_var_str[0] != '$') {
3595 		hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3596 		return -EINVAL;
3597 	}
3598 	track_data_var_str++;
3599 
3600 	var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3601 	if (!var_field) {
3602 		hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3603 		return -EINVAL;
3604 	}
3605 
3606 	ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3607 	if (!ref_field)
3608 		return -ENOMEM;
3609 
3610 	data->track_data.var_ref = ref_field;
3611 
3612 	if (data->handler == HANDLER_ONMAX)
3613 		track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3614 	if (IS_ERR(track_var)) {
3615 		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3616 		ret = PTR_ERR(track_var);
3617 		goto out;
3618 	}
3619 
3620 	if (data->handler == HANDLER_ONCHANGE)
3621 		track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
3622 	if (IS_ERR(track_var)) {
3623 		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3624 		ret = PTR_ERR(track_var);
3625 		goto out;
3626 	}
3627 	data->track_data.track_var = track_var;
3628 
3629 	ret = action_create(hist_data, data);
3630  out:
3631 	return ret;
3632 }
3633 
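/*
 * Split a comma-separated action parameter list into data->params[].
 * When the trace() keyword is used, the first parameter names the
 * synthetic event and is stored in data->synth_event_name instead.
 */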
3634 static int parse_action_params(struct trace_array *tr, char *params,
3635 			       struct action_data *data)
3636 {
3637 	char *param, *saved_param;
3638 	bool first_param = true;
3639 	int ret = 0;
3640 
3641 	while (params) {
3642 		if (data->n_params >= SYNTH_FIELDS_MAX) {
3643 			hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
3644 			ret = -EINVAL;
3645 			goto out;
3646 		}
3647 
3648 		param = strsep(&params, ",");
3649 		if (!param) {
3650 			hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
3651 			ret = -EINVAL;
3652 			goto out;
3653 		}
3654 
3655 		param = strstrip(param);
3656 		if (strlen(param) < 2) {
3657 			hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
3658 			ret = -EINVAL;
3659 			goto out;
3660 		}
3661 
3662 		saved_param = kstrdup(param, GFP_KERNEL);
3663 		if (!saved_param) {
3664 			ret = -ENOMEM;
3665 			goto out;
3666 		}
3667 
3668 		if (first_param && data->use_trace_keyword) {
3669 			data->synth_event_name = saved_param;
3670 			first_param = false;
3671 			continue;
3672 		}
3673 		first_param = false;
3674 
3675 		data->params[data->n_params++] = saved_param;
3676 	}
3677  out:
3678 	return ret;
3679 }
3680 
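/*
 * Parse the ".<action>(params)" part of a handler string into an
 * action_data: save(field,...), snapshot() and trace(synth_event,...)
 * are recognized explicitly, anything else is treated as the name of a
 * synthetic event to generate.
 */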
3681 static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
3682 			enum handler_id handler)
3683 {
3684 	char *action_name;
3685 	int ret = 0;
3686 
3687 	strsep(&str, ".");
3688 	if (!str) {
3689 		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3690 		ret = -EINVAL;
3691 		goto out;
3692 	}
3693 
3694 	action_name = strsep(&str, "(");
3695 	if (!action_name || !str) {
3696 		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3697 		ret = -EINVAL;
3698 		goto out;
3699 	}
3700 
3701 	if (str_has_prefix(action_name, "save")) {
3702 		char *params = strsep(&str, ")");
3703 
3704 		if (!params) {
3705 			hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
3706 			ret = -EINVAL;
3707 			goto out;
3708 		}
3709 
3710 		ret = parse_action_params(tr, params, data);
3711 		if (ret)
3712 			goto out;
3713 
3714 		if (handler == HANDLER_ONMAX)
3715 			data->track_data.check_val = check_track_val_max;
3716 		else if (handler == HANDLER_ONCHANGE)
3717 			data->track_data.check_val = check_track_val_changed;
3718 		else {
3719 			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3720 			ret = -EINVAL;
3721 			goto out;
3722 		}
3723 
3724 		data->track_data.save_data = save_track_data_vars;
3725 		data->fn = ontrack_action;
3726 		data->action = ACTION_SAVE;
3727 	} else if (str_has_prefix(action_name, "snapshot")) {
3728 		char *params = strsep(&str, ")");
3729 
3730 		if (!str) {
3731 			hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
3732 			ret = -EINVAL;
3733 			goto out;
3734 		}
3735 
3736 		if (handler == HANDLER_ONMAX)
3737 			data->track_data.check_val = check_track_val_max;
3738 		else if (handler == HANDLER_ONCHANGE)
3739 			data->track_data.check_val = check_track_val_changed;
3740 		else {
3741 			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3742 			ret = -EINVAL;
3743 			goto out;
3744 		}
3745 
3746 		data->track_data.save_data = save_track_data_snapshot;
3747 		data->fn = ontrack_action;
3748 		data->action = ACTION_SNAPSHOT;
3749 	} else {
3750 		char *params = strsep(&str, ")");
3751 
3752 		if (str_has_prefix(action_name, "trace"))
3753 			data->use_trace_keyword = true;
3754 
3755 		if (params) {
3756 			ret = parse_action_params(tr, params, data);
3757 			if (ret)
3758 				goto out;
3759 		}
3760 
3761 		if (handler == HANDLER_ONMAX)
3762 			data->track_data.check_val = check_track_val_max;
3763 		else if (handler == HANDLER_ONCHANGE)
3764 			data->track_data.check_val = check_track_val_changed;
3765 
3766 		if (handler != HANDLER_ONMATCH) {
3767 			data->track_data.save_data = action_trace;
3768 			data->fn = ontrack_action;
3769 		} else
3770 			data->fn = action_trace;
3771 
3772 		data->action = ACTION_TRACE;
3773 	}
3774 
3775 	data->action_name = kstrdup(action_name, GFP_KERNEL);
3776 	if (!data->action_name) {
3777 		ret = -ENOMEM;
3778 		goto out;
3779 	}
3780 
3781 	data->handler = handler;
3782  out:
3783 	return ret;
3784 }
3785 
3786 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
3787 					    char *str, enum handler_id handler)
3788 {
3789 	struct action_data *data;
3790 	int ret = -EINVAL;
3791 	char *var_str;
3792 
3793 	data = kzalloc(sizeof(*data), GFP_KERNEL);
3794 	if (!data)
3795 		return ERR_PTR(-ENOMEM);
3796 
3797 	var_str = strsep(&str, ")");
3798 	if (!var_str || !str) {
3799 		ret = -EINVAL;
3800 		goto free;
3801 	}
3802 
3803 	data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
3804 	if (!data->track_data.var_str) {
3805 		ret = -ENOMEM;
3806 		goto free;
3807 	}
3808 
3809 	ret = action_parse(hist_data->event_file->tr, str, data, handler);
3810 	if (ret)
3811 		goto free;
3812  out:
3813 	return data;
3814  free:
3815 	track_data_destroy(hist_data, data);
3816 	data = ERR_PTR(ret);
3817 	goto out;
3818 }
3819 
3820 static void onmatch_destroy(struct action_data *data)
3821 {
3822 	kfree(data->match_data.event);
3823 	kfree(data->match_data.event_system);
3824 
3825 	action_data_destroy(data);
3826 }
3827 
3828 static void destroy_field_var(struct field_var *field_var)
3829 {
3830 	if (!field_var)
3831 		return;
3832 
3833 	destroy_hist_field(field_var->var, 0);
3834 	destroy_hist_field(field_var->val, 0);
3835 
3836 	kfree(field_var);
3837 }
3838 
3839 static void destroy_field_vars(struct hist_trigger_data *hist_data)
3840 {
3841 	unsigned int i;
3842 
3843 	for (i = 0; i < hist_data->n_field_vars; i++)
3844 		destroy_field_var(hist_data->field_vars[i]);
3845 
3846 	for (i = 0; i < hist_data->n_save_vars; i++)
3847 		destroy_field_var(hist_data->save_vars[i]);
3848 }
3849 
3850 static void save_field_var(struct hist_trigger_data *hist_data,
3851 			   struct field_var *field_var)
3852 {
3853 	hist_data->field_vars[hist_data->n_field_vars++] = field_var;
3854 
3855 	/* Stack traces are saved in the string storage too */
3856 	if (field_var->val->flags & (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
3857 		hist_data->n_field_var_str++;
3858 }
3859 
3860 
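/*
 * Check that the hist field supplied for the synthetic event parameter
 * at field_pos is compatible with the corresponding synthetic event
 * field: identical type, or at least matching size and signedness.
 */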
3861 static int check_synth_field(struct synth_event *event,
3862 			     struct hist_field *hist_field,
3863 			     unsigned int field_pos)
3864 {
3865 	struct synth_field *field;
3866 
3867 	if (field_pos >= event->n_fields)
3868 		return -EINVAL;
3869 
3870 	field = event->fields[field_pos];
3871 
3872 	/*
3873 	 * A dynamic string synth field can accept static or
3874 	 * dynamic. A static string synth field can only accept a
3875 	 * same-sized static string, which is checked for later.
3876 	 */
3877 	if (strstr(hist_field->type, "char[") && field->is_string
3878 	    && field->is_dynamic)
3879 		return 0;
3880 
3881 	if (strstr(hist_field->type, "long[") && field->is_stack)
3882 		return 0;
3883 
3884 	if (strcmp(field->type, hist_field->type) != 0) {
3885 		if (field->size != hist_field->size ||
3886 		    (!field->is_string && field->is_signed != hist_field->is_signed))
3887 			return -EINVAL;
3888 	}
3889 
3890 	return 0;
3891 }
3892 
3893 static struct hist_field *
3894 trace_action_find_var(struct hist_trigger_data *hist_data,
3895 		      struct action_data *data,
3896 		      char *system, char *event, char *var)
3897 {
3898 	struct trace_array *tr = hist_data->event_file->tr;
3899 	struct hist_field *hist_field;
3900 
3901 	var++; /* skip '$' */
3902 
3903 	hist_field = find_target_event_var(hist_data, system, event, var);
3904 	if (!hist_field) {
3905 		if (!system && data->handler == HANDLER_ONMATCH) {
3906 			system = data->match_data.event_system;
3907 			event = data->match_data.event;
3908 		}
3909 
3910 		hist_field = find_event_var(hist_data, system, event, var);
3911 	}
3912 
3913 	if (!hist_field)
3914 		hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
3915 
3916 	return hist_field;
3917 }
3918 
3919 static struct hist_field *
3920 trace_action_create_field_var(struct hist_trigger_data *hist_data,
3921 			      struct action_data *data, char *system,
3922 			      char *event, char *var)
3923 {
3924 	struct hist_field *hist_field = NULL;
3925 	struct field_var *field_var;
3926 
3927 	/*
3928 	 * First try to create a field var on the target event (the
3929 	 * event currently being defined).  This will create a variable for
3930 	 * unqualified fields on the target event, or if qualified,
3931 	 * target fields that have qualified names matching the target.
3932 	 */
3933 	field_var = create_target_field_var(hist_data, system, event, var);
3934 
3935 	if (field_var && !IS_ERR(field_var)) {
3936 		save_field_var(hist_data, field_var);
3937 		hist_field = field_var->var;
3938 	} else {
3939 		field_var = NULL;
3940 		/*
3941 		 * If no explicit system.event is specified, default to
3942 		 * looking for fields on the onmatch(system.event.xxx)
3943 		 * event.
3944 		 */
3945 		if (!system && data->handler == HANDLER_ONMATCH) {
3946 			system = data->match_data.event_system;
3947 			event = data->match_data.event;
3948 		}
3949 
3950 		if (!event)
3951 			goto free;
3952 		/*
3953 		 * At this point, we're looking at a field on another
3954 		 * event.  Because we can't modify a hist trigger on
3955 		 * another event to add a variable for a field, we need
3956 		 * to create a new trigger on that event and create the
3957 		 * variable at the same time.
3958 		 */
3959 		hist_field = create_field_var_hist(hist_data, system, event, var);
3960 		if (IS_ERR(hist_field))
3961 			goto free;
3962 	}
3963  out:
3964 	return hist_field;
3965  free:
3966 	destroy_field_var(field_var);
3967 	hist_field = NULL;
3968 	goto out;
3969 }
3970 
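/*
 * Resolve each parameter of a synthetic-event action to a variable
 * reference (creating field variables on the fly where needed), verify
 * it against the synthetic event's fields, and record the variable
 * reference indices used later when the synthetic event is generated.
 */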
3971 static int trace_action_create(struct hist_trigger_data *hist_data,
3972 			       struct action_data *data)
3973 {
3974 	struct trace_array *tr = hist_data->event_file->tr;
3975 	char *event_name, *param, *system = NULL;
3976 	struct hist_field *hist_field, *var_ref;
3977 	unsigned int i;
3978 	unsigned int field_pos = 0;
3979 	struct synth_event *event;
3980 	char *synth_event_name;
3981 	int var_ref_idx, ret = 0;
3982 
3983 	lockdep_assert_held(&event_mutex);
3984 
3985 	/* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
3986 	if (data->n_params > SYNTH_FIELDS_MAX)
3987 		return -EINVAL;
3988 
3989 	if (data->use_trace_keyword)
3990 		synth_event_name = data->synth_event_name;
3991 	else
3992 		synth_event_name = data->action_name;
3993 
3994 	event = find_synth_event(synth_event_name);
3995 	if (!event) {
3996 		hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
3997 		return -EINVAL;
3998 	}
3999 
4000 	event->ref++;
4001 
4002 	for (i = 0; i < data->n_params; i++) {
4003 		char *p;
4004 
4005 		p = param = kstrdup(data->params[i], GFP_KERNEL);
4006 		if (!param) {
4007 			ret = -ENOMEM;
4008 			goto err;
4009 		}
4010 
4011 		system = strsep(&param, ".");
4012 		if (!param) {
4013 			param = (char *)system;
4014 			system = event_name = NULL;
4015 		} else {
4016 			event_name = strsep(&param, ".");
4017 			if (!param) {
4018 				kfree(p);
4019 				ret = -EINVAL;
4020 				goto err;
4021 			}
4022 		}
4023 
4024 		if (param[0] == '$')
4025 			hist_field = trace_action_find_var(hist_data, data,
4026 							   system, event_name,
4027 							   param);
4028 		else
4029 			hist_field = trace_action_create_field_var(hist_data,
4030 								   data,
4031 								   system,
4032 								   event_name,
4033 								   param);
4034 
4035 		if (!hist_field) {
4036 			kfree(p);
4037 			ret = -EINVAL;
4038 			goto err;
4039 		}
4040 
4041 		if (check_synth_field(event, hist_field, field_pos) == 0) {
4042 			var_ref = create_var_ref(hist_data, hist_field,
4043 						 system, event_name);
4044 			if (!var_ref) {
4045 				kfree(p);
4046 				ret = -ENOMEM;
4047 				goto err;
4048 			}
4049 
4050 			var_ref_idx = find_var_ref_idx(hist_data, var_ref);
4051 			if (WARN_ON(var_ref_idx < 0)) {
4052 				kfree(p);
4053 				ret = var_ref_idx;
4054 				goto err;
4055 			}
4056 
4057 			data->var_ref_idx[i] = var_ref_idx;
4058 
4059 			field_pos++;
4060 			kfree(p);
4061 			continue;
4062 		}
4063 
4064 		hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
4065 		kfree(p);
4066 		ret = -EINVAL;
4067 		goto err;
4068 	}
4069 
4070 	if (field_pos != event->n_fields) {
4071 		hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
4072 		ret = -EINVAL;
4073 		goto err;
4074 	}
4075 
4076 	data->synth_event = event;
4077  out:
4078 	return ret;
4079  err:
4080 	event->ref--;
4081 
4082 	goto out;
4083 }
4084 
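/*
 * Second-stage action setup: create the synthetic-event plumbing for
 * trace actions, enable the conditional snapshot for snapshot actions,
 * and create the listed field variables for a save() action.
 */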
4085 static int action_create(struct hist_trigger_data *hist_data,
4086 			 struct action_data *data)
4087 {
4088 	struct trace_event_file *file = hist_data->event_file;
4089 	struct trace_array *tr = file->tr;
4090 	struct track_data *track_data;
4091 	struct field_var *field_var;
4092 	unsigned int i;
4093 	char *param;
4094 	int ret = 0;
4095 
4096 	if (data->action == ACTION_TRACE)
4097 		return trace_action_create(hist_data, data);
4098 
4099 	if (data->action == ACTION_SNAPSHOT) {
4100 		track_data = track_data_alloc(hist_data->key_size, data, hist_data);
4101 		if (IS_ERR(track_data)) {
4102 			ret = PTR_ERR(track_data);
4103 			goto out;
4104 		}
4105 
4106 		ret = tracing_snapshot_cond_enable(file->tr, track_data,
4107 						   cond_snapshot_update);
4108 		if (ret)
4109 			track_data_free(track_data);
4110 
4111 		goto out;
4112 	}
4113 
4114 	if (data->action == ACTION_SAVE) {
4115 		if (hist_data->n_save_vars) {
4116 			ret = -EEXIST;
4117 			hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
4118 			goto out;
4119 		}
4120 
4121 		for (i = 0; i < data->n_params; i++) {
4122 			param = kstrdup(data->params[i], GFP_KERNEL);
4123 			if (!param) {
4124 				ret = -ENOMEM;
4125 				goto out;
4126 			}
4127 
4128 			field_var = create_target_field_var(hist_data, NULL, NULL, param);
4129 			if (IS_ERR(field_var)) {
4130 				hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
4131 					 errpos(param));
4132 				ret = PTR_ERR(field_var);
4133 				kfree(param);
4134 				goto out;
4135 			}
4136 
4137 			hist_data->save_vars[hist_data->n_save_vars++] = field_var;
4138 			if (field_var->val->flags &
4139 			    (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
4140 				hist_data->n_save_var_str++;
4141 			kfree(param);
4142 		}
4143 	}
4144  out:
4145 	return ret;
4146 }
4147 
4148 static int onmatch_create(struct hist_trigger_data *hist_data,
4149 			  struct action_data *data)
4150 {
4151 	return action_create(hist_data, data);
4152 }
4153 
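/*
 * Parse an "onmatch(subsys.event).<action>(params)" string into an
 * action_data, verifying that the matched event actually exists.
 */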
4154 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
4155 {
4156 	char *match_event, *match_event_system;
4157 	struct action_data *data;
4158 	int ret = -EINVAL;
4159 
4160 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4161 	if (!data)
4162 		return ERR_PTR(-ENOMEM);
4163 
4164 	match_event = strsep(&str, ")");
4165 	if (!match_event || !str) {
4166 		hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
4167 		goto free;
4168 	}
4169 
4170 	match_event_system = strsep(&match_event, ".");
4171 	if (!match_event) {
4172 		hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
4173 		goto free;
4174 	}
4175 
4176 	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
4177 		hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
4178 		goto free;
4179 	}
4180 
4181 	data->match_data.event = kstrdup(match_event, GFP_KERNEL);
4182 	if (!data->match_data.event) {
4183 		ret = -ENOMEM;
4184 		goto free;
4185 	}
4186 
4187 	data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
4188 	if (!data->match_data.event_system) {
4189 		ret = -ENOMEM;
4190 		goto free;
4191 	}
4192 
4193 	ret = action_parse(tr, str, data, HANDLER_ONMATCH);
4194 	if (ret)
4195 		goto free;
4196  out:
4197 	return data;
4198  free:
4199 	onmatch_destroy(data);
4200 	data = ERR_PTR(ret);
4201 	goto out;
4202 }
4203 
4204 static int create_hitcount_val(struct hist_trigger_data *hist_data)
4205 {
4206 	hist_data->fields[HITCOUNT_IDX] =
4207 		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
4208 	if (!hist_data->fields[HITCOUNT_IDX])
4209 		return -ENOMEM;
4210 
4211 	hist_data->n_vals++;
4212 	hist_data->n_fields++;
4213 
4214 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
4215 		return -EINVAL;
4216 
4217 	return 0;
4218 }
4219 
4220 static int __create_val_field(struct hist_trigger_data *hist_data,
4221 			      unsigned int val_idx,
4222 			      struct trace_event_file *file,
4223 			      char *var_name, char *field_str,
4224 			      unsigned long flags)
4225 {
4226 	struct hist_field *hist_field;
4227 	int ret = 0, n_subexprs = 0;
4228 
4229 	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, &n_subexprs);
4230 	if (IS_ERR(hist_field)) {
4231 		ret = PTR_ERR(hist_field);
4232 		goto out;
4233 	}
4234 
4235 	/* certain modifiers are not allowed on values and variables */
4236 	if (hist_field->flags & HIST_FIELD_FL_VAR) {
4237 		/* Variable */
4238 		if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
4239 					 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2))
4240 			goto err;
4241 	} else {
4242 		/* Value */
4243 		if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
4244 					 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
4245 					 HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
4246 					 HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE))
4247 			goto err;
4248 	}
4249 
4250 	hist_data->fields[val_idx] = hist_field;
4251 
4252 	++hist_data->n_vals;
4253 	++hist_data->n_fields;
4254 
4255 	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4256 		ret = -EINVAL;
4257  out:
4258 	return ret;
4259  err:
4260 	hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
4261 	return -EINVAL;
4262 }
4263 
4264 static int create_val_field(struct hist_trigger_data *hist_data,
4265 			    unsigned int val_idx,
4266 			    struct trace_event_file *file,
4267 			    char *field_str)
4268 {
4269 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
4270 		return -EINVAL;
4271 
4272 	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
4273 }
4274 
4275 static const char no_comm[] = "(no comm)";
4276 
4277 static u64 hist_field_execname(struct hist_field *hist_field,
4278 			       struct tracing_map_elt *elt,
4279 			       struct trace_buffer *buffer,
4280 			       struct ring_buffer_event *rbe,
4281 			       void *event)
4282 {
4283 	struct hist_elt_data *elt_data;
4284 
4285 	if (WARN_ON_ONCE(!elt))
4286 		return (u64)(unsigned long)no_comm;
4287 
4288 	elt_data = elt->private_data;
4289 
4290 	if (WARN_ON_ONCE(!elt_data->comm))
4291 		return (u64)(unsigned long)no_comm;
4292 
4293 	return (u64)(unsigned long)(elt_data->comm);
4294 }
4295 
4296 static u64 hist_field_stack(struct hist_field *hist_field,
4297 			    struct tracing_map_elt *elt,
4298 			    struct trace_buffer *buffer,
4299 			    struct ring_buffer_event *rbe,
4300 			    void *event)
4301 {
4302 	u32 str_item = *(u32 *)(event + hist_field->field->offset);
4303 	int str_loc = str_item & 0xffff;
4304 	char *addr = (char *)(event + str_loc);
4305 
4306 	return (u64)(unsigned long)addr;
4307 }
4308 
4309 static u64 hist_fn_call(struct hist_field *hist_field,
4310 			struct tracing_map_elt *elt,
4311 			struct trace_buffer *buffer,
4312 			struct ring_buffer_event *rbe,
4313 			void *event)
4314 {
4315 	switch (hist_field->fn_num) {
4316 	case HIST_FIELD_FN_VAR_REF:
4317 		return hist_field_var_ref(hist_field, elt, buffer, rbe, event);
4318 	case HIST_FIELD_FN_COUNTER:
4319 		return hist_field_counter(hist_field, elt, buffer, rbe, event);
4320 	case HIST_FIELD_FN_CONST:
4321 		return hist_field_const(hist_field, elt, buffer, rbe, event);
4322 	case HIST_FIELD_FN_LOG2:
4323 		return hist_field_log2(hist_field, elt, buffer, rbe, event);
4324 	case HIST_FIELD_FN_BUCKET:
4325 		return hist_field_bucket(hist_field, elt, buffer, rbe, event);
4326 	case HIST_FIELD_FN_TIMESTAMP:
4327 		return hist_field_timestamp(hist_field, elt, buffer, rbe, event);
4328 	case HIST_FIELD_FN_CPU:
4329 		return hist_field_cpu(hist_field, elt, buffer, rbe, event);
4330 	case HIST_FIELD_FN_STRING:
4331 		return hist_field_string(hist_field, elt, buffer, rbe, event);
4332 	case HIST_FIELD_FN_DYNSTRING:
4333 		return hist_field_dynstring(hist_field, elt, buffer, rbe, event);
4334 	case HIST_FIELD_FN_RELDYNSTRING:
4335 		return hist_field_reldynstring(hist_field, elt, buffer, rbe, event);
4336 	case HIST_FIELD_FN_PSTRING:
4337 		return hist_field_pstring(hist_field, elt, buffer, rbe, event);
4338 	case HIST_FIELD_FN_S64:
4339 		return hist_field_s64(hist_field, elt, buffer, rbe, event);
4340 	case HIST_FIELD_FN_U64:
4341 		return hist_field_u64(hist_field, elt, buffer, rbe, event);
4342 	case HIST_FIELD_FN_S32:
4343 		return hist_field_s32(hist_field, elt, buffer, rbe, event);
4344 	case HIST_FIELD_FN_U32:
4345 		return hist_field_u32(hist_field, elt, buffer, rbe, event);
4346 	case HIST_FIELD_FN_S16:
4347 		return hist_field_s16(hist_field, elt, buffer, rbe, event);
4348 	case HIST_FIELD_FN_U16:
4349 		return hist_field_u16(hist_field, elt, buffer, rbe, event);
4350 	case HIST_FIELD_FN_S8:
4351 		return hist_field_s8(hist_field, elt, buffer, rbe, event);
4352 	case HIST_FIELD_FN_U8:
4353 		return hist_field_u8(hist_field, elt, buffer, rbe, event);
4354 	case HIST_FIELD_FN_UMINUS:
4355 		return hist_field_unary_minus(hist_field, elt, buffer, rbe, event);
4356 	case HIST_FIELD_FN_MINUS:
4357 		return hist_field_minus(hist_field, elt, buffer, rbe, event);
4358 	case HIST_FIELD_FN_PLUS:
4359 		return hist_field_plus(hist_field, elt, buffer, rbe, event);
4360 	case HIST_FIELD_FN_DIV:
4361 		return hist_field_div(hist_field, elt, buffer, rbe, event);
4362 	case HIST_FIELD_FN_MULT:
4363 		return hist_field_mult(hist_field, elt, buffer, rbe, event);
4364 	case HIST_FIELD_FN_DIV_POWER2:
4365 		return div_by_power_of_two(hist_field, elt, buffer, rbe, event);
4366 	case HIST_FIELD_FN_DIV_NOT_POWER2:
4367 		return div_by_not_power_of_two(hist_field, elt, buffer, rbe, event);
4368 	case HIST_FIELD_FN_DIV_MULT_SHIFT:
4369 		return div_by_mult_and_shift(hist_field, elt, buffer, rbe, event);
4370 	case HIST_FIELD_FN_EXECNAME:
4371 		return hist_field_execname(hist_field, elt, buffer, rbe, event);
4372 	case HIST_FIELD_FN_STACK:
4373 		return hist_field_stack(hist_field, elt, buffer, rbe, event);
4374 	default:
4375 		return 0;
4376 	}
4377 }
4378 
4379 /* Convert a var that points to common_pid.execname to a string */
4380 static void update_var_execname(struct hist_field *hist_field)
4381 {
4382 	hist_field->flags = HIST_FIELD_FL_STRING | HIST_FIELD_FL_VAR |
4383 		HIST_FIELD_FL_EXECNAME;
4384 	hist_field->size = MAX_FILTER_STR_VAL;
4385 	hist_field->is_signed = 0;
4386 
4387 	kfree_const(hist_field->type);
4388 	hist_field->type = "char[]";
4389 
4390 	hist_field->fn_num = HIST_FIELD_FN_EXECNAME;
4391 }
4392 
4393 static int create_var_field(struct hist_trigger_data *hist_data,
4394 			    unsigned int val_idx,
4395 			    struct trace_event_file *file,
4396 			    char *var_name, char *expr_str)
4397 {
4398 	struct trace_array *tr = hist_data->event_file->tr;
4399 	unsigned long flags = 0;
4400 	int ret;
4401 
4402 	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4403 		return -EINVAL;
4404 
4405 	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
4406 		hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
4407 		return -EINVAL;
4408 	}
4409 
4410 	flags |= HIST_FIELD_FL_VAR;
4411 	hist_data->n_vars++;
4412 	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
4413 		return -EINVAL;
4414 
4415 	ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
4416 
4417 	if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_EXECNAME)
4418 		update_var_execname(hist_data->fields[val_idx]);
4419 
4420 	if (!ret && hist_data->fields[val_idx]->flags &
4421 	    (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
4422 		hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++;
4423 
4424 	return ret;
4425 }
4426 
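/*
 * Create the value fields from the trigger's values string.  The
 * implicit hitcount value always occupies the first slot, so an
 * explicit "hitcount" entry isn't created a second time.
 */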
4427 static int create_val_fields(struct hist_trigger_data *hist_data,
4428 			     struct trace_event_file *file)
4429 {
4430 	unsigned int i, j = 1, n_hitcount = 0;
4431 	char *fields_str, *field_str;
4432 	int ret;
4433 
4434 	ret = create_hitcount_val(hist_data);
4435 	if (ret)
4436 		goto out;
4437 
4438 	fields_str = hist_data->attrs->vals_str;
4439 	if (!fields_str)
4440 		goto out;
4441 
4442 	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
4443 		     j < TRACING_MAP_VALS_MAX; i++) {
4444 		field_str = strsep(&fields_str, ",");
4445 		if (!field_str)
4446 			break;
4447 
4448 		if (strcmp(field_str, "hitcount") == 0) {
4449 			if (!n_hitcount++)
4450 				continue;
4451 		}
4452 
4453 		ret = create_val_field(hist_data, j++, file, field_str);
4454 		if (ret)
4455 			goto out;
4456 	}
4457 
4458 	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
4459 		ret = -EINVAL;
4460  out:
4461 	/* Only the raw hitcount exists, but nohitcount would suppress it. */
4462 	if (j == 1 && hist_data->attrs->no_hitcount) {
4463 		hist_err(hist_data->event_file->tr, HIST_ERR_NEED_NOHC_VAL, 0);
4464 		ret = -ENOENT;
4465 	}
4466 
4467 	return ret;
4468 }
4469 
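/*
 * Create a single key field, either the special "stacktrace" key or an
 * expression parsed from the key string.  Returns the key's size,
 * rounded up to a u64 multiple, so the caller can lay out compound keys.
 */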
4470 static int create_key_field(struct hist_trigger_data *hist_data,
4471 			    unsigned int key_idx,
4472 			    unsigned int key_offset,
4473 			    struct trace_event_file *file,
4474 			    char *field_str)
4475 {
4476 	struct trace_array *tr = hist_data->event_file->tr;
4477 	struct hist_field *hist_field = NULL;
4478 	unsigned long flags = 0;
4479 	unsigned int key_size;
4480 	int ret = 0, n_subexprs = 0;
4481 
4482 	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
4483 		return -EINVAL;
4484 
4485 	flags |= HIST_FIELD_FL_KEY;
4486 
4487 	if (strcmp(field_str, "stacktrace") == 0) {
4488 		flags |= HIST_FIELD_FL_STACKTRACE;
4489 		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
4490 		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
4491 	} else {
4492 		hist_field = parse_expr(hist_data, file, field_str, flags,
4493 					NULL, &n_subexprs);
4494 		if (IS_ERR(hist_field)) {
4495 			ret = PTR_ERR(hist_field);
4496 			goto out;
4497 		}
4498 
4499 		if (field_has_hist_vars(hist_field, 0))	{
4500 			hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
4501 			destroy_hist_field(hist_field, 0);
4502 			ret = -EINVAL;
4503 			goto out;
4504 		}
4505 
4506 		key_size = hist_field->size;
4507 	}
4508 
4509 	hist_data->fields[key_idx] = hist_field;
4510 
4511 	key_size = ALIGN(key_size, sizeof(u64));
4512 	hist_data->fields[key_idx]->size = key_size;
4513 	hist_data->fields[key_idx]->offset = key_offset;
4514 
4515 	hist_data->key_size += key_size;
4516 
4517 	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
4518 		ret = -EINVAL;
4519 		goto out;
4520 	}
4521 
4522 	hist_data->n_keys++;
4523 	hist_data->n_fields++;
4524 
4525 	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
4526 		return -EINVAL;
4527 
4528 	ret = key_size;
4529  out:
4530 	return ret;
4531 }
4532 
4533 static int create_key_fields(struct hist_trigger_data *hist_data,
4534 			     struct trace_event_file *file)
4535 {
4536 	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
4537 	char *fields_str, *field_str;
4538 	int ret = -EINVAL;
4539 
4540 	fields_str = hist_data->attrs->keys_str;
4541 	if (!fields_str)
4542 		goto out;
4543 
4544 	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
4545 		field_str = strsep(&fields_str, ",");
4546 		if (!field_str)
4547 			break;
4548 		ret = create_key_field(hist_data, i, key_offset,
4549 				       file, field_str);
4550 		if (ret < 0)
4551 			goto out;
4552 		key_offset += ret;
4553 	}
4554 	if (fields_str) {
4555 		ret = -EINVAL;
4556 		goto out;
4557 	}
4558 	ret = 0;
4559  out:
4560 	return ret;
4561 }
4562 
4563 static int create_var_fields(struct hist_trigger_data *hist_data,
4564 			     struct trace_event_file *file)
4565 {
4566 	unsigned int i, j = hist_data->n_vals;
4567 	int ret = 0;
4568 
4569 	unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
4570 
4571 	for (i = 0; i < n_vars; i++) {
4572 		char *var_name = hist_data->attrs->var_defs.name[i];
4573 		char *expr = hist_data->attrs->var_defs.expr[i];
4574 
4575 		ret = create_var_field(hist_data, j++, file, var_name, expr);
4576 		if (ret)
4577 			goto out;
4578 	}
4579  out:
4580 	return ret;
4581 }
4582 
4583 static void free_var_defs(struct hist_trigger_data *hist_data)
4584 {
4585 	unsigned int i;
4586 
4587 	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
4588 		kfree(hist_data->attrs->var_defs.name[i]);
4589 		kfree(hist_data->attrs->var_defs.expr[i]);
4590 	}
4591 
4592 	hist_data->attrs->var_defs.n_vars = 0;
4593 }
4594 
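/*
 * Parse the trigger's variable assignments ("name=expr", comma
 * separated) into attrs->var_defs, to be turned into variable fields
 * by create_var_fields().
 */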
4595 static int parse_var_defs(struct hist_trigger_data *hist_data)
4596 {
4597 	struct trace_array *tr = hist_data->event_file->tr;
4598 	char *s, *str, *var_name, *field_str;
4599 	unsigned int i, j, n_vars = 0;
4600 	int ret = 0;
4601 
4602 	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
4603 		str = hist_data->attrs->assignment_str[i];
4604 		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
4605 			field_str = strsep(&str, ",");
4606 			if (!field_str)
4607 				break;
4608 
4609 			var_name = strsep(&field_str, "=");
4610 			if (!var_name || !field_str) {
4611 				hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
4612 					 errpos(var_name));
4613 				ret = -EINVAL;
4614 				goto free;
4615 			}
4616 
4617 			if (n_vars == TRACING_MAP_VARS_MAX) {
4618 				hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
4619 				ret = -EINVAL;
4620 				goto free;
4621 			}
4622 
4623 			s = kstrdup(var_name, GFP_KERNEL);
4624 			if (!s) {
4625 				ret = -ENOMEM;
4626 				goto free;
4627 			}
4628 			hist_data->attrs->var_defs.name[n_vars] = s;
4629 
4630 			s = kstrdup(field_str, GFP_KERNEL);
4631 			if (!s) {
4632 				kfree(hist_data->attrs->var_defs.name[n_vars]);
4633 				hist_data->attrs->var_defs.name[n_vars] = NULL;
4634 				ret = -ENOMEM;
4635 				goto free;
4636 			}
4637 			hist_data->attrs->var_defs.expr[n_vars++] = s;
4638 
4639 			hist_data->attrs->var_defs.n_vars = n_vars;
4640 		}
4641 	}
4642 
4643 	return ret;
4644  free:
4645 	free_var_defs(hist_data);
4646 
4647 	return ret;
4648 }
4649 
4650 static int create_hist_fields(struct hist_trigger_data *hist_data,
4651 			      struct trace_event_file *file)
4652 {
4653 	int ret;
4654 
4655 	ret = parse_var_defs(hist_data);
4656 	if (ret)
4657 		return ret;
4658 
4659 	ret = create_val_fields(hist_data, file);
4660 	if (ret)
4661 		goto out;
4662 
4663 	ret = create_var_fields(hist_data, file);
4664 	if (ret)
4665 		goto out;
4666 
4667 	ret = create_key_fields(hist_data, file);
4668 
4669  out:
4670 	free_var_defs(hist_data);
4671 
4672 	return ret;
4673 }
4674 
4675 static int is_descending(struct trace_array *tr, const char *str)
4676 {
4677 	if (!str)
4678 		return 0;
4679 
4680 	if (strcmp(str, "descending") == 0)
4681 		return 1;
4682 
4683 	if (strcmp(str, "ascending") == 0)
4684 		return 0;
4685 
4686 	hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str));
4687 
4688 	return -EINVAL;
4689 }
4690 
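/*
 * Translate the trigger's sort specification (e.g. "wakeup_lat.descending")
 * into tracing_map sort keys.  Variables are skipped when computing
 * field indices, since they can't be used as sort keys.
 */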
4691 static int create_sort_keys(struct hist_trigger_data *hist_data)
4692 {
4693 	struct trace_array *tr = hist_data->event_file->tr;
4694 	char *fields_str = hist_data->attrs->sort_key_str;
4695 	struct tracing_map_sort_key *sort_key;
4696 	int descending, ret = 0;
4697 	unsigned int i, j, k;
4698 
4699 	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
4700 
4701 	if (!fields_str)
4702 		goto out;
4703 
4704 	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4705 		struct hist_field *hist_field;
4706 		char *field_str, *field_name;
4707 		const char *test_name;
4708 
4709 		sort_key = &hist_data->sort_keys[i];
4710 
4711 		field_str = strsep(&fields_str, ",");
4712 		if (!field_str)
4713 			break;
4714 
4715 		if (!*field_str) {
4716 			ret = -EINVAL;
4717 			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
4718 			break;
4719 		}
4720 
4721 		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4722 			hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort="));
4723 			ret = -EINVAL;
4724 			break;
4725 		}
4726 
4727 		field_name = strsep(&field_str, ".");
4728 		if (!field_name || !*field_name) {
4729 			ret = -EINVAL;
4730 			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
4731 			break;
4732 		}
4733 
4734 		if (strcmp(field_name, "hitcount") == 0) {
4735 			descending = is_descending(tr, field_str);
4736 			if (descending < 0) {
4737 				ret = descending;
4738 				break;
4739 			}
4740 			sort_key->descending = descending;
4741 			continue;
4742 		}
4743 
4744 		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4745 			unsigned int idx;
4746 
4747 			hist_field = hist_data->fields[j];
4748 			if (hist_field->flags & HIST_FIELD_FL_VAR)
4749 				continue;
4750 
4751 			idx = k++;
4752 
4753 			test_name = hist_field_name(hist_field, 0);
4754 
4755 			if (strcmp(field_name, test_name) == 0) {
4756 				sort_key->field_idx = idx;
4757 				descending = is_descending(tr, field_str);
4758 				if (descending < 0) {
4759 					ret = descending;
4760 					goto out;
4761 				}
4762 				sort_key->descending = descending;
4763 				break;
4764 			}
4765 		}
4766 		if (j == hist_data->n_fields) {
4767 			ret = -EINVAL;
4768 			hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name));
4769 			break;
4770 		}
4771 	}
4772 
4773 	hist_data->n_sort_keys = i;
4774  out:
4775 	return ret;
4776 }
4777 
4778 static void destroy_actions(struct hist_trigger_data *hist_data)
4779 {
4780 	unsigned int i;
4781 
4782 	for (i = 0; i < hist_data->n_actions; i++) {
4783 		struct action_data *data = hist_data->actions[i];
4784 
4785 		if (data->handler == HANDLER_ONMATCH)
4786 			onmatch_destroy(data);
4787 		else if (data->handler == HANDLER_ONMAX ||
4788 			 data->handler == HANDLER_ONCHANGE)
4789 			track_data_destroy(hist_data, data);
4790 		else
4791 			kfree(data);
4792 	}
4793 }
4794 
4795 static int parse_actions(struct hist_trigger_data *hist_data)
4796 {
4797 	struct trace_array *tr = hist_data->event_file->tr;
4798 	struct action_data *data;
4799 	unsigned int i;
4800 	int ret = 0;
4801 	char *str;
4802 	int len;
4803 
4804 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4805 		enum handler_id hid = 0;
4806 		char *action_str;
4807 
4808 		str = hist_data->attrs->action_str[i];
4809 
4810 		if ((len = str_has_prefix(str, "onmatch(")))
4811 			hid = HANDLER_ONMATCH;
4812 		else if ((len = str_has_prefix(str, "onmax(")))
4813 			hid = HANDLER_ONMAX;
4814 		else if ((len = str_has_prefix(str, "onchange(")))
4815 			hid = HANDLER_ONCHANGE;
4816 
4817 		action_str = str + len;
4818 
4819 		switch (hid) {
4820 		case HANDLER_ONMATCH:
4821 			data = onmatch_parse(tr, action_str);
4822 			break;
4823 		case HANDLER_ONMAX:
4824 		case HANDLER_ONCHANGE:
4825 			data = track_data_parse(hist_data, action_str, hid);
4826 			break;
4827 		default:
4828 			data = ERR_PTR(-EINVAL);
4829 			break;
4830 		}
4831 
4832 		if (IS_ERR(data)) {
4833 			ret = PTR_ERR(data);
4834 			break;
4835 		}
4836 
4837 		hist_data->actions[hist_data->n_actions++] = data;
4838 	}
4839 
4840 	return ret;
4841 }
4842 
4843 static int create_actions(struct hist_trigger_data *hist_data)
4844 {
4845 	struct action_data *data;
4846 	unsigned int i;
4847 	int ret = 0;
4848 
4849 	for (i = 0; i < hist_data->attrs->n_actions; i++) {
4850 		data = hist_data->actions[i];
4851 
4852 		if (data->handler == HANDLER_ONMATCH) {
4853 			ret = onmatch_create(hist_data, data);
4854 			if (ret)
4855 				break;
4856 		} else if (data->handler == HANDLER_ONMAX ||
4857 			   data->handler == HANDLER_ONCHANGE) {
4858 			ret = track_data_create(hist_data, data);
4859 			if (ret)
4860 				break;
4861 		} else {
4862 			ret = -EINVAL;
4863 			break;
4864 		}
4865 	}
4866 
4867 	return ret;
4868 }
4869 
4870 static void print_actions(struct seq_file *m,
4871 			  struct hist_trigger_data *hist_data,
4872 			  struct tracing_map_elt *elt)
4873 {
4874 	unsigned int i;
4875 
4876 	for (i = 0; i < hist_data->n_actions; i++) {
4877 		struct action_data *data = hist_data->actions[i];
4878 
4879 		if (data->action == ACTION_SNAPSHOT)
4880 			continue;
4881 
4882 		if (data->handler == HANDLER_ONMAX ||
4883 		    data->handler == HANDLER_ONCHANGE)
4884 			track_data_print(m, hist_data, elt, data);
4885 	}
4886 }
4887 
4888 static void print_action_spec(struct seq_file *m,
4889 			      struct hist_trigger_data *hist_data,
4890 			      struct action_data *data)
4891 {
4892 	unsigned int i;
4893 
4894 	if (data->action == ACTION_SAVE) {
4895 		for (i = 0; i < hist_data->n_save_vars; i++) {
4896 			seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
4897 			if (i < hist_data->n_save_vars - 1)
4898 				seq_puts(m, ",");
4899 		}
4900 	} else if (data->action == ACTION_TRACE) {
4901 		if (data->use_trace_keyword)
4902 			seq_printf(m, "%s", data->synth_event_name);
4903 		for (i = 0; i < data->n_params; i++) {
4904 			if (i || data->use_trace_keyword)
4905 				seq_puts(m, ",");
4906 			seq_printf(m, "%s", data->params[i]);
4907 		}
4908 	}
4909 }
4910 
4911 static void print_track_data_spec(struct seq_file *m,
4912 				  struct hist_trigger_data *hist_data,
4913 				  struct action_data *data)
4914 {
4915 	if (data->handler == HANDLER_ONMAX)
4916 		seq_puts(m, ":onmax(");
4917 	else if (data->handler == HANDLER_ONCHANGE)
4918 		seq_puts(m, ":onchange(");
4919 	seq_printf(m, "%s", data->track_data.var_str);
4920 	seq_printf(m, ").%s(", data->action_name);
4921 
4922 	print_action_spec(m, hist_data, data);
4923 
4924 	seq_puts(m, ")");
4925 }
4926 
4927 static void print_onmatch_spec(struct seq_file *m,
4928 			       struct hist_trigger_data *hist_data,
4929 			       struct action_data *data)
4930 {
4931 	seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
4932 		   data->match_data.event);
4933 
4934 	seq_printf(m, "%s(", data->action_name);
4935 
4936 	print_action_spec(m, hist_data, data);
4937 
4938 	seq_puts(m, ")");
4939 }
4940 
4941 static bool actions_match(struct hist_trigger_data *hist_data,
4942 			  struct hist_trigger_data *hist_data_test)
4943 {
4944 	unsigned int i, j;
4945 
4946 	if (hist_data->n_actions != hist_data_test->n_actions)
4947 		return false;
4948 
4949 	for (i = 0; i < hist_data->n_actions; i++) {
4950 		struct action_data *data = hist_data->actions[i];
4951 		struct action_data *data_test = hist_data_test->actions[i];
4952 		char *action_name, *action_name_test;
4953 
4954 		if (data->handler != data_test->handler)
4955 			return false;
4956 		if (data->action != data_test->action)
4957 			return false;
4958 
4959 		if (data->n_params != data_test->n_params)
4960 			return false;
4961 
4962 		for (j = 0; j < data->n_params; j++) {
4963 			if (strcmp(data->params[j], data_test->params[j]) != 0)
4964 				return false;
4965 		}
4966 
4967 		if (data->use_trace_keyword)
4968 			action_name = data->synth_event_name;
4969 		else
4970 			action_name = data->action_name;
4971 
4972 		if (data_test->use_trace_keyword)
4973 			action_name_test = data_test->synth_event_name;
4974 		else
4975 			action_name_test = data_test->action_name;
4976 
4977 		if (strcmp(action_name, action_name_test) != 0)
4978 			return false;
4979 
4980 		if (data->handler == HANDLER_ONMATCH) {
4981 			if (strcmp(data->match_data.event_system,
4982 				   data_test->match_data.event_system) != 0)
4983 				return false;
4984 			if (strcmp(data->match_data.event,
4985 				   data_test->match_data.event) != 0)
4986 				return false;
4987 		} else if (data->handler == HANDLER_ONMAX ||
4988 			   data->handler == HANDLER_ONCHANGE) {
4989 			if (strcmp(data->track_data.var_str,
4990 				   data_test->track_data.var_str) != 0)
4991 				return false;
4992 		}
4993 	}
4994 
4995 	return true;
4996 }
4997 
4998 
4999 static void print_actions_spec(struct seq_file *m,
5000 			       struct hist_trigger_data *hist_data)
5001 {
5002 	unsigned int i;
5003 
5004 	for (i = 0; i < hist_data->n_actions; i++) {
5005 		struct action_data *data = hist_data->actions[i];
5006 
5007 		if (data->handler == HANDLER_ONMATCH)
5008 			print_onmatch_spec(m, hist_data, data);
5009 		else if (data->handler == HANDLER_ONMAX ||
5010 			 data->handler == HANDLER_ONCHANGE)
5011 			print_track_data_spec(m, hist_data, data);
5012 	}
5013 }
5014 
5015 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
5016 {
5017 	unsigned int i;
5018 
5019 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
5020 		kfree(hist_data->field_var_hists[i]->cmd);
5021 		kfree(hist_data->field_var_hists[i]);
5022 	}
5023 }
5024 
5025 static void destroy_hist_data(struct hist_trigger_data *hist_data)
5026 {
5027 	if (!hist_data)
5028 		return;
5029 
5030 	destroy_hist_trigger_attrs(hist_data->attrs);
5031 	destroy_hist_fields(hist_data);
5032 	tracing_map_destroy(hist_data->map);
5033 
5034 	destroy_actions(hist_data);
5035 	destroy_field_vars(hist_data);
5036 	destroy_field_var_hists(hist_data);
5037 
5038 	kfree(hist_data);
5039 }
5040 
5041 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
5042 {
5043 	struct tracing_map *map = hist_data->map;
5044 	struct ftrace_event_field *field;
5045 	struct hist_field *hist_field;
5046 	int i, idx = 0;
5047 
5048 	for_each_hist_field(i, hist_data) {
5049 		hist_field = hist_data->fields[i];
5050 		if (hist_field->flags & HIST_FIELD_FL_KEY) {
5051 			tracing_map_cmp_fn_t cmp_fn;
5052 
5053 			field = hist_field->field;
5054 
5055 			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
5056 				cmp_fn = tracing_map_cmp_none;
5057 			else if (!field || hist_field->flags & HIST_FIELD_FL_CPU)
5058 				cmp_fn = tracing_map_cmp_num(hist_field->size,
5059 							     hist_field->is_signed);
5060 			else if (is_string_field(field))
5061 				cmp_fn = tracing_map_cmp_string;
5062 			else
5063 				cmp_fn = tracing_map_cmp_num(field->size,
5064 							     field->is_signed);
5065 			idx = tracing_map_add_key_field(map,
5066 							hist_field->offset,
5067 							cmp_fn);
5068 		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
5069 			idx = tracing_map_add_sum_field(map);
5070 
5071 		if (idx < 0)
5072 			return idx;
5073 
5074 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5075 			idx = tracing_map_add_var(map);
5076 			if (idx < 0)
5077 				return idx;
5078 			hist_field->var.idx = idx;
5079 			hist_field->var.hist_data = hist_data;
5080 		}
5081 	}
5082 
5083 	return 0;
5084 }
5085 
5086 static struct hist_trigger_data *
5087 create_hist_data(unsigned int map_bits,
5088 		 struct hist_trigger_attrs *attrs,
5089 		 struct trace_event_file *file,
5090 		 bool remove)
5091 {
5092 	const struct tracing_map_ops *map_ops = NULL;
5093 	struct hist_trigger_data *hist_data;
5094 	int ret = 0;
5095 
5096 	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
5097 	if (!hist_data)
5098 		return ERR_PTR(-ENOMEM);
5099 
5100 	hist_data->attrs = attrs;
5101 	hist_data->remove = remove;
5102 	hist_data->event_file = file;
5103 
5104 	ret = parse_actions(hist_data);
5105 	if (ret)
5106 		goto free;
5107 
5108 	ret = create_hist_fields(hist_data, file);
5109 	if (ret)
5110 		goto free;
5111 
5112 	ret = create_sort_keys(hist_data);
5113 	if (ret)
5114 		goto free;
5115 
5116 	map_ops = &hist_trigger_elt_data_ops;
5117 
5118 	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
5119 					    map_ops, hist_data);
5120 	if (IS_ERR(hist_data->map)) {
5121 		ret = PTR_ERR(hist_data->map);
5122 		hist_data->map = NULL;
5123 		goto free;
5124 	}
5125 
5126 	ret = create_tracing_map_fields(hist_data);
5127 	if (ret)
5128 		goto free;
5129  out:
5130 	return hist_data;
5131  free:
5132 	hist_data->attrs = NULL;
5133 
5134 	destroy_hist_data(hist_data);
5135 
5136 	hist_data = ERR_PTR(ret);
5137 
5138 	goto out;
5139 }
5140 
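/*
 * Update one histogram entry for an incoming event: evaluate each value
 * field, store variable values (copying strings and stack traces into
 * per-element storage), update the sums, and finally update any field
 * variables attached to this histogram.
 */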
5141 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
5142 				    struct tracing_map_elt *elt,
5143 				    struct trace_buffer *buffer, void *rec,
5144 				    struct ring_buffer_event *rbe,
5145 				    u64 *var_ref_vals)
5146 {
5147 	struct hist_elt_data *elt_data;
5148 	struct hist_field *hist_field;
5149 	unsigned int i, var_idx;
5150 	u64 hist_val;
5151 
5152 	elt_data = elt->private_data;
5153 	elt_data->var_ref_vals = var_ref_vals;
5154 
5155 	for_each_hist_val_field(i, hist_data) {
5156 		hist_field = hist_data->fields[i];
5157 		hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec);
5158 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5159 			var_idx = hist_field->var.idx;
5160 
5161 			if (hist_field->flags &
5162 			    (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE)) {
5163 				unsigned int str_start, var_str_idx, idx;
5164 				char *str, *val_str;
5165 				unsigned int size;
5166 
5167 				str_start = hist_data->n_field_var_str +
5168 					hist_data->n_save_var_str;
5169 				var_str_idx = hist_field->var_str_idx;
5170 				idx = str_start + var_str_idx;
5171 
5172 				str = elt_data->field_var_str[idx];
5173 				val_str = (char *)(uintptr_t)hist_val;
5174 
5175 				if (hist_field->flags & HIST_FIELD_FL_STRING) {
5176 					size = min(hist_field->size, STR_VAR_LEN_MAX);
5177 					strscpy(str, val_str, size);
5178 				} else {
5179 					char *stack_start = str + sizeof(unsigned long);
5180 					int e;
5181 
5182 					e = stack_trace_save((void *)stack_start,
5183 							     HIST_STACKTRACE_DEPTH,
5184 							     HIST_STACKTRACE_SKIP);
5185 					if (e < HIST_STACKTRACE_DEPTH - 1)
5186 						((unsigned long *)stack_start)[e] = 0;
5187 					*((unsigned long *)str) = e;
5188 				}
5189 				hist_val = (u64)(uintptr_t)str;
5190 			}
5191 			tracing_map_set_var(elt, var_idx, hist_val);
5192 			continue;
5193 		}
5194 		tracing_map_update_sum(elt, i, hist_val);
5195 	}
5196 
5197 	for_each_hist_key_field(i, hist_data) {
5198 		hist_field = hist_data->fields[i];
5199 		if (hist_field->flags & HIST_FIELD_FL_VAR) {
5200 			hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec);
5201 			var_idx = hist_field->var.idx;
5202 			tracing_map_set_var(elt, var_idx, hist_val);
5203 		}
5204 	}
5205 
5206 	update_field_vars(hist_data, elt, buffer, rbe, rec);
5207 }
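
/*
 * Layout of the per-element string slots used above (derived from the
 * index arithmetic in hist_trigger_elt_update()):
 *
 *	elt_data->field_var_str[0 .. n_field_var_str - 1]	field variables
 *	elt_data->field_var_str[.. + n_save_var_str - 1]	save() params
 *	elt_data->field_var_str[str_start + var_str_idx]	string/stacktrace
 *								variables
 *
 * A stacktrace variable slot stores the number of saved entries in its
 * first unsigned long, followed by the entries themselves, which is the
 * format consumed by the stacktrace key path in event_hist_trigger().
 */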
5208 
5209 static inline void add_to_key(char *compound_key, void *key,
5210 			      struct hist_field *key_field, void *rec)
5211 {
5212 	size_t size = key_field->size;
5213 
5214 	if (key_field->flags & HIST_FIELD_FL_STRING) {
5215 		struct ftrace_event_field *field;
5216 
5217 		field = key_field->field;
5218 		if (field->filter_type == FILTER_DYN_STRING ||
5219 		    field->filter_type == FILTER_RDYN_STRING)
5220 			size = *(u32 *)(rec + field->offset) >> 16;
5221 		else if (field->filter_type == FILTER_STATIC_STRING)
5222 			size = field->size;
5223 
5224 		/* ensure NULL-termination */
5225 		if (size > key_field->size - 1)
5226 			size = key_field->size - 1;
5227 
5228 		strncpy(compound_key + key_field->offset, (char *)key, size);
5229 	} else
5230 		memcpy(compound_key + key_field->offset, key, size);
5231 }
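
/*
 * For dynamic string fields (FILTER_DYN_STRING/FILTER_RDYN_STRING) the
 * record holds a 32-bit __data_loc word whose high 16 bits carry the
 * string length, hence the ">> 16" above; static strings use the field's
 * declared size.  Either way the copy is clamped to key_field->size - 1 so
 * the compound key stays NUL-terminated and hashes/compares consistently.
 */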
5232 
5233 static void
5234 hist_trigger_actions(struct hist_trigger_data *hist_data,
5235 		     struct tracing_map_elt *elt,
5236 		     struct trace_buffer *buffer, void *rec,
5237 		     struct ring_buffer_event *rbe, void *key,
5238 		     u64 *var_ref_vals)
5239 {
5240 	struct action_data *data;
5241 	unsigned int i;
5242 
5243 	for (i = 0; i < hist_data->n_actions; i++) {
5244 		data = hist_data->actions[i];
5245 		data->fn(hist_data, elt, buffer, rec, rbe, key, data, var_ref_vals);
5246 	}
5247 }
5248 
5249 static void event_hist_trigger(struct event_trigger_data *data,
5250 			       struct trace_buffer *buffer, void *rec,
5251 			       struct ring_buffer_event *rbe)
5252 {
5253 	struct hist_trigger_data *hist_data = data->private_data;
5254 	bool use_compound_key = (hist_data->n_keys > 1);
5255 	unsigned long entries[HIST_STACKTRACE_DEPTH];
5256 	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
5257 	char compound_key[HIST_KEY_SIZE_MAX];
5258 	struct tracing_map_elt *elt = NULL;
5259 	struct hist_field *key_field;
5260 	u64 field_contents;
5261 	void *key = NULL;
5262 	unsigned int i;
5263 
5264 	if (unlikely(!rbe))
5265 		return;
5266 
5267 	memset(compound_key, 0, hist_data->key_size);
5268 
5269 	for_each_hist_key_field(i, hist_data) {
5270 		key_field = hist_data->fields[i];
5271 
5272 		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5273 			memset(entries, 0, HIST_STACKTRACE_SIZE);
5274 			if (key_field->field) {
5275 				unsigned long *stack, n_entries;
5276 
5277 				field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec);
5278 				stack = (unsigned long *)(long)field_contents;
5279 				n_entries = *stack;
5280 				memcpy(entries, ++stack, n_entries * sizeof(unsigned long));
5281 			} else {
5282 				stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
5283 						 HIST_STACKTRACE_SKIP);
5284 			}
5285 			key = entries;
5286 		} else {
5287 			field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec);
5288 			if (key_field->flags & HIST_FIELD_FL_STRING) {
5289 				key = (void *)(unsigned long)field_contents;
5290 				use_compound_key = true;
5291 			} else
5292 				key = (void *)&field_contents;
5293 		}
5294 
5295 		if (use_compound_key)
5296 			add_to_key(compound_key, key, key_field, rec);
5297 	}
5298 
5299 	if (use_compound_key)
5300 		key = compound_key;
5301 
5302 	if (hist_data->n_var_refs &&
5303 	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
5304 		return;
5305 
5306 	elt = tracing_map_insert(hist_data->map, key);
5307 	if (!elt)
5308 		return;
5309 
5310 	hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, var_ref_vals);
5311 
5312 	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
5313 		hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals);
5314 }
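
/*
 * Per-event flow above: build the key (a compound key whenever there is
 * more than one key field or a string key), bail out early if required
 * variable references don't resolve, insert/look up the tracing_map
 * element, update its sums and variables, then run any actions.
 *
 * For example, a two-key trigger such as (assuming the standard
 * sched_wakeup event fields):
 *
 *	# echo 'hist:keys=pid,prio' > \
 *		/sys/kernel/tracing/events/sched/sched_wakeup/trigger
 *
 * folds each sched_wakeup event into the bucket addressed by the
 * (pid, prio) compound key assembled here.
 */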
5315 
5316 static void hist_trigger_stacktrace_print(struct seq_file *m,
5317 					  unsigned long *stacktrace_entries,
5318 					  unsigned int max_entries)
5319 {
5320 	unsigned int spaces = 8;
5321 	unsigned int i;
5322 
5323 	for (i = 0; i < max_entries; i++) {
5324 		if (!stacktrace_entries[i])
5325 			return;
5326 
5327 		seq_printf(m, "%*c", 1 + spaces, ' ');
5328 		seq_printf(m, "%pS\n", (void*)stacktrace_entries[i]);
5329 	}
5330 }
5331 
5332 static void hist_trigger_print_key(struct seq_file *m,
5333 				   struct hist_trigger_data *hist_data,
5334 				   void *key,
5335 				   struct tracing_map_elt *elt)
5336 {
5337 	struct hist_field *key_field;
5338 	bool multiline = false;
5339 	const char *field_name;
5340 	unsigned int i;
5341 	u64 uval;
5342 
5343 	seq_puts(m, "{ ");
5344 
5345 	for_each_hist_key_field(i, hist_data) {
5346 		key_field = hist_data->fields[i];
5347 
5348 		if (i > hist_data->n_vals)
5349 			seq_puts(m, ", ");
5350 
5351 		field_name = hist_field_name(key_field, 0);
5352 
5353 		if (key_field->flags & HIST_FIELD_FL_HEX) {
5354 			uval = *(u64 *)(key + key_field->offset);
5355 			seq_printf(m, "%s: %llx", field_name, uval);
5356 		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
5357 			uval = *(u64 *)(key + key_field->offset);
5358 			seq_printf(m, "%s: [%llx] %-45ps", field_name,
5359 				   uval, (void *)(uintptr_t)uval);
5360 		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
5361 			uval = *(u64 *)(key + key_field->offset);
5362 			seq_printf(m, "%s: [%llx] %-55pS", field_name,
5363 				   uval, (void *)(uintptr_t)uval);
5364 		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
5365 			struct hist_elt_data *elt_data = elt->private_data;
5366 			char *comm;
5367 
5368 			if (WARN_ON_ONCE(!elt_data))
5369 				return;
5370 
5371 			comm = elt_data->comm;
5372 
5373 			uval = *(u64 *)(key + key_field->offset);
5374 			seq_printf(m, "%s: %-16s[%10llu]", field_name,
5375 				   comm, uval);
5376 		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
5377 			const char *syscall_name;
5378 
5379 			uval = *(u64 *)(key + key_field->offset);
5380 			syscall_name = get_syscall_name(uval);
5381 			if (!syscall_name)
5382 				syscall_name = "unknown_syscall";
5383 
5384 			seq_printf(m, "%s: %-30s[%3llu]", field_name,
5385 				   syscall_name, uval);
5386 		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5387 			if (key_field->field)
5388 				seq_printf(m, "%s.stacktrace", key_field->field->name);
5389 			else
5390 				seq_puts(m, "common_stacktrace:\n");
5391 			hist_trigger_stacktrace_print(m,
5392 						      key + key_field->offset,
5393 						      HIST_STACKTRACE_DEPTH);
5394 			multiline = true;
5395 		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
5396 			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
5397 				   *(u64 *)(key + key_field->offset));
5398 		} else if (key_field->flags & HIST_FIELD_FL_BUCKET) {
5399 			unsigned long buckets = key_field->buckets;
5400 			uval = *(u64 *)(key + key_field->offset);
5401 			seq_printf(m, "%s: ~ %llu-%llu", field_name,
5402 				   uval, uval + buckets - 1);
5403 		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
5404 			seq_printf(m, "%s: %-50s", field_name,
5405 				   (char *)(key + key_field->offset));
5406 		} else {
5407 			uval = *(u64 *)(key + key_field->offset);
5408 			seq_printf(m, "%s: %10llu", field_name, uval);
5409 		}
5410 	}
5411 
5412 	if (!multiline)
5413 		seq_puts(m, " ");
5414 
5415 	seq_puts(m, "}");
5416 }
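
/*
 * Sample key renderings produced above (values illustrative only):
 *
 *	{ call_site: [ffffffff81234560] kmem_cache_alloc+0x60/0x170 }	(.sym-offset)
 *	{ common_pid: bash            [      1234] }			(.execname)
 *	{ lat: ~ 2^4 }							(.log2)
 *	{ nbytes: ~ 256-511 }						(.buckets=256)
 */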
5417 
5418 /* Return the percentage of @val in @total, scaled by 100 (10000 == 100%) */
5419 static inline unsigned int __get_percentage(u64 val, u64 total)
5420 {
5421 	if (!total)
5422 		goto div0;
5423 
5424 	if (val < (U64_MAX / 10000))
5425 		return (unsigned int)div64_ul(val * 10000, total);
5426 
5427 	total = div64_u64(total, 10000);
5428 	if (!total)
5429 		goto div0;
5430 
5431 	return (unsigned int)div64_ul(val, total);
5432 div0:
5433 	return val ? UINT_MAX : 0;
5434 }
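
/*
 * Example: __get_percentage(357, 1000) returns 3570, i.e. 35.70%; keeping
 * the extra factor of 100 lets callers print two decimal places with
 * integer math (pc / 100 and pc % 100).  The large-@val branch trades
 * precision for overflow safety, and a zero @total yields 0 or UINT_MAX
 * (rendered as "[ERROR]") depending on @val.
 */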
5435 
5436 #define BAR_CHAR '#'
5437 
5438 static inline const char *__fill_bar_str(char *buf, int size, u64 val, u64 max)
5439 {
5440 	unsigned int len = __get_percentage(val, max);
5441 	int i;
5442 
5443 	if (len == UINT_MAX) {
5444 		snprintf(buf, size, "[ERROR]");
5445 		return buf;
5446 	}
5447 
5448 	len = len * size / 10000;
5449 	for (i = 0; i < len && i < size; i++)
5450 		buf[i] = BAR_CHAR;
5451 	while (i < size)
5452 		buf[i++] = ' ';
5453 	buf[size] = '\0';
5454 
5455 	return buf;
5456 }
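
/*
 * Example: __fill_bar_str(buf, 20, 50, 100) computes len = 5000 (50.00%),
 * scales it to 10 of the 20 columns and fills buf with
 * "##########          "; @buf must provide size + 1 bytes for the NUL.
 */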
5457 
5458 struct hist_val_stat {
5459 	u64 max;
5460 	u64 total;
5461 };
5462 
5463 static void hist_trigger_print_val(struct seq_file *m, unsigned int idx,
5464 				   const char *field_name, unsigned long flags,
5465 				   struct hist_val_stat *stats,
5466 				   struct tracing_map_elt *elt)
5467 {
5468 	u64 val = tracing_map_read_sum(elt, idx);
5469 	unsigned int pc;
5470 	char bar[21];
5471 
5472 	if (flags & HIST_FIELD_FL_PERCENT) {
5473 		pc = __get_percentage(val, stats[idx].total);
5474 		if (pc == UINT_MAX)
5475 			seq_printf(m, " %s (%%):[ERROR]", field_name);
5476 		else
5477 			seq_printf(m, " %s (%%): %3u.%02u", field_name,
5478 					pc / 100, pc % 100);
5479 	} else if (flags & HIST_FIELD_FL_GRAPH) {
5480 		seq_printf(m, " %s: %20s", field_name,
5481 			   __fill_bar_str(bar, 20, val, stats[idx].max));
5482 	} else if (flags & HIST_FIELD_FL_HEX) {
5483 		seq_printf(m, " %s: %10llx", field_name, val);
5484 	} else {
5485 		seq_printf(m, " %s: %10llu", field_name, val);
5486 	}
5487 }
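
/*
 * The .percent and .graph value modifiers handled above are requested in
 * the trigger itself, e.g. (sketch assuming the sched_stat_runtime event):
 *
 *	# echo 'hist:keys=comm:vals=runtime.percent,runtime.graph' > \
 *		/sys/kernel/tracing/events/sched/sched_stat_runtime/trigger
 *
 * which prints each bucket's runtime as a share of the total ("NN.NN") and
 * as a 20-column '#' bar relative to the largest bucket.
 */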
5488 
5489 static void hist_trigger_entry_print(struct seq_file *m,
5490 				     struct hist_trigger_data *hist_data,
5491 				     struct hist_val_stat *stats,
5492 				     void *key,
5493 				     struct tracing_map_elt *elt)
5494 {
5495 	const char *field_name;
5496 	unsigned int i = HITCOUNT_IDX;
5497 	unsigned long flags;
5498 
5499 	hist_trigger_print_key(m, hist_data, key, elt);
5500 
5501 	/* First, show the raw hitcount, unless the nohitcount attribute was given */
5502 	if (!hist_data->attrs->no_hitcount)
5503 		hist_trigger_print_val(m, i, "hitcount", 0, stats, elt);
5504 
5505 	for (i = 1; i < hist_data->n_vals; i++) {
5506 		field_name = hist_field_name(hist_data->fields[i], 0);
5507 		flags = hist_data->fields[i]->flags;
5508 		if (flags & HIST_FIELD_FL_VAR || flags & HIST_FIELD_FL_EXPR)
5509 			continue;
5510 
5511 		seq_puts(m, " ");
5512 		hist_trigger_print_val(m, i, field_name, flags, stats, elt);
5513 	}
5514 
5515 	print_actions(m, hist_data, elt);
5516 
5517 	seq_puts(m, "\n");
5518 }
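
/*
 * A fully rendered entry therefore looks like (illustrative):
 *
 *	{ call_site: [ffffffff81234560] __kmalloc } hitcount:        212  bytes_req:      35712
 *
 * i.e. the key block from hist_trigger_print_key(), the raw hitcount
 * unless the trigger was created with :nohitcount, then every value field
 * that isn't a variable or a bare expression.
 */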
5519 
5520 static int print_entries(struct seq_file *m,
5521 			 struct hist_trigger_data *hist_data)
5522 {
5523 	struct tracing_map_sort_entry **sort_entries = NULL;
5524 	struct tracing_map *map = hist_data->map;
5525 	int i, j, n_entries;
5526 	struct hist_val_stat *stats = NULL;
5527 	u64 val;
5528 
5529 	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
5530 					     hist_data->n_sort_keys,
5531 					     &sort_entries);
5532 	if (n_entries < 0)
5533 		return n_entries;
5534 
5535 	/* Calculate the max and the total for each field if needed. */
5536 	for (j = 0; j < hist_data->n_vals; j++) {
5537 		if (!(hist_data->fields[j]->flags &
5538 			(HIST_FIELD_FL_PERCENT | HIST_FIELD_FL_GRAPH)))
5539 			continue;
5540 		if (!stats) {
5541 			stats = kcalloc(hist_data->n_vals, sizeof(*stats),
5542 				       GFP_KERNEL);
5543 			if (!stats) {
5544 				n_entries = -ENOMEM;
5545 				goto out;
5546 			}
5547 		}
5548 		for (i = 0; i < n_entries; i++) {
5549 			val = tracing_map_read_sum(sort_entries[i]->elt, j);
5550 			stats[j].total += val;
5551 			if (stats[j].max < val)
5552 				stats[j].max = val;
5553 		}
5554 	}
5555 
5556 	for (i = 0; i < n_entries; i++)
5557 		hist_trigger_entry_print(m, hist_data, stats,
5558 					 sort_entries[i]->key,
5559 					 sort_entries[i]->elt);
5560 
5561 	kfree(stats);
5562 out:
5563 	tracing_map_destroy_sort_entries(sort_entries, n_entries);
5564 
5565 	return n_entries;
5566 }
5567 
5568 static void hist_trigger_show(struct seq_file *m,
5569 			      struct event_trigger_data *data, int n)
5570 {
5571 	struct hist_trigger_data *hist_data;
5572 	int n_entries;
5573 
5574 	if (n > 0)
5575 		seq_puts(m, "\n\n");
5576 
5577 	seq_puts(m, "# event histogram\n#\n# trigger info: ");
5578 	data->ops->print(m, data);
5579 	seq_puts(m, "#\n\n");
5580 
5581 	hist_data = data->private_data;
5582 	n_entries = print_entries(m, hist_data);
5583 	if (n_entries < 0)
5584 		n_entries = 0;
5585 
5586 	track_data_snapshot_print(m, hist_data);
5587 
5588 	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
5589 		   (u64)atomic64_read(&hist_data->map->hits),
5590 		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
5591 }
5592 
5593 static int hist_show(struct seq_file *m, void *v)
5594 {
5595 	struct event_trigger_data *data;
5596 	struct trace_event_file *event_file;
5597 	int n = 0, ret = 0;
5598 
5599 	mutex_lock(&event_mutex);
5600 
5601 	event_file = event_file_file(m->private);
5602 	if (unlikely(!event_file)) {
5603 		ret = -ENODEV;
5604 		goto out_unlock;
5605 	}
5606 
5607 	list_for_each_entry(data, &event_file->triggers, list) {
5608 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5609 			hist_trigger_show(m, data, n++);
5610 	}
5611 
5612  out_unlock:
5613 	mutex_unlock(&event_mutex);
5614 
5615 	return ret;
5616 }
5617 
5618 static int event_hist_open(struct inode *inode, struct file *file)
5619 {
5620 	int ret;
5621 
5622 	ret = tracing_open_file_tr(inode, file);
5623 	if (ret)
5624 		return ret;
5625 
5626 	/* Clear private_data to avoid warning in single_open() */
5627 	file->private_data = NULL;
5628 	return single_open(file, hist_show, file);
5629 }
5630 
5631 const struct file_operations event_hist_fops = {
5632 	.open = event_hist_open,
5633 	.read = seq_read,
5634 	.llseek = seq_lseek,
5635 	.release = tracing_single_release_file_tr,
5636 };
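
/*
 * event_hist_fops backs the per-event "hist" file, so a histogram attached
 * to, say, kmem:kmalloc is read with:
 *
 *	# cat /sys/kernel/tracing/events/kmem/kmalloc/hist
 *
 * hist_show() walks the event's trigger list under event_mutex and prints
 * one histogram per ETT_EVENT_HIST trigger found there.
 */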
5637 
5638 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
5639 static void hist_field_debug_show_flags(struct seq_file *m,
5640 					unsigned long flags)
5641 {
5642 	seq_puts(m, "      flags:\n");
5643 
5644 	if (flags & HIST_FIELD_FL_KEY)
5645 		seq_puts(m, "        HIST_FIELD_FL_KEY\n");
5646 	else if (flags & HIST_FIELD_FL_HITCOUNT)
5647 		seq_puts(m, "        VAL: HIST_FIELD_FL_HITCOUNT\n");
5648 	else if (flags & HIST_FIELD_FL_VAR)
5649 		seq_puts(m, "        HIST_FIELD_FL_VAR\n");
5650 	else if (flags & HIST_FIELD_FL_VAR_REF)
5651 		seq_puts(m, "        HIST_FIELD_FL_VAR_REF\n");
5652 	else
5653 		seq_puts(m, "        VAL: normal u64 value\n");
5654 
5655 	if (flags & HIST_FIELD_FL_ALIAS)
5656 		seq_puts(m, "        HIST_FIELD_FL_ALIAS\n");
5657 	else if (flags & HIST_FIELD_FL_CONST)
5658 		seq_puts(m, "        HIST_FIELD_FL_CONST\n");
5659 }
5660 
5661 static int hist_field_debug_show(struct seq_file *m,
5662 				 struct hist_field *field, unsigned long flags)
5663 {
5664 	if ((field->flags & flags) != flags) {
5665 		seq_printf(m, "ERROR: bad flags - %lx\n", flags);
5666 		return -EINVAL;
5667 	}
5668 
5669 	hist_field_debug_show_flags(m, field->flags);
5670 	if (field->field)
5671 		seq_printf(m, "      ftrace_event_field name: %s\n",
5672 			   field->field->name);
5673 
5674 	if (field->flags & HIST_FIELD_FL_VAR) {
5675 		seq_printf(m, "      var.name: %s\n", field->var.name);
5676 		seq_printf(m, "      var.idx (into tracing_map_elt.vars[]): %u\n",
5677 			   field->var.idx);
5678 	}
5679 
5680 	if (field->flags & HIST_FIELD_FL_CONST)
5681 		seq_printf(m, "      constant: %llu\n", field->constant);
5682 
5683 	if (field->flags & HIST_FIELD_FL_ALIAS)
5684 		seq_printf(m, "      var_ref_idx (into hist_data->var_refs[]): %u\n",
5685 			   field->var_ref_idx);
5686 
5687 	if (field->flags & HIST_FIELD_FL_VAR_REF) {
5688 		seq_printf(m, "      name: %s\n", field->name);
5689 		seq_printf(m, "      var.idx (into tracing_map_elt.vars[]): %u\n",
5690 			   field->var.idx);
5691 		seq_printf(m, "      var.hist_data: %p\n", field->var.hist_data);
5692 		seq_printf(m, "      var_ref_idx (into hist_data->var_refs[]): %u\n",
5693 			   field->var_ref_idx);
5694 		if (field->system)
5695 			seq_printf(m, "      system: %s\n", field->system);
5696 		if (field->event_name)
5697 			seq_printf(m, "      event_name: %s\n", field->event_name);
5698 	}
5699 
5700 	seq_printf(m, "      type: %s\n", field->type);
5701 	seq_printf(m, "      size: %u\n", field->size);
5702 	seq_printf(m, "      is_signed: %u\n", field->is_signed);
5703 
5704 	return 0;
5705 }
5706 
5707 static int field_var_debug_show(struct seq_file *m,
5708 				struct field_var *field_var, unsigned int i,
5709 				bool save_vars)
5710 {
5711 	const char *vars_name = save_vars ? "save_vars" : "field_vars";
5712 	struct hist_field *field;
5713 	int ret = 0;
5714 
5715 	seq_printf(m, "\n    hist_data->%s[%d]:\n", vars_name, i);
5716 
5717 	field = field_var->var;
5718 
5719 	seq_printf(m, "\n      %s[%d].var:\n", vars_name, i);
5720 
5721 	hist_field_debug_show_flags(m, field->flags);
5722 	seq_printf(m, "      var.name: %s\n", field->var.name);
5723 	seq_printf(m, "      var.idx (into tracing_map_elt.vars[]): %u\n",
5724 		   field->var.idx);
5725 
5726 	field = field_var->val;
5727 
5728 	seq_printf(m, "\n      %s[%d].val:\n", vars_name, i);
5729 	if (field->field)
5730 		seq_printf(m, "      ftrace_event_field name: %s\n",
5731 			   field->field->name);
5732 	else {
5733 		ret = -EINVAL;
5734 		goto out;
5735 	}
5736 
5737 	seq_printf(m, "      type: %s\n", field->type);
5738 	seq_printf(m, "      size: %u\n", field->size);
5739 	seq_printf(m, "      is_signed: %u\n", field->is_signed);
5740 out:
5741 	return ret;
5742 }
5743 
5744 static int hist_action_debug_show(struct seq_file *m,
5745 				  struct action_data *data, int i)
5746 {
5747 	int ret = 0;
5748 
5749 	if (data->handler == HANDLER_ONMAX ||
5750 	    data->handler == HANDLER_ONCHANGE) {
5751 		seq_printf(m, "\n    hist_data->actions[%d].track_data.var_ref:\n", i);
5752 		ret = hist_field_debug_show(m, data->track_data.var_ref,
5753 					    HIST_FIELD_FL_VAR_REF);
5754 		if (ret)
5755 			goto out;
5756 
5757 		seq_printf(m, "\n    hist_data->actions[%d].track_data.track_var:\n", i);
5758 		ret = hist_field_debug_show(m, data->track_data.track_var,
5759 					    HIST_FIELD_FL_VAR);
5760 		if (ret)
5761 			goto out;
5762 	}
5763 
5764 	if (data->handler == HANDLER_ONMATCH) {
5765 		seq_printf(m, "\n    hist_data->actions[%d].match_data.event_system: %s\n",
5766 			   i, data->match_data.event_system);
5767 		seq_printf(m, "    hist_data->actions[%d].match_data.event: %s\n",
5768 			   i, data->match_data.event);
5769 	}
5770 out:
5771 	return ret;
5772 }
5773 
5774 static int hist_actions_debug_show(struct seq_file *m,
5775 				   struct hist_trigger_data *hist_data)
5776 {
5777 	int i, ret = 0;
5778 
5779 	if (hist_data->n_actions)
5780 		seq_puts(m, "\n  action tracking variables (for onmax()/onchange()/onmatch()):\n");
5781 
5782 	for (i = 0; i < hist_data->n_actions; i++) {
5783 		struct action_data *action = hist_data->actions[i];
5784 
5785 		ret = hist_action_debug_show(m, action, i);
5786 		if (ret)
5787 			goto out;
5788 	}
5789 
5790 	if (hist_data->n_save_vars)
5791 		seq_puts(m, "\n  save action variables (save() params):\n");
5792 
5793 	for (i = 0; i < hist_data->n_save_vars; i++) {
5794 		ret = field_var_debug_show(m, hist_data->save_vars[i], i, true);
5795 		if (ret)
5796 			goto out;
5797 	}
5798 out:
5799 	return ret;
5800 }
5801 
5802 static void hist_trigger_debug_show(struct seq_file *m,
5803 				    struct event_trigger_data *data, int n)
5804 {
5805 	struct hist_trigger_data *hist_data;
5806 	int i, ret;
5807 
5808 	if (n > 0)
5809 		seq_puts(m, "\n\n");
5810 
5811 	seq_puts(m, "# event histogram\n#\n# trigger info: ");
5812 	data->ops->print(m, data);
5813 	seq_puts(m, "#\n\n");
5814 
5815 	hist_data = data->private_data;
5816 
5817 	seq_printf(m, "hist_data: %p\n\n", hist_data);
5818 	seq_printf(m, "  n_vals: %u\n", hist_data->n_vals);
5819 	seq_printf(m, "  n_keys: %u\n", hist_data->n_keys);
5820 	seq_printf(m, "  n_fields: %u\n", hist_data->n_fields);
5821 
5822 	seq_puts(m, "\n  val fields:\n\n");
5823 
5824 	seq_puts(m, "    hist_data->fields[0]:\n");
5825 	ret = hist_field_debug_show(m, hist_data->fields[0],
5826 				    HIST_FIELD_FL_HITCOUNT);
5827 	if (ret)
5828 		return;
5829 
5830 	for (i = 1; i < hist_data->n_vals; i++) {
5831 		seq_printf(m, "\n    hist_data->fields[%d]:\n", i);
5832 		ret = hist_field_debug_show(m, hist_data->fields[i], 0);
5833 		if (ret)
5834 			return;
5835 	}
5836 
5837 	seq_puts(m, "\n  key fields:\n");
5838 
5839 	for (i = hist_data->n_vals; i < hist_data->n_fields; i++) {
5840 		seq_printf(m, "\n    hist_data->fields[%d]:\n", i);
5841 		ret = hist_field_debug_show(m, hist_data->fields[i],
5842 					    HIST_FIELD_FL_KEY);
5843 		if (ret)
5844 			return;
5845 	}
5846 
5847 	if (hist_data->n_var_refs)
5848 		seq_puts(m, "\n  variable reference fields:\n");
5849 
5850 	for (i = 0; i < hist_data->n_var_refs; i++) {
5851 		seq_printf(m, "\n    hist_data->var_refs[%d]:\n", i);
5852 		ret = hist_field_debug_show(m, hist_data->var_refs[i],
5853 					    HIST_FIELD_FL_VAR_REF);
5854 		if (ret)
5855 			return;
5856 	}
5857 
5858 	if (hist_data->n_field_vars)
5859 		seq_puts(m, "\n  field variables:\n");
5860 
5861 	for (i = 0; i < hist_data->n_field_vars; i++) {
5862 		ret = field_var_debug_show(m, hist_data->field_vars[i], i, false);
5863 		if (ret)
5864 			return;
5865 	}
5866 
5867 	ret = hist_actions_debug_show(m, hist_data);
5868 	if (ret)
5869 		return;
5870 }
5871 
5872 static int hist_debug_show(struct seq_file *m, void *v)
5873 {
5874 	struct event_trigger_data *data;
5875 	struct trace_event_file *event_file;
5876 	int n = 0, ret = 0;
5877 
5878 	mutex_lock(&event_mutex);
5879 
5880 	event_file = event_file_file(m->private);
5881 	if (unlikely(!event_file)) {
5882 		ret = -ENODEV;
5883 		goto out_unlock;
5884 	}
5885 
5886 	list_for_each_entry(data, &event_file->triggers, list) {
5887 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5888 			hist_trigger_debug_show(m, data, n++);
5889 	}
5890 
5891  out_unlock:
5892 	mutex_unlock(&event_mutex);
5893 
5894 	return ret;
5895 }
5896 
5897 static int event_hist_debug_open(struct inode *inode, struct file *file)
5898 {
5899 	int ret;
5900 
5901 	ret = tracing_open_file_tr(inode, file);
5902 	if (ret)
5903 		return ret;
5904 
5905 	/* Clear private_data to avoid warning in single_open() */
5906 	file->private_data = NULL;
5907 	return single_open(file, hist_debug_show, file);
5908 }
5909 
5910 const struct file_operations event_hist_debug_fops = {
5911 	.open = event_hist_debug_open,
5912 	.read = seq_read,
5913 	.llseek = seq_lseek,
5914 	.release = tracing_single_release_file_tr,
5915 };
5916 #endif
5917 
5918 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
5919 {
5920 	const char *field_name = hist_field_name(hist_field, 0);
5921 
5922 	if (hist_field->var.name)
5923 		seq_printf(m, "%s=", hist_field->var.name);
5924 
5925 	if (hist_field->flags & HIST_FIELD_FL_CPU)
5926 		seq_puts(m, "common_cpu");
5927 	else if (hist_field->flags & HIST_FIELD_FL_CONST)
5928 		seq_printf(m, "%llu", hist_field->constant);
5929 	else if (field_name) {
5930 		if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
5931 		    hist_field->flags & HIST_FIELD_FL_ALIAS)
5932 			seq_putc(m, '$');
5933 		seq_printf(m, "%s", field_name);
5934 	} else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5935 		seq_puts(m, "common_timestamp");
5936 
5937 	if (hist_field->flags) {
5938 		if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5939 		    !(hist_field->flags & HIST_FIELD_FL_EXPR) &&
5940 		    !(hist_field->flags & HIST_FIELD_FL_STACKTRACE)) {
5941 			const char *flags = get_hist_field_flags(hist_field);
5942 
5943 			if (flags)
5944 				seq_printf(m, ".%s", flags);
5945 		}
5946 	}
5947 	if (hist_field->buckets)
5948 		seq_printf(m, "=%ld", hist_field->buckets);
5949 }
5950 
5951 static int event_hist_trigger_print(struct seq_file *m,
5952 				    struct event_trigger_data *data)
5953 {
5954 	struct hist_trigger_data *hist_data = data->private_data;
5955 	struct hist_field *field;
5956 	bool have_var = false;
5957 	bool show_val = false;
5958 	unsigned int i;
5959 
5960 	seq_puts(m, HIST_PREFIX);
5961 
5962 	if (data->name)
5963 		seq_printf(m, "%s:", data->name);
5964 
5965 	seq_puts(m, "keys=");
5966 
5967 	for_each_hist_key_field(i, hist_data) {
5968 		field = hist_data->fields[i];
5969 
5970 		if (i > hist_data->n_vals)
5971 			seq_puts(m, ",");
5972 
5973 		if (field->flags & HIST_FIELD_FL_STACKTRACE) {
5974 			if (field->field)
5975 				seq_printf(m, "%s.stacktrace", field->field->name);
5976 			else
5977 				seq_puts(m, "common_stacktrace");
5978 		} else
5979 			hist_field_print(m, field);
5980 	}
5981 
5982 	seq_puts(m, ":vals=");
5983 
5984 	for_each_hist_val_field(i, hist_data) {
5985 		field = hist_data->fields[i];
5986 		if (field->flags & HIST_FIELD_FL_VAR) {
5987 			have_var = true;
5988 			continue;
5989 		}
5990 
5991 		if (i == HITCOUNT_IDX) {
5992 			if (hist_data->attrs->no_hitcount)
5993 				continue;
5994 			seq_puts(m, "hitcount");
5995 		} else {
5996 			if (show_val)
5997 				seq_puts(m, ",");
5998 			hist_field_print(m, field);
5999 		}
6000 		show_val = true;
6001 	}
6002 
6003 	if (have_var) {
6004 		unsigned int n = 0;
6005 
6006 		seq_puts(m, ":");
6007 
6008 		for_each_hist_val_field(i, hist_data) {
6009 			field = hist_data->fields[i];
6010 
6011 			if (field->flags & HIST_FIELD_FL_VAR) {
6012 				if (n++)
6013 					seq_puts(m, ",");
6014 				hist_field_print(m, field);
6015 			}
6016 		}
6017 	}
6018 
6019 	seq_puts(m, ":sort=");
6020 
6021 	for (i = 0; i < hist_data->n_sort_keys; i++) {
6022 		struct tracing_map_sort_key *sort_key;
6023 		unsigned int idx, first_key_idx;
6024 
6025 		/* skip VAR vals */
6026 		first_key_idx = hist_data->n_vals - hist_data->n_vars;
6027 
6028 		sort_key = &hist_data->sort_keys[i];
6029 		idx = sort_key->field_idx;
6030 
6031 		if (WARN_ON(idx >= HIST_FIELDS_MAX))
6032 			return -EINVAL;
6033 
6034 		if (i > 0)
6035 			seq_puts(m, ",");
6036 
6037 		if (idx == HITCOUNT_IDX)
6038 			seq_puts(m, "hitcount");
6039 		else {
6040 			if (idx >= first_key_idx)
6041 				idx += hist_data->n_vars;
6042 			hist_field_print(m, hist_data->fields[idx]);
6043 		}
6044 
6045 		if (sort_key->descending)
6046 			seq_puts(m, ".descending");
6047 	}
6048 	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
6049 	if (hist_data->enable_timestamps)
6050 		seq_printf(m, ":clock=%s", hist_data->attrs->clock);
6051 	if (hist_data->attrs->no_hitcount)
6052 		seq_puts(m, ":nohitcount");
6053 
6054 	print_actions_spec(m, hist_data);
6055 
6056 	if (data->filter_str)
6057 		seq_printf(m, " if %s", data->filter_str);
6058 
6059 	if (data->paused)
6060 		seq_puts(m, " [paused]");
6061 	else
6062 		seq_puts(m, " [active]");
6063 
6064 	seq_putc(m, '\n');
6065 
6066 	return 0;
6067 }
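
/*
 * The description printed above mirrors what was echoed into the trigger
 * file, plus current state, e.g. (illustrative):
 *
 *	hist:keys=common_pid:vals=hitcount:sort=hitcount:size=2048 [active]
 *
 * This is both the "# trigger info:" header of the hist file and what a
 * read of the event's "trigger" file reports.
 */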
6068 
6069 static int event_hist_trigger_init(struct event_trigger_data *data)
6070 {
6071 	struct hist_trigger_data *hist_data = data->private_data;
6072 
6073 	if (!data->ref && hist_data->attrs->name)
6074 		save_named_trigger(hist_data->attrs->name, data);
6075 
6076 	data->ref++;
6077 
6078 	return 0;
6079 }
6080 
6081 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
6082 {
6083 	struct trace_event_file *file;
6084 	unsigned int i;
6085 	char *cmd;
6086 	int ret;
6087 
6088 	for (i = 0; i < hist_data->n_field_var_hists; i++) {
6089 		file = hist_data->field_var_hists[i]->hist_data->event_file;
6090 		cmd = hist_data->field_var_hists[i]->cmd;
6091 		ret = event_hist_trigger_parse(&trigger_hist_cmd, file,
6092 					       "!hist", "hist", cmd);
6093 		WARN_ON_ONCE(ret < 0);
6094 	}
6095 }
6096 
6097 static void event_hist_trigger_free(struct event_trigger_data *data)
6098 {
6099 	struct hist_trigger_data *hist_data = data->private_data;
6100 
6101 	if (WARN_ON_ONCE(data->ref <= 0))
6102 		return;
6103 
6104 	data->ref--;
6105 	if (!data->ref) {
6106 		if (data->name)
6107 			del_named_trigger(data);
6108 
6109 		trigger_data_free(data);
6110 
6111 		remove_hist_vars(hist_data);
6112 
6113 		unregister_field_var_hists(hist_data);
6114 
6115 		destroy_hist_data(hist_data);
6116 	}
6117 }
6118 
6119 static struct event_trigger_ops event_hist_trigger_ops = {
6120 	.trigger		= event_hist_trigger,
6121 	.print			= event_hist_trigger_print,
6122 	.init			= event_hist_trigger_init,
6123 	.free			= event_hist_trigger_free,
6124 };
6125 
6126 static int event_hist_trigger_named_init(struct event_trigger_data *data)
6127 {
6128 	data->ref++;
6129 
6130 	save_named_trigger(data->named_data->name, data);
6131 
6132 	event_hist_trigger_init(data->named_data);
6133 
6134 	return 0;
6135 }
6136 
6137 static void event_hist_trigger_named_free(struct event_trigger_data *data)
6138 {
6139 	if (WARN_ON_ONCE(data->ref <= 0))
6140 		return;
6141 
6142 	event_hist_trigger_free(data->named_data);
6143 
6144 	data->ref--;
6145 	if (!data->ref) {
6146 		del_named_trigger(data);
6147 		trigger_data_free(data);
6148 	}
6149 }
6150 
6151 static struct event_trigger_ops event_hist_trigger_named_ops = {
6152 	.trigger		= event_hist_trigger,
6153 	.print			= event_hist_trigger_print,
6154 	.init			= event_hist_trigger_named_init,
6155 	.free			= event_hist_trigger_named_free,
6156 };
6157 
6158 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
6159 							    char *param)
6160 {
6161 	return &event_hist_trigger_ops;
6162 }
6163 
6164 static void hist_clear(struct event_trigger_data *data)
6165 {
6166 	struct hist_trigger_data *hist_data = data->private_data;
6167 
6168 	if (data->name)
6169 		pause_named_trigger(data);
6170 
6171 	tracepoint_synchronize_unregister();
6172 
6173 	tracing_map_clear(hist_data->map);
6174 
6175 	if (data->name)
6176 		unpause_named_trigger(data);
6177 }
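
/*
 * hist_clear() is reached via the :clear attribute on an existing trigger,
 * e.g.:
 *
 *	# echo 'hist:keys=common_pid:clear' >> \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * Named triggers are paused across the clear so no CPU updates the map
 * while tracing_map_clear() resets it.
 */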
6178 
6179 static bool compatible_field(struct ftrace_event_field *field,
6180 			     struct ftrace_event_field *test_field)
6181 {
6182 	if (field == test_field)
6183 		return true;
6184 	if (field == NULL || test_field == NULL)
6185 		return false;
6186 	if (strcmp(field->name, test_field->name) != 0)
6187 		return false;
6188 	if (strcmp(field->type, test_field->type) != 0)
6189 		return false;
6190 	if (field->size != test_field->size)
6191 		return false;
6192 	if (field->is_signed != test_field->is_signed)
6193 		return false;
6194 
6195 	return true;
6196 }
6197 
6198 static bool hist_trigger_match(struct event_trigger_data *data,
6199 			       struct event_trigger_data *data_test,
6200 			       struct event_trigger_data *named_data,
6201 			       bool ignore_filter)
6202 {
6203 	struct tracing_map_sort_key *sort_key, *sort_key_test;
6204 	struct hist_trigger_data *hist_data, *hist_data_test;
6205 	struct hist_field *key_field, *key_field_test;
6206 	unsigned int i;
6207 
6208 	if (named_data && (named_data != data_test) &&
6209 	    (named_data != data_test->named_data))
6210 		return false;
6211 
6212 	if (!named_data && is_named_trigger(data_test))
6213 		return false;
6214 
6215 	hist_data = data->private_data;
6216 	hist_data_test = data_test->private_data;
6217 
6218 	if (hist_data->n_vals != hist_data_test->n_vals ||
6219 	    hist_data->n_fields != hist_data_test->n_fields ||
6220 	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
6221 		return false;
6222 
6223 	if (!ignore_filter) {
6224 		if ((data->filter_str && !data_test->filter_str) ||
6225 		   (!data->filter_str && data_test->filter_str))
6226 			return false;
6227 	}
6228 
6229 	for_each_hist_field(i, hist_data) {
6230 		key_field = hist_data->fields[i];
6231 		key_field_test = hist_data_test->fields[i];
6232 
6233 		if (key_field->flags != key_field_test->flags)
6234 			return false;
6235 		if (!compatible_field(key_field->field, key_field_test->field))
6236 			return false;
6237 		if (key_field->offset != key_field_test->offset)
6238 			return false;
6239 		if (key_field->size != key_field_test->size)
6240 			return false;
6241 		if (key_field->is_signed != key_field_test->is_signed)
6242 			return false;
6243 		if (!!key_field->var.name != !!key_field_test->var.name)
6244 			return false;
6245 		if (key_field->var.name &&
6246 		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
6247 			return false;
6248 	}
6249 
6250 	for (i = 0; i < hist_data->n_sort_keys; i++) {
6251 		sort_key = &hist_data->sort_keys[i];
6252 		sort_key_test = &hist_data_test->sort_keys[i];
6253 
6254 		if (sort_key->field_idx != sort_key_test->field_idx ||
6255 		    sort_key->descending != sort_key_test->descending)
6256 			return false;
6257 	}
6258 
6259 	if (!ignore_filter && data->filter_str &&
6260 	    (strcmp(data->filter_str, data_test->filter_str) != 0))
6261 		return false;
6262 
6263 	if (!actions_match(hist_data, hist_data_test))
6264 		return false;
6265 
6266 	return true;
6267 }
6268 
6269 static bool existing_hist_update_only(char *glob,
6270 				      struct event_trigger_data *data,
6271 				      struct trace_event_file *file)
6272 {
6273 	struct hist_trigger_data *hist_data = data->private_data;
6274 	struct event_trigger_data *test, *named_data = NULL;
6275 	bool updated = false;
6276 
6277 	if (!hist_data->attrs->pause && !hist_data->attrs->cont &&
6278 	    !hist_data->attrs->clear)
6279 		goto out;
6280 
6281 	if (hist_data->attrs->name) {
6282 		named_data = find_named_trigger(hist_data->attrs->name);
6283 		if (named_data) {
6284 			if (!hist_trigger_match(data, named_data, named_data,
6285 						true))
6286 				goto out;
6287 		}
6288 	}
6289 
6290 	if (hist_data->attrs->name && !named_data)
6291 		goto out;
6292 
6293 	list_for_each_entry(test, &file->triggers, list) {
6294 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6295 			if (!hist_trigger_match(data, test, named_data, false))
6296 				continue;
6297 			if (hist_data->attrs->pause)
6298 				test->paused = true;
6299 			else if (hist_data->attrs->cont)
6300 				test->paused = false;
6301 			else if (hist_data->attrs->clear)
6302 				hist_clear(test);
6303 			updated = true;
6304 			goto out;
6305 		}
6306 	}
6307  out:
6308 	return updated;
6309 }
6310 
6311 static int hist_register_trigger(char *glob,
6312 				 struct event_trigger_data *data,
6313 				 struct trace_event_file *file)
6314 {
6315 	struct hist_trigger_data *hist_data = data->private_data;
6316 	struct event_trigger_data *test, *named_data = NULL;
6317 	struct trace_array *tr = file->tr;
6318 	int ret = 0;
6319 
6320 	if (hist_data->attrs->name) {
6321 		named_data = find_named_trigger(hist_data->attrs->name);
6322 		if (named_data) {
6323 			if (!hist_trigger_match(data, named_data, named_data,
6324 						true)) {
6325 				hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
6326 				ret = -EINVAL;
6327 				goto out;
6328 			}
6329 		}
6330 	}
6331 
6332 	if (hist_data->attrs->name && !named_data)
6333 		goto new;
6334 
6335 	lockdep_assert_held(&event_mutex);
6336 
6337 	list_for_each_entry(test, &file->triggers, list) {
6338 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6339 			if (hist_trigger_match(data, test, named_data, false)) {
6340 				hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
6341 				ret = -EEXIST;
6342 				goto out;
6343 			}
6344 		}
6345 	}
6346  new:
6347 	if (hist_data->attrs->cont || hist_data->attrs->clear) {
6348 		hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
6349 		ret = -ENOENT;
6350 		goto out;
6351 	}
6352 
6353 	if (hist_data->attrs->pause)
6354 		data->paused = true;
6355 
6356 	if (named_data) {
6357 		data->private_data = named_data->private_data;
6358 		set_named_trigger_data(data, named_data);
6359 		data->ops = &event_hist_trigger_named_ops;
6360 	}
6361 
6362 	if (data->ops->init) {
6363 		ret = data->ops->init(data);
6364 		if (ret < 0)
6365 			goto out;
6366 	}
6367 
6368 	if (hist_data->enable_timestamps) {
6369 		char *clock = hist_data->attrs->clock;
6370 
6371 		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
6372 		if (ret) {
6373 			hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
6374 			goto out;
6375 		}
6376 
6377 		tracing_set_filter_buffering(file->tr, true);
6378 	}
6379 
6380 	if (named_data)
6381 		destroy_hist_data(hist_data);
6382  out:
6383 	return ret;
6384 }
6385 
6386 static int hist_trigger_enable(struct event_trigger_data *data,
6387 			       struct trace_event_file *file)
6388 {
6389 	int ret = 0;
6390 
6391 	list_add_tail_rcu(&data->list, &file->triggers);
6392 
6393 	update_cond_flag(file);
6394 
6395 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
6396 		list_del_rcu(&data->list);
6397 		update_cond_flag(file);
6398 		ret--;
6399 	}
6400 
6401 	return ret;
6402 }
6403 
6404 static bool have_hist_trigger_match(struct event_trigger_data *data,
6405 				    struct trace_event_file *file)
6406 {
6407 	struct hist_trigger_data *hist_data = data->private_data;
6408 	struct event_trigger_data *test, *named_data = NULL;
6409 	bool match = false;
6410 
6411 	lockdep_assert_held(&event_mutex);
6412 
6413 	if (hist_data->attrs->name)
6414 		named_data = find_named_trigger(hist_data->attrs->name);
6415 
6416 	list_for_each_entry(test, &file->triggers, list) {
6417 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6418 			if (hist_trigger_match(data, test, named_data, false)) {
6419 				match = true;
6420 				break;
6421 			}
6422 		}
6423 	}
6424 
6425 	return match;
6426 }
6427 
6428 static bool hist_trigger_check_refs(struct event_trigger_data *data,
6429 				    struct trace_event_file *file)
6430 {
6431 	struct hist_trigger_data *hist_data = data->private_data;
6432 	struct event_trigger_data *test, *named_data = NULL;
6433 
6434 	lockdep_assert_held(&event_mutex);
6435 
6436 	if (hist_data->attrs->name)
6437 		named_data = find_named_trigger(hist_data->attrs->name);
6438 
6439 	list_for_each_entry(test, &file->triggers, list) {
6440 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6441 			if (!hist_trigger_match(data, test, named_data, false))
6442 				continue;
6443 			hist_data = test->private_data;
6444 			if (check_var_refs(hist_data))
6445 				return true;
6446 			break;
6447 		}
6448 	}
6449 
6450 	return false;
6451 }
6452 
6453 static void hist_unregister_trigger(char *glob,
6454 				    struct event_trigger_data *data,
6455 				    struct trace_event_file *file)
6456 {
6457 	struct event_trigger_data *test = NULL, *iter, *named_data = NULL;
6458 	struct hist_trigger_data *hist_data = data->private_data;
6459 
6460 	lockdep_assert_held(&event_mutex);
6461 
6462 	if (hist_data->attrs->name)
6463 		named_data = find_named_trigger(hist_data->attrs->name);
6464 
6465 	list_for_each_entry(iter, &file->triggers, list) {
6466 		if (iter->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6467 			if (!hist_trigger_match(data, iter, named_data, false))
6468 				continue;
6469 			test = iter;
6470 			list_del_rcu(&test->list);
6471 			trace_event_trigger_enable_disable(file, 0);
6472 			update_cond_flag(file);
6473 			break;
6474 		}
6475 	}
6476 
6477 	if (test && test->ops->free)
6478 		test->ops->free(test);
6479 
6480 	if (hist_data->enable_timestamps) {
6481 		if (!hist_data->remove || test)
6482 			tracing_set_filter_buffering(file->tr, false);
6483 	}
6484 }
6485 
6486 static bool hist_file_check_refs(struct trace_event_file *file)
6487 {
6488 	struct hist_trigger_data *hist_data;
6489 	struct event_trigger_data *test;
6490 
6491 	lockdep_assert_held(&event_mutex);
6492 
6493 	list_for_each_entry(test, &file->triggers, list) {
6494 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6495 			hist_data = test->private_data;
6496 			if (check_var_refs(hist_data))
6497 				return true;
6498 		}
6499 	}
6500 
6501 	return false;
6502 }
6503 
6504 static void hist_unreg_all(struct trace_event_file *file)
6505 {
6506 	struct event_trigger_data *test, *n;
6507 	struct hist_trigger_data *hist_data;
6508 	struct synth_event *se;
6509 	const char *se_name;
6510 
6511 	lockdep_assert_held(&event_mutex);
6512 
6513 	if (hist_file_check_refs(file))
6514 		return;
6515 
6516 	list_for_each_entry_safe(test, n, &file->triggers, list) {
6517 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6518 			hist_data = test->private_data;
6519 			list_del_rcu(&test->list);
6520 			trace_event_trigger_enable_disable(file, 0);
6521 
6522 			se_name = trace_event_name(file->event_call);
6523 			se = find_synth_event(se_name);
6524 			if (se)
6525 				se->ref--;
6526 
6527 			update_cond_flag(file);
6528 			if (hist_data->enable_timestamps)
6529 				tracing_set_filter_buffering(file->tr, false);
6530 			if (test->ops->free)
6531 				test->ops->free(test);
6532 		}
6533 	}
6534 }
6535 
6536 static int event_hist_trigger_parse(struct event_command *cmd_ops,
6537 				    struct trace_event_file *file,
6538 				    char *glob, char *cmd,
6539 				    char *param_and_filter)
6540 {
6541 	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
6542 	struct event_trigger_data *trigger_data;
6543 	struct hist_trigger_attrs *attrs;
6544 	struct hist_trigger_data *hist_data;
6545 	char *param, *filter, *p, *start;
6546 	struct synth_event *se;
6547 	const char *se_name;
6548 	bool remove;
6549 	int ret = 0;
6550 
6551 	lockdep_assert_held(&event_mutex);
6552 
6553 	if (WARN_ON(!glob))
6554 		return -EINVAL;
6555 
6556 	if (glob[0]) {
6557 		hist_err_clear();
6558 		last_cmd_set(file, param_and_filter);
6559 	}
6560 
6561 	remove = event_trigger_check_remove(glob);
6562 
6563 	if (event_trigger_empty_param(param_and_filter))
6564 		return -EINVAL;
6565 
6566 	/*
6567 	 * separate the trigger from the filter (k:v [if filter])
6568 	 * allowing for whitespace in the trigger
6569 	 */
6570 	p = param = param_and_filter;
6571 	do {
6572 		p = strstr(p, "if");
6573 		if (!p)
6574 			break;
6575 		if (p == param_and_filter)
6576 			return -EINVAL;
6577 		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
6578 			p++;
6579 			continue;
6580 		}
6581 		if (p >= param_and_filter + strlen(param_and_filter) - (sizeof("if") - 1) - 1)
6582 			return -EINVAL;
6583 		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
6584 			p++;
6585 			continue;
6586 		}
6587 		break;
6588 	} while (1);
6589 
6590 	if (!p)
6591 		filter = NULL;
6592 	else {
6593 		*(p - 1) = '\0';
6594 		filter = strstrip(p);
6595 		param = strstrip(param);
6596 	}
6597 
6598 	/*
6599 	 * To simplify arithmetic expression parsing, replace occurrences of
6600 	 * '.sym-offset' modifier with '.symXoffset'
6601 	 */
6602 	start = strstr(param, ".sym-offset");
6603 	while (start) {
6604 		*(start + 4) = 'X';
6605 		start = strstr(start + 11, ".sym-offset");
6606 	}
6607 
6608 	attrs = parse_hist_trigger_attrs(file->tr, param);
6609 	if (IS_ERR(attrs))
6610 		return PTR_ERR(attrs);
6611 
6612 	if (attrs->map_bits)
6613 		hist_trigger_bits = attrs->map_bits;
6614 
6615 	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
6616 	if (IS_ERR(hist_data)) {
6617 		destroy_hist_trigger_attrs(attrs);
6618 		return PTR_ERR(hist_data);
6619 	}
6620 
6621 	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, hist_data);
6622 	if (!trigger_data) {
6623 		ret = -ENOMEM;
6624 		goto out_free;
6625 	}
6626 
6627 	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
6628 	if (ret < 0)
6629 		goto out_free;
6630 
6631 	if (remove) {
6632 		if (!have_hist_trigger_match(trigger_data, file))
6633 			goto out_free;
6634 
6635 		if (hist_trigger_check_refs(trigger_data, file)) {
6636 			ret = -EBUSY;
6637 			goto out_free;
6638 		}
6639 
6640 		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
6641 		se_name = trace_event_name(file->event_call);
6642 		se = find_synth_event(se_name);
6643 		if (se)
6644 			se->ref--;
6645 		ret = 0;
6646 		goto out_free;
6647 	}
6648 
6649 	if (existing_hist_update_only(glob, trigger_data, file))
6650 		goto out_free;
6651 
6652 	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
6653 	if (ret < 0)
6654 		goto out_free;
6655 
6656 	if (get_named_trigger_data(trigger_data))
6657 		goto enable;
6658 
6659 	ret = create_actions(hist_data);
6660 	if (ret)
6661 		goto out_unreg;
6662 
6663 	if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
6664 		ret = save_hist_vars(hist_data);
6665 		if (ret)
6666 			goto out_unreg;
6667 	}
6668 
6669 	ret = tracing_map_init(hist_data->map);
6670 	if (ret)
6671 		goto out_unreg;
6672 enable:
6673 	ret = hist_trigger_enable(trigger_data, file);
6674 	if (ret)
6675 		goto out_unreg;
6676 
6677 	se_name = trace_event_name(file->event_call);
6678 	se = find_synth_event(se_name);
6679 	if (se)
6680 		se->ref++;
6681  out:
6682 	if (ret == 0 && glob[0])
6683 		hist_err_clear();
6684 
6685 	return ret;
6686  out_unreg:
6687 	event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
6688  out_free:
6689 	event_trigger_reset_filter(cmd_ops, trigger_data);
6690 
6691 	remove_hist_vars(hist_data);
6692 
6693 	kfree(trigger_data);
6694 
6695 	destroy_hist_data(hist_data);
6696 	goto out;
6697 }
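
/*
 * Putting the parse above together, a command such as (using the standard
 * kmem:kmalloc event fields):
 *
 *	# echo 'hist:keys=call_site.sym:vals=bytes_req:sort=bytes_req.descending if bytes_req >= 256' > \
 *		/sys/kernel/tracing/events/kmem/kmalloc/trigger
 *
 * is split at the standalone " if " into the trigger proper (handed to
 * parse_hist_trigger_attrs()/create_hist_data()) and the filter string
 * (handed to event_trigger_set_filter()).  Prefixing the same command
 * with '!' takes the removal path instead.
 */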
6698 
6699 static struct event_command trigger_hist_cmd = {
6700 	.name			= "hist",
6701 	.trigger_type		= ETT_EVENT_HIST,
6702 	.flags			= EVENT_CMD_FL_NEEDS_REC,
6703 	.parse			= event_hist_trigger_parse,
6704 	.reg			= hist_register_trigger,
6705 	.unreg			= hist_unregister_trigger,
6706 	.unreg_all		= hist_unreg_all,
6707 	.get_trigger_ops	= event_hist_get_trigger_ops,
6708 	.set_filter		= set_trigger_filter,
6709 };
6710 
6711 __init int register_trigger_hist_cmd(void)
6712 {
6713 	int ret;
6714 
6715 	ret = register_event_command(&trigger_hist_cmd);
6716 	WARN_ON(ret < 0);
6717 
6718 	return ret;
6719 }
6720 
6721 static void
6722 hist_enable_trigger(struct event_trigger_data *data,
6723 		    struct trace_buffer *buffer,  void *rec,
6724 		    struct ring_buffer_event *event)
6725 {
6726 	struct enable_trigger_data *enable_data = data->private_data;
6727 	struct event_trigger_data *test;
6728 
6729 	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
6730 				lockdep_is_held(&event_mutex)) {
6731 		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6732 			if (enable_data->enable)
6733 				test->paused = false;
6734 			else
6735 				test->paused = true;
6736 		}
6737 	}
6738 }
6739 
6740 static void
6741 hist_enable_count_trigger(struct event_trigger_data *data,
6742 			  struct trace_buffer *buffer,  void *rec,
6743 			  struct ring_buffer_event *event)
6744 {
6745 	if (!data->count)
6746 		return;
6747 
6748 	if (data->count != -1)
6749 		(data->count)--;
6750 
6751 	hist_enable_trigger(data, buffer, rec, event);
6752 }
6753 
6754 static struct event_trigger_ops hist_enable_trigger_ops = {
6755 	.trigger		= hist_enable_trigger,
6756 	.print			= event_enable_trigger_print,
6757 	.init			= event_trigger_init,
6758 	.free			= event_enable_trigger_free,
6759 };
6760 
6761 static struct event_trigger_ops hist_enable_count_trigger_ops = {
6762 	.trigger		= hist_enable_count_trigger,
6763 	.print			= event_enable_trigger_print,
6764 	.init			= event_trigger_init,
6765 	.free			= event_enable_trigger_free,
6766 };
6767 
6768 static struct event_trigger_ops hist_disable_trigger_ops = {
6769 	.trigger		= hist_enable_trigger,
6770 	.print			= event_enable_trigger_print,
6771 	.init			= event_trigger_init,
6772 	.free			= event_enable_trigger_free,
6773 };
6774 
6775 static struct event_trigger_ops hist_disable_count_trigger_ops = {
6776 	.trigger		= hist_enable_count_trigger,
6777 	.print			= event_enable_trigger_print,
6778 	.init			= event_trigger_init,
6779 	.free			= event_enable_trigger_free,
6780 };
6781 
6782 static struct event_trigger_ops *
6783 hist_enable_get_trigger_ops(char *cmd, char *param)
6784 {
6785 	struct event_trigger_ops *ops;
6786 	bool enable;
6787 
6788 	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
6789 
6790 	if (enable)
6791 		ops = param ? &hist_enable_count_trigger_ops :
6792 			&hist_enable_trigger_ops;
6793 	else
6794 		ops = param ? &hist_disable_count_trigger_ops :
6795 			&hist_disable_trigger_ops;
6796 
6797 	return ops;
6798 }
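
/*
 * The enable_hist/disable_hist triggers pause or unpause hist triggers on
 * another event when this event fires, e.g.:
 *
 *	# echo 'enable_hist:kmem:kmalloc' > \
 *		/sys/kernel/tracing/events/sched/sched_process_exec/trigger
 *
 * An optional count ('enable_hist:kmem:kmalloc:1') selects the counted
 * variants returned above, which stop toggling after N hits.
 */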
6799 
6800 static void hist_enable_unreg_all(struct trace_event_file *file)
6801 {
6802 	struct event_trigger_data *test, *n;
6803 
6804 	list_for_each_entry_safe(test, n, &file->triggers, list) {
6805 		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
6806 			list_del_rcu(&test->list);
6807 			update_cond_flag(file);
6808 			trace_event_trigger_enable_disable(file, 0);
6809 			if (test->ops->free)
6810 				test->ops->free(test);
6811 		}
6812 	}
6813 }
6814 
6815 static struct event_command trigger_hist_enable_cmd = {
6816 	.name			= ENABLE_HIST_STR,
6817 	.trigger_type		= ETT_HIST_ENABLE,
6818 	.parse			= event_enable_trigger_parse,
6819 	.reg			= event_enable_register_trigger,
6820 	.unreg			= event_enable_unregister_trigger,
6821 	.unreg_all		= hist_enable_unreg_all,
6822 	.get_trigger_ops	= hist_enable_get_trigger_ops,
6823 	.set_filter		= set_trigger_filter,
6824 };
6825 
6826 static struct event_command trigger_hist_disable_cmd = {
6827 	.name			= DISABLE_HIST_STR,
6828 	.trigger_type		= ETT_HIST_ENABLE,
6829 	.parse			= event_enable_trigger_parse,
6830 	.reg			= event_enable_register_trigger,
6831 	.unreg			= event_enable_unregister_trigger,
6832 	.unreg_all		= hist_enable_unreg_all,
6833 	.get_trigger_ops	= hist_enable_get_trigger_ops,
6834 	.set_filter		= set_trigger_filter,
6835 };
6836 
6837 static __init void unregister_trigger_hist_enable_disable_cmds(void)
6838 {
6839 	unregister_event_command(&trigger_hist_enable_cmd);
6840 	unregister_event_command(&trigger_hist_disable_cmd);
6841 }
6842 
6843 __init int register_trigger_hist_enable_disable_cmds(void)
6844 {
6845 	int ret;
6846 
6847 	ret = register_event_command(&trigger_hist_enable_cmd);
6848 	if (WARN_ON(ret < 0))
6849 		return ret;
6850 	ret = register_event_command(&trigger_hist_disable_cmd);
6851 	if (WARN_ON(ret < 0))
6852 		unregister_trigger_hist_enable_disable_cmds();
6853 
6854 	return ret;
6855 }
6856