xref: /linux/kernel/trace/trace_events_synth.c (revision b83a8ff87a0c10c1d86eb7f96e14009d91fae024)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_synth - synthetic trace events
4  *
5  * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 #include "trace_probe.h"
21 #include "trace_probe_kernel.h"
22 
23 #include "trace_synth.h"
24 
25 #undef ERRORS
26 #define ERRORS	\
27 	C(BAD_NAME,		"Illegal name"),		\
28 	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
29 	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
30 	C(EVENT_EXISTS,		"Event already exists"),	\
31 	C(TOO_MANY_FIELDS,	"Too many fields"),		\
32 	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
33 	C(INVALID_TYPE,		"Invalid type"),		\
34 	C(INVALID_FIELD,        "Invalid field"),		\
35 	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),
36 
37 #undef C
38 #define C(a, b)		SYNTH_ERR_##a
39 
40 enum { ERRORS };
41 
42 #undef C
43 #define C(a, b)		b
44 
45 static const char *err_text[] = { ERRORS };
46 
47 static DEFINE_MUTEX(lastcmd_mutex);
48 static char *last_cmd;
49 
50 static int errpos(const char *str)
51 {
52 	guard(mutex)(&lastcmd_mutex);
53 	if (!str || !last_cmd)
54 		return 0;
55 
56 	return err_pos(last_cmd, str);
57 }
58 
59 static void last_cmd_set(const char *str)
60 {
61 	if (!str)
62 		return;
63 
64 	mutex_lock(&lastcmd_mutex);
65 	kfree(last_cmd);
66 	last_cmd = kstrdup(str, GFP_KERNEL);
67 	mutex_unlock(&lastcmd_mutex);
68 }
69 
70 static void synth_err(u8 err_type, u16 err_pos)
71 {
72 	guard(mutex)(&lastcmd_mutex);
73 	if (!last_cmd)
74 		return;
75 
76 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
77 			err_type, err_pos);
78 }
79 
80 static int create_synth_event(const char *raw_command);
81 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
82 static int synth_event_release(struct dyn_event *ev);
83 static bool synth_event_is_busy(struct dyn_event *ev);
84 static bool synth_event_match(const char *system, const char *event,
85 			int argc, const char **argv, struct dyn_event *ev);
86 
87 static struct dyn_event_operations synth_event_ops = {
88 	.create = create_synth_event,
89 	.show = synth_event_show,
90 	.is_busy = synth_event_is_busy,
91 	.free = synth_event_release,
92 	.match = synth_event_match,
93 };
94 
95 static bool is_synth_event(struct dyn_event *ev)
96 {
97 	return ev->ops == &synth_event_ops;
98 }
99 
100 static struct synth_event *to_synth_event(struct dyn_event *ev)
101 {
102 	return container_of(ev, struct synth_event, devent);
103 }
104 
105 static bool synth_event_is_busy(struct dyn_event *ev)
106 {
107 	struct synth_event *event = to_synth_event(ev);
108 
109 	return event->ref != 0;
110 }
111 
112 static bool synth_event_match(const char *system, const char *event,
113 			int argc, const char **argv, struct dyn_event *ev)
114 {
115 	struct synth_event *sev = to_synth_event(ev);
116 
117 	return strcmp(sev->name, event) == 0 &&
118 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
119 }
120 
121 struct synth_trace_event {
122 	struct trace_entry	ent;
123 	union trace_synth_field	fields[];
124 };
125 
126 static int synth_event_define_fields(struct trace_event_call *call)
127 {
128 	struct synth_trace_event trace;
129 	int offset = offsetof(typeof(trace), fields);
130 	struct synth_event *event = call->data;
131 	unsigned int i, size, n_u64;
132 	char *name, *type;
133 	int filter_type;
134 	bool is_signed;
135 	bool is_stack;
136 	int ret = 0;
137 
138 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
139 		size = event->fields[i]->size;
140 		is_signed = event->fields[i]->is_signed;
141 		type = event->fields[i]->type;
142 		name = event->fields[i]->name;
143 		is_stack = event->fields[i]->is_stack;
144 
145 		filter_type = is_stack ? FILTER_STACKTRACE : FILTER_OTHER;
146 
147 		ret = trace_define_field(call, type, name, offset, size,
148 					 is_signed, filter_type);
149 		if (ret)
150 			break;
151 
152 		event->fields[i]->offset = n_u64;
153 
154 		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
155 			offset += STR_VAR_LEN_MAX;
156 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
157 		} else {
158 			offset += sizeof(u64);
159 			n_u64++;
160 		}
161 	}
162 
163 	event->n_u64 = n_u64;
164 
165 	return ret;
166 }
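/*
 * Illustrative layout sketch (not part of the original file): for a
 * hypothetical event "wakeup_lat pid_t pid; char[16] comm; u64 lat",
 * synth_event_define_fields() lays out entry->fields[] roughly as:
 *
 *   fields[0]                                   pid  (one u64)
 *   fields[1] .. fields[STR_VAR_LEN_MAX/sizeof(u64)]  comm (static string block)
 *   fields[1 + STR_VAR_LEN_MAX/sizeof(u64)]     lat  (one u64)
 *
 * A dynamic string ("char[]") or a stacktrace field would instead occupy a
 * single u64 holding an offset/len pair that points past the fields[] array.
 */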
167 
168 static bool synth_field_signed(char *type)
169 {
170 	if (str_has_prefix(type, "u"))
171 		return false;
172 	if (strcmp(type, "gfp_t") == 0)
173 		return false;
174 
175 	return true;
176 }
177 
178 static int synth_field_is_string(char *type)
179 {
180 	if (strstr(type, "char[") != NULL)
181 		return true;
182 
183 	return false;
184 }
185 
186 static int synth_field_is_stack(char *type)
187 {
188 	if (strstr(type, "long[") != NULL)
189 		return true;
190 
191 	return false;
192 }
193 
194 static int synth_field_string_size(char *type)
195 {
196 	char buf[4], *end, *start;
197 	unsigned int len;
198 	int size, err;
199 
200 	start = strstr(type, "char[");
201 	if (start == NULL)
202 		return -EINVAL;
203 	start += sizeof("char[") - 1;
204 
205 	end = strchr(type, ']');
206 	if (!end || end < start || type + strlen(type) > end + 1)
207 		return -EINVAL;
208 
209 	len = end - start;
210 	if (len > 3)
211 		return -EINVAL;
212 
213 	if (len == 0)
214 		return 0; /* variable-length string */
215 
216 	memcpy(buf, start, len);
217 	buf[len] = '\0';
218 
219 	err = kstrtouint(buf, 0, &size);
220 	if (err)
221 		return err;
222 
223 	if (size > STR_VAR_LEN_MAX)
224 		return -EINVAL;
225 
226 	return size;
227 }
228 
229 static int synth_field_size(char *type)
230 {
231 	int size = 0;
232 
233 	if (strcmp(type, "s64") == 0)
234 		size = sizeof(s64);
235 	else if (strcmp(type, "u64") == 0)
236 		size = sizeof(u64);
237 	else if (strcmp(type, "s32") == 0)
238 		size = sizeof(s32);
239 	else if (strcmp(type, "u32") == 0)
240 		size = sizeof(u32);
241 	else if (strcmp(type, "s16") == 0)
242 		size = sizeof(s16);
243 	else if (strcmp(type, "u16") == 0)
244 		size = sizeof(u16);
245 	else if (strcmp(type, "s8") == 0)
246 		size = sizeof(s8);
247 	else if (strcmp(type, "u8") == 0)
248 		size = sizeof(u8);
249 	else if (strcmp(type, "char") == 0)
250 		size = sizeof(char);
251 	else if (strcmp(type, "unsigned char") == 0)
252 		size = sizeof(unsigned char);
253 	else if (strcmp(type, "int") == 0)
254 		size = sizeof(int);
255 	else if (strcmp(type, "unsigned int") == 0)
256 		size = sizeof(unsigned int);
257 	else if (strcmp(type, "long") == 0)
258 		size = sizeof(long);
259 	else if (strcmp(type, "unsigned long") == 0)
260 		size = sizeof(unsigned long);
261 	else if (strcmp(type, "bool") == 0)
262 		size = sizeof(bool);
263 	else if (strcmp(type, "pid_t") == 0)
264 		size = sizeof(pid_t);
265 	else if (strcmp(type, "gfp_t") == 0)
266 		size = sizeof(gfp_t);
267 	else if (synth_field_is_string(type))
268 		size = synth_field_string_size(type);
269 	else if (synth_field_is_stack(type))
270 		size = 0;
271 
272 	return size;
273 }
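/*
 * Illustrative note (not part of the original file): the size/format helpers
 * above accept the fixed-size integer types ("u8".."u64", "s8".."s64", "int",
 * "unsigned long", "pid_t", "gfp_t", ...), fixed-size strings such as
 * "char[16]", dynamic strings "char[]", and stacktraces "unsigned long[]".
 * synth_field_size() returns 0 for the variable-length forms (which
 * parse_synth_field() later turns into "__data_loc" types) and a negative
 * value for anything it does not recognize.
 */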
274 
275 static const char *synth_field_fmt(char *type)
276 {
277 	const char *fmt = "%llu";
278 
279 	if (strcmp(type, "s64") == 0)
280 		fmt = "%lld";
281 	else if (strcmp(type, "u64") == 0)
282 		fmt = "%llu";
283 	else if (strcmp(type, "s32") == 0)
284 		fmt = "%d";
285 	else if (strcmp(type, "u32") == 0)
286 		fmt = "%u";
287 	else if (strcmp(type, "s16") == 0)
288 		fmt = "%d";
289 	else if (strcmp(type, "u16") == 0)
290 		fmt = "%u";
291 	else if (strcmp(type, "s8") == 0)
292 		fmt = "%d";
293 	else if (strcmp(type, "u8") == 0)
294 		fmt = "%u";
295 	else if (strcmp(type, "char") == 0)
296 		fmt = "%d";
297 	else if (strcmp(type, "unsigned char") == 0)
298 		fmt = "%u";
299 	else if (strcmp(type, "int") == 0)
300 		fmt = "%d";
301 	else if (strcmp(type, "unsigned int") == 0)
302 		fmt = "%u";
303 	else if (strcmp(type, "long") == 0)
304 		fmt = "%ld";
305 	else if (strcmp(type, "unsigned long") == 0)
306 		fmt = "%lu";
307 	else if (strcmp(type, "bool") == 0)
308 		fmt = "%d";
309 	else if (strcmp(type, "pid_t") == 0)
310 		fmt = "%d";
311 	else if (strcmp(type, "gfp_t") == 0)
312 		fmt = "%x";
313 	else if (synth_field_is_string(type))
314 		fmt = "%s";
315 	else if (synth_field_is_stack(type))
316 		fmt = "%s";
317 
318 	return fmt;
319 }
320 
321 static void print_synth_event_num_val(struct trace_seq *s,
322 				      char *print_fmt, char *name,
323 				      int size, union trace_synth_field *val, char *space)
324 {
325 	switch (size) {
326 	case 1:
327 		trace_seq_printf(s, print_fmt, name, val->as_u8, space);
328 		break;
329 
330 	case 2:
331 		trace_seq_printf(s, print_fmt, name, val->as_u16, space);
332 		break;
333 
334 	case 4:
335 		trace_seq_printf(s, print_fmt, name, val->as_u32, space);
336 		break;
337 
338 	default:
339 		trace_seq_printf(s, print_fmt, name, val->as_u64, space);
340 		break;
341 	}
342 }
343 
344 static enum print_line_t print_synth_event(struct trace_iterator *iter,
345 					   int flags,
346 					   struct trace_event *event)
347 {
348 	struct trace_array *tr = iter->tr;
349 	struct trace_seq *s = &iter->seq;
350 	struct synth_trace_event *entry;
351 	struct synth_event *se;
352 	unsigned int i, j, n_u64;
353 	char print_fmt[32];
354 	const char *fmt;
355 
356 	entry = (struct synth_trace_event *)iter->ent;
357 	se = container_of(event, struct synth_event, call.event);
358 
359 	trace_seq_printf(s, "%s: ", se->name);
360 
361 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
362 		if (trace_seq_has_overflowed(s))
363 			goto end;
364 
365 		fmt = synth_field_fmt(se->fields[i]->type);
366 
367 		/* parameter types */
368 		if (tr && tr->trace_flags & TRACE_ITER(VERBOSE))
369 			trace_seq_printf(s, "%s ", fmt);
370 
371 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
372 
373 		/* parameter values */
374 		if (se->fields[i]->is_string) {
375 			if (se->fields[i]->is_dynamic) {
376 				union trace_synth_field *data = &entry->fields[n_u64];
377 
378 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
379 						 (char *)entry + data->as_dynamic.offset,
380 						 i == se->n_fields - 1 ? "" : " ");
381 				n_u64++;
382 			} else {
383 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
384 						 (char *)&entry->fields[n_u64].as_u64,
385 						 i == se->n_fields - 1 ? "" : " ");
386 				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
387 			}
388 		} else if (se->fields[i]->is_stack) {
389 			union trace_synth_field *data = &entry->fields[n_u64];
390 			unsigned long *p = (void *)entry + data->as_dynamic.offset;
391 
392 			trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
393 			for (j = 1; j < data->as_dynamic.len / sizeof(long); j++)
394 				trace_seq_printf(s, "=> %pS\n", (void *)p[j]);
395 			n_u64++;
396 		} else {
397 			struct trace_print_flags __flags[] = {
398 			    __def_gfpflag_names, {-1, NULL} };
399 			char *space = (i == se->n_fields - 1 ? "" : " ");
400 
401 			print_synth_event_num_val(s, print_fmt,
402 						  se->fields[i]->name,
403 						  se->fields[i]->size,
404 						  &entry->fields[n_u64],
405 						  space);
406 
407 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
408 				trace_seq_puts(s, " (");
409 				trace_print_flags_seq(s, "|",
410 						      entry->fields[n_u64].as_u64,
411 						      __flags);
412 				trace_seq_putc(s, ')');
413 			}
414 			n_u64++;
415 		}
416 	}
417 end:
418 	trace_seq_putc(s, '\n');
419 
420 	return trace_handle_return(s);
421 }
422 
423 static struct trace_event_functions synth_event_funcs = {
424 	.trace		= print_synth_event
425 };
426 
427 static unsigned int trace_string(struct synth_trace_event *entry,
428 				 struct synth_event *event,
429 				 char *str_val,
430 				 bool is_dynamic,
431 				 unsigned int data_size,
432 				 unsigned int *n_u64)
433 {
434 	unsigned int len = 0;
435 	char *str_field;
436 	int ret;
437 
438 	if (is_dynamic) {
439 		union trace_synth_field *data = &entry->fields[*n_u64];
440 
441 		len = fetch_store_strlen((unsigned long)str_val);
442 		data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
443 		data->as_dynamic.len = len;
444 
445 		ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
446 
447 		(*n_u64)++;
448 	} else {
449 		str_field = (char *)&entry->fields[*n_u64].as_u64;
450 
451 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
452 		if ((unsigned long)str_val < TASK_SIZE)
453 			ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
454 		else
455 #endif
456 			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
457 
458 		if (ret < 0)
459 			strcpy(str_field, FAULT_STRING);
460 
461 		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
462 	}
463 
464 	return len;
465 }
466 
467 static unsigned int trace_stack(struct synth_trace_event *entry,
468 				 struct synth_event *event,
469 				 long *stack,
470 				 unsigned int data_size,
471 				 unsigned int *n_u64)
472 {
473 	union trace_synth_field *data = &entry->fields[*n_u64];
474 	unsigned int len;
475 	u32 data_offset;
476 	void *data_loc;
477 
478 	data_offset = struct_size(entry, fields, event->n_u64);
479 	data_offset += data_size;
480 
481 	for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
482 		if (!stack[len])
483 			break;
484 	}
485 
486 	len *= sizeof(long);
487 
488 	/* Find the dynamic section to copy the stack into. */
489 	data_loc = (void *)entry + data_offset;
490 	memcpy(data_loc, stack, len);
491 
492 	/* Fill in the field that holds the offset/len combo */
493 
494 	data->as_dynamic.offset = data_offset;
495 	data->as_dynamic.len = len;
496 
497 	(*n_u64)++;
498 
499 	return len;
500 }
501 
502 static notrace void trace_event_raw_event_synth(void *__data,
503 						u64 *var_ref_vals,
504 						unsigned int *var_ref_idx)
505 {
506 	unsigned int i, n_u64, val_idx, len, data_size = 0;
507 	struct trace_event_file *trace_file = __data;
508 	struct synth_trace_event *entry;
509 	struct trace_event_buffer fbuffer;
510 	struct trace_buffer *buffer;
511 	struct synth_event *event;
512 	int fields_size = 0;
513 
514 	event = trace_file->event_call->data;
515 
516 	if (trace_trigger_soft_disabled(trace_file))
517 		return;
518 
519 	fields_size = event->n_u64 * sizeof(u64);
520 
521 	for (i = 0; i < event->n_dynamic_fields; i++) {
522 		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
523 		char *str_val;
524 
525 		val_idx = var_ref_idx[field_pos];
526 		str_val = (char *)(long)var_ref_vals[val_idx];
527 
528 		if (event->dynamic_fields[i]->is_stack) {
529 			/* reserve one extra element for size */
530 			len = *((unsigned long *)str_val) + 1;
531 			len *= sizeof(unsigned long);
532 		} else {
533 			len = fetch_store_strlen((unsigned long)str_val);
534 		}
535 
536 		fields_size += len;
537 	}
538 
539 	/*
540 	 * Avoid ring buffer recursion detection, as this event
541 	 * is being performed within another event.
542 	 */
543 	buffer = trace_file->tr->array_buffer.buffer;
544 	guard(ring_buffer_nest)(buffer);
545 
546 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
547 					   sizeof(*entry) + fields_size);
548 	if (!entry)
549 		return;
550 
551 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
552 		val_idx = var_ref_idx[i];
553 		if (event->fields[i]->is_string) {
554 			char *str_val = (char *)(long)var_ref_vals[val_idx];
555 
556 			len = trace_string(entry, event, str_val,
557 					   event->fields[i]->is_dynamic,
558 					   data_size, &n_u64);
559 			data_size += len; /* only dynamic string increments */
560 		} else if (event->fields[i]->is_stack) {
561 			long *stack = (long *)(long)var_ref_vals[val_idx];
562 
563 			len = trace_stack(entry, event, stack,
564 					   data_size, &n_u64);
565 			data_size += len;
566 		} else {
567 			struct synth_field *field = event->fields[i];
568 			u64 val = var_ref_vals[val_idx];
569 
570 			switch (field->size) {
571 			case 1:
572 				entry->fields[n_u64].as_u8 = (u8)val;
573 				break;
574 
575 			case 2:
576 				entry->fields[n_u64].as_u16 = (u16)val;
577 				break;
578 
579 			case 4:
580 				entry->fields[n_u64].as_u32 = (u32)val;
581 				break;
582 
583 			default:
584 				entry->fields[n_u64].as_u64 = val;
585 				break;
586 			}
587 			n_u64++;
588 		}
589 	}
590 
591 	trace_event_buffer_commit(&fbuffer);
592 }
593 
594 static void free_synth_event_print_fmt(struct trace_event_call *call)
595 {
596 	if (call) {
597 		kfree(call->print_fmt);
598 		call->print_fmt = NULL;
599 	}
600 }
601 
602 static int __set_synth_event_print_fmt(struct synth_event *event,
603 				       char *buf, int len)
604 {
605 	const char *fmt;
606 	int pos = 0;
607 	int i;
608 
609 	/* When len=0, we just calculate the needed length */
610 #define LEN_OR_ZERO (len ? len - pos : 0)
611 
612 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
613 	for (i = 0; i < event->n_fields; i++) {
614 		fmt = synth_field_fmt(event->fields[i]->type);
615 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
616 				event->fields[i]->name, fmt,
617 				i == event->n_fields - 1 ? "" : " ");
618 	}
619 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
620 
621 	for (i = 0; i < event->n_fields; i++) {
622 		if (event->fields[i]->is_string &&
623 		    event->fields[i]->is_dynamic)
624 			pos += snprintf(buf + pos, LEN_OR_ZERO,
625 				", __get_str(%s)", event->fields[i]->name);
626 		else if (event->fields[i]->is_stack)
627 			pos += snprintf(buf + pos, LEN_OR_ZERO,
628 				", __get_stacktrace(%s)", event->fields[i]->name);
629 		else
630 			pos += snprintf(buf + pos, LEN_OR_ZERO,
631 					", REC->%s", event->fields[i]->name);
632 	}
633 
634 #undef LEN_OR_ZERO
635 
636 	/* return the length of print_fmt */
637 	return pos;
638 }
639 
640 static int set_synth_event_print_fmt(struct trace_event_call *call)
641 {
642 	struct synth_event *event = call->data;
643 	char *print_fmt;
644 	int len;
645 
646 	/* First: called with 0 length to calculate the needed length */
647 	len = __set_synth_event_print_fmt(event, NULL, 0);
648 
649 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
650 	if (!print_fmt)
651 		return -ENOMEM;
652 
653 	/* Second: actually write the @print_fmt */
654 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
655 	call->print_fmt = print_fmt;
656 
657 	return 0;
658 }
659 
660 static void free_synth_field(struct synth_field *field)
661 {
662 	kfree(field->type);
663 	kfree(field->name);
664 	kfree(field);
665 }
666 
667 static int check_field_version(const char *prefix, const char *field_type,
668 			       const char *field_name)
669 {
670 	/*
671 	 * For backward compatibility, the old synthetic event command
672 	 * format did not require semicolons, and in order to not
673 	 * break user space, that old format must still work. If a new
674 	 * feature is added, then the format that uses the new feature
675 	 * will be required to have semicolons, as nothing that uses
676 	 * the old format would be using the new, yet to be created,
677 	 * feature. When a new feature is added, this will detect it,
678 	 * and return a number greater than 1, and require the format
679 	 * to use semicolons.
680 	 */
681 	return 1;
682 }
683 
684 static struct synth_field *parse_synth_field(int argc, char **argv,
685 					     int *consumed, int *field_version)
686 {
687 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
688 	struct synth_field *field;
689 	int len, ret = -ENOMEM;
690 	struct seq_buf s;
691 	ssize_t size;
692 
693 	if (!strcmp(field_type, "unsigned")) {
694 		if (argc < 3) {
695 			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
696 			return ERR_PTR(-EINVAL);
697 		}
698 		prefix = "unsigned ";
699 		field_type = argv[1];
700 		field_name = argv[2];
701 		*consumed += 3;
702 	} else {
703 		field_name = argv[1];
704 		*consumed += 2;
705 	}
706 
707 	if (!field_name) {
708 		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
709 		return ERR_PTR(-EINVAL);
710 	}
711 
712 	*field_version = check_field_version(prefix, field_type, field_name);
713 
714 	field = kzalloc(sizeof(*field), GFP_KERNEL);
715 	if (!field)
716 		return ERR_PTR(-ENOMEM);
717 
718 	len = strlen(field_name);
719 	array = strchr(field_name, '[');
720 	if (array)
721 		len -= strlen(array);
722 
723 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
724 	if (!field->name)
725 		goto free;
726 
727 	if (!is_good_name(field->name)) {
728 		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
729 		ret = -EINVAL;
730 		goto free;
731 	}
732 
733 	len = strlen(field_type) + 1;
734 
735 	if (array)
736 		len += strlen(array);
737 
738 	if (prefix)
739 		len += strlen(prefix);
740 
741 	field->type = kzalloc(len, GFP_KERNEL);
742 	if (!field->type)
743 		goto free;
744 
745 	seq_buf_init(&s, field->type, len);
746 	if (prefix)
747 		seq_buf_puts(&s, prefix);
748 	seq_buf_puts(&s, field_type);
749 	if (array)
750 		seq_buf_puts(&s, array);
751 	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
752 		goto free;
753 
754 	s.buffer[s.len] = '\0';
755 
756 	size = synth_field_size(field->type);
757 	if (size < 0) {
758 		if (array)
759 			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
760 		else
761 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
762 		ret = -EINVAL;
763 		goto free;
764 	} else if (size == 0) {
765 		if (synth_field_is_string(field->type) ||
766 		    synth_field_is_stack(field->type)) {
767 			char *type;
768 
769 			len = sizeof("__data_loc ") + strlen(field->type) + 1;
770 			type = kzalloc(len, GFP_KERNEL);
771 			if (!type)
772 				goto free;
773 
774 			seq_buf_init(&s, type, len);
775 			seq_buf_puts(&s, "__data_loc ");
776 			seq_buf_puts(&s, field->type);
777 
778 			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
779 				goto free;
780 			s.buffer[s.len] = '\0';
781 
782 			kfree(field->type);
783 			field->type = type;
784 
785 			field->is_dynamic = true;
786 			size = sizeof(u64);
787 		} else {
788 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
789 			ret = -EINVAL;
790 			goto free;
791 		}
792 	}
793 	field->size = size;
794 
795 	if (synth_field_is_string(field->type))
796 		field->is_string = true;
797 	else if (synth_field_is_stack(field->type))
798 		field->is_stack = true;
799 
800 	field->is_signed = synth_field_signed(field->type);
801  out:
802 	return field;
803  free:
804 	free_synth_field(field);
805 	field = ERR_PTR(ret);
806 	goto out;
807 }
808 
809 static void free_synth_tracepoint(struct tracepoint *tp)
810 {
811 	if (!tp)
812 		return;
813 
814 	kfree(tp->name);
815 	kfree(tp);
816 }
817 
818 static struct tracepoint *alloc_synth_tracepoint(char *name)
819 {
820 	struct tracepoint *tp;
821 
822 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
823 	if (!tp)
824 		return ERR_PTR(-ENOMEM);
825 
826 	tp->name = kstrdup(name, GFP_KERNEL);
827 	if (!tp->name) {
828 		kfree(tp);
829 		return ERR_PTR(-ENOMEM);
830 	}
831 
832 	return tp;
833 }
834 
835 struct synth_event *find_synth_event(const char *name)
836 {
837 	struct dyn_event *pos;
838 	struct synth_event *event;
839 
840 	for_each_dyn_event(pos) {
841 		if (!is_synth_event(pos))
842 			continue;
843 		event = to_synth_event(pos);
844 		if (strcmp(event->name, name) == 0)
845 			return event;
846 	}
847 
848 	return NULL;
849 }
850 
851 static struct trace_event_fields synth_event_fields_array[] = {
852 	{ .type = TRACE_FUNCTION_TYPE,
853 	  .define_fields = synth_event_define_fields },
854 	{}
855 };
856 
857 static int synth_event_reg(struct trace_event_call *call,
858 		    enum trace_reg type, void *data)
859 {
860 	struct synth_event *event = container_of(call, struct synth_event, call);
861 
862 	switch (type) {
863 #ifdef CONFIG_PERF_EVENTS
864 	case TRACE_REG_PERF_REGISTER:
865 #endif
866 	case TRACE_REG_REGISTER:
867 		if (!try_module_get(event->mod))
868 			return -EBUSY;
869 		break;
870 	default:
871 		break;
872 	}
873 
874 	int ret = trace_event_reg(call, type, data);
875 
876 	switch (type) {
877 #ifdef CONFIG_PERF_EVENTS
878 	case TRACE_REG_PERF_UNREGISTER:
879 #endif
880 	case TRACE_REG_UNREGISTER:
881 		module_put(event->mod);
882 		break;
883 	default:
884 		break;
885 	}
886 	return ret;
887 }
888 
889 static int register_synth_event(struct synth_event *event)
890 {
891 	struct trace_event_call *call = &event->call;
892 	int ret = 0;
893 
894 	event->call.class = &event->class;
895 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
896 	if (!event->class.system) {
897 		ret = -ENOMEM;
898 		goto out;
899 	}
900 
901 	event->tp = alloc_synth_tracepoint(event->name);
902 	if (IS_ERR(event->tp)) {
903 		ret = PTR_ERR(event->tp);
904 		event->tp = NULL;
905 		goto out;
906 	}
907 
908 	INIT_LIST_HEAD(&call->class->fields);
909 	call->event.funcs = &synth_event_funcs;
910 	call->class->fields_array = synth_event_fields_array;
911 
912 	ret = register_trace_event(&call->event);
913 	if (!ret) {
914 		ret = -ENODEV;
915 		goto out;
916 	}
917 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
918 	call->class->reg = synth_event_reg;
919 	call->class->probe = trace_event_raw_event_synth;
920 	call->data = event;
921 	call->tp = event->tp;
922 
923 	ret = trace_add_event_call(call);
924 	if (ret) {
925 		pr_warn("Failed to register synthetic event: %s\n",
926 			trace_event_name(call));
927 		goto err;
928 	}
929 
930 	ret = set_synth_event_print_fmt(call);
931 	/* unregister_trace_event() will be called inside */
932 	if (ret < 0)
933 		trace_remove_event_call(call);
934  out:
935 	return ret;
936  err:
937 	unregister_trace_event(&call->event);
938 	goto out;
939 }
940 
941 static int unregister_synth_event(struct synth_event *event)
942 {
943 	struct trace_event_call *call = &event->call;
944 	int ret;
945 
946 	ret = trace_remove_event_call(call);
947 
948 	return ret;
949 }
950 
951 static void free_synth_event(struct synth_event *event)
952 {
953 	unsigned int i;
954 
955 	if (!event)
956 		return;
957 
958 	for (i = 0; i < event->n_fields; i++)
959 		free_synth_field(event->fields[i]);
960 
961 	kfree(event->fields);
962 	kfree(event->dynamic_fields);
963 	kfree(event->name);
964 	kfree(event->class.system);
965 	free_synth_tracepoint(event->tp);
966 	free_synth_event_print_fmt(&event->call);
967 	kfree(event);
968 }
969 
970 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
971 					     struct synth_field **fields)
972 {
973 	unsigned int i, j, n_dynamic_fields = 0;
974 	struct synth_event *event;
975 
976 	event = kzalloc(sizeof(*event), GFP_KERNEL);
977 	if (!event) {
978 		event = ERR_PTR(-ENOMEM);
979 		goto out;
980 	}
981 
982 	event->name = kstrdup(name, GFP_KERNEL);
983 	if (!event->name) {
984 		kfree(event);
985 		event = ERR_PTR(-ENOMEM);
986 		goto out;
987 	}
988 
989 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
990 	if (!event->fields) {
991 		free_synth_event(event);
992 		event = ERR_PTR(-ENOMEM);
993 		goto out;
994 	}
995 
996 	for (i = 0; i < n_fields; i++)
997 		if (fields[i]->is_dynamic)
998 			n_dynamic_fields++;
999 
1000 	if (n_dynamic_fields) {
1001 		event->dynamic_fields = kcalloc(n_dynamic_fields,
1002 						sizeof(*event->dynamic_fields),
1003 						GFP_KERNEL);
1004 		if (!event->dynamic_fields) {
1005 			free_synth_event(event);
1006 			event = ERR_PTR(-ENOMEM);
1007 			goto out;
1008 		}
1009 	}
1010 
1011 	dyn_event_init(&event->devent, &synth_event_ops);
1012 
1013 	for (i = 0, j = 0; i < n_fields; i++) {
1014 		fields[i]->field_pos = i;
1015 		event->fields[i] = fields[i];
1016 
1017 		if (fields[i]->is_dynamic)
1018 			event->dynamic_fields[j++] = fields[i];
1019 	}
1020 	event->n_dynamic_fields = j;
1021 	event->n_fields = n_fields;
1022  out:
1023 	return event;
1024 }
1025 
1026 static int synth_event_check_arg_fn(void *data)
1027 {
1028 	struct dynevent_arg_pair *arg_pair = data;
1029 	int size;
1030 
1031 	size = synth_field_size((char *)arg_pair->lhs);
1032 	if (size == 0) {
1033 		if (strstr((char *)arg_pair->lhs, "["))
1034 			return 0;
1035 	}
1036 
1037 	return size ? 0 : -EINVAL;
1038 }
1039 
1040 /**
1041  * synth_event_add_field - Add a new field to a synthetic event cmd
1042  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1043  * @type: The type of the new field to add
1044  * @name: The name of the new field to add
1045  *
1046  * Add a new field to a synthetic event cmd object.  Field ordering is in
1047  * the same order the fields are added.
1048  *
1049  * See synth_field_size() for available types. If field_name contains
1050  * [n] the field is considered to be an array.
1051  *
1052  * Return: 0 if successful, error otherwise.
1053  */
1054 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1055 			  const char *name)
1056 {
1057 	struct dynevent_arg_pair arg_pair;
1058 	int ret;
1059 
1060 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1061 		return -EINVAL;
1062 
1063 	if (!type || !name)
1064 		return -EINVAL;
1065 
1066 	dynevent_arg_pair_init(&arg_pair, 0, ';');
1067 
1068 	arg_pair.lhs = type;
1069 	arg_pair.rhs = name;
1070 
1071 	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1072 	if (ret)
1073 		return ret;
1074 
1075 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1076 		ret = -EINVAL;
1077 
1078 	return ret;
1079 }
1080 EXPORT_SYMBOL_GPL(synth_event_add_field);
1081 
1082 /**
1083  * synth_event_add_field_str - Add a new field to a synthetic event cmd
1084  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1085  * @type_name: The type and name of the new field to add, as a single string
1086  *
1087  * Add a new field to a synthetic event cmd object, as a single
1088  * string.  The @type_name string is expected to be of the form 'type
1089  * name', which will be appended by ';'.  No sanity checking is done -
1090  * what's passed in is assumed to already be well-formed.  Field
1091  * ordering is in the same order the fields are added.
1092  *
1093  * See synth_field_size() for available types. If field_name contains
1094  * [n] the field is considered to be an array.
1095  *
1096  * Return: 0 if successful, error otherwise.
1097  */
1098 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1099 {
1100 	struct dynevent_arg arg;
1101 	int ret;
1102 
1103 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1104 		return -EINVAL;
1105 
1106 	if (!type_name)
1107 		return -EINVAL;
1108 
1109 	dynevent_arg_init(&arg, ';');
1110 
1111 	arg.str = type_name;
1112 
1113 	ret = dynevent_arg_add(cmd, &arg, NULL);
1114 	if (ret)
1115 		return ret;
1116 
1117 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1118 		ret = -EINVAL;
1119 
1120 	return ret;
1121 }
1122 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
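/*
 * Illustrative sketch (not part of the original file): the two helpers above
 * are equivalent ways of appending a field to a command started with
 * synth_event_gen_cmd_start() or synth_event_gen_cmd_array_start().  The
 * field names used here are hypothetical.
 */
static int __maybe_unused example_add_fields(struct dynevent_cmd *cmd)
{
	int ret;

	/* type and name passed separately ... */
	ret = synth_event_add_field(cmd, "u64", "lat_ns");
	if (ret)
		return ret;

	/* ... or as a single "type name" string */
	return synth_event_add_field_str(cmd, "char[16] comm");
}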
1123 
1124 /**
1125  * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1126  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1127  * @fields: An array of type/name field descriptions
1128  * @n_fields: The number of field descriptions contained in the fields array
1129  *
1130  * Add a new set of fields to a synthetic event cmd object.  The event
1131  * fields that will be defined for the event should be passed in as an
1132  * array of struct synth_field_desc, and the number of elements in the
1133  * array passed in as n_fields.  Field ordering will retain the
1134  * ordering given in the fields array.
1135  *
1136  * See synth_field_size() for available types. If field_name contains
1137  * [n] the field is considered to be an array.
1138  *
1139  * Return: 0 if successful, error otherwise.
1140  */
1141 int synth_event_add_fields(struct dynevent_cmd *cmd,
1142 			   struct synth_field_desc *fields,
1143 			   unsigned int n_fields)
1144 {
1145 	unsigned int i;
1146 	int ret = 0;
1147 
1148 	for (i = 0; i < n_fields; i++) {
1149 		if (fields[i].type == NULL || fields[i].name == NULL) {
1150 			ret = -EINVAL;
1151 			break;
1152 		}
1153 
1154 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1155 		if (ret)
1156 			break;
1157 	}
1158 
1159 	return ret;
1160 }
1161 EXPORT_SYMBOL_GPL(synth_event_add_fields);
1162 
1163 /**
1164  * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1165  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1166  * @name: The name of the synthetic event
1167  * @mod: The module creating the event, NULL if not created from a module
1168  * @...: Variable number of arg (pairs), one pair for each field
1169  *
1170  * NOTE: Users normally won't want to call this function directly, but
1171  * rather use the synth_event_gen_cmd_start() wrapper, which
1172  * automatically adds a NULL to the end of the arg list.  If this
1173  * function is used directly, make sure the last arg in the variable
1174  * arg list is NULL.
1175  *
1176  * Generate a synthetic event command to be executed by
1177  * synth_event_gen_cmd_end().  This function can be used to generate
1178  * the complete command or only the first part of it; in the latter
1179  * case, synth_event_add_field(), synth_event_add_field_str(), or
1180  * synth_event_add_fields() can be used to add more fields following
1181  * this.
1182  *
1183  * There should be an even number of variable args, each pair consisting
1184  * of a type followed by a field name.
1185  *
1186  * See synth_field_size() for available types. If field_name contains
1187  * [n] the field is considered to be an array.
1188  *
1189  * Return: 0 if successful, error otherwise.
1190  */
1191 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1192 				struct module *mod, ...)
1193 {
1194 	struct dynevent_arg arg;
1195 	va_list args;
1196 	int ret;
1197 
1198 	cmd->event_name = name;
1199 	cmd->private_data = mod;
1200 
1201 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1202 		return -EINVAL;
1203 
1204 	dynevent_arg_init(&arg, 0);
1205 	arg.str = name;
1206 	ret = dynevent_arg_add(cmd, &arg, NULL);
1207 	if (ret)
1208 		return ret;
1209 
1210 	va_start(args, mod);
1211 	for (;;) {
1212 		const char *type, *name;
1213 
1214 		type = va_arg(args, const char *);
1215 		if (!type)
1216 			break;
1217 		name = va_arg(args, const char *);
1218 		if (!name)
1219 			break;
1220 
1221 		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1222 			ret = -EINVAL;
1223 			break;
1224 		}
1225 
1226 		ret = synth_event_add_field(cmd, type, name);
1227 		if (ret)
1228 			break;
1229 	}
1230 	va_end(args);
1231 
1232 	return ret;
1233 }
1234 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
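/*
 * Illustrative sketch (not part of the original file), assuming the
 * synth_event_gen_cmd_start() wrapper macro mentioned above, which appends
 * the required trailing NULL, and the synth_event_gen_cmd_end() macro used
 * by synth_event_create() below.  Event and field names are hypothetical.
 */
static int __maybe_unused example_gen_cmd_varargs(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = synth_event_gen_cmd_start(&cmd, "sched_latency", THIS_MODULE,
					"pid_t", "pid",
					"u64", "lat_ns");
	if (!ret)
		ret = synth_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}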
1235 
1236 /**
1237  * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1238  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1239  * @name: The name of the synthetic event
1240  * @mod: The module creating the event, NULL if not created from a module
1241  * @fields: An array of type/name field descriptions
1242  * @n_fields: The number of field descriptions contained in the fields array
1243  *
1244  * Generate a synthetic event command to be executed by
1245  * synth_event_gen_cmd_end().  This function can be used to generate
1246  * the complete command or only the first part of it; in the latter
1247  * case, synth_event_add_field(), synth_event_add_field_str(), or
1248  * synth_event_add_fields() can be used to add more fields following
1249  * this.
1250  *
1251  * The event fields that will be defined for the event should be
1252  * passed in as an array of struct synth_field_desc, and the number of
1253  * elements in the array passed in as n_fields.  Field ordering will
1254  * retain the ordering given in the fields array.
1255  *
1256  * See synth_field_size() for available types. If field_name contains
1257  * [n] the field is considered to be an array.
1258  *
1259  * Return: 0 if successful, error otherwise.
1260  */
1261 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1262 				    struct module *mod,
1263 				    struct synth_field_desc *fields,
1264 				    unsigned int n_fields)
1265 {
1266 	struct dynevent_arg arg;
1267 	unsigned int i;
1268 	int ret = 0;
1269 
1270 	cmd->event_name = name;
1271 	cmd->private_data = mod;
1272 
1273 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1274 		return -EINVAL;
1275 
1276 	if (n_fields > SYNTH_FIELDS_MAX)
1277 		return -EINVAL;
1278 
1279 	dynevent_arg_init(&arg, 0);
1280 	arg.str = name;
1281 	ret = dynevent_arg_add(cmd, &arg, NULL);
1282 	if (ret)
1283 		return ret;
1284 
1285 	for (i = 0; i < n_fields; i++) {
1286 		if (fields[i].type == NULL || fields[i].name == NULL)
1287 			return -EINVAL;
1288 
1289 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1290 		if (ret)
1291 			break;
1292 	}
1293 
1294 	return ret;
1295 }
1296 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
1297 
1298 static int __create_synth_event(const char *name, const char *raw_fields)
1299 {
1300 	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1301 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1302 	int consumed, cmd_version = 1, n_fields_this_loop;
1303 	int i, argc, n_fields = 0, ret = 0;
1304 	struct synth_event *event = NULL;
1305 
1306 	/*
1307 	 * Argument syntax:
1308 	 *  - Add synthetic event: <event_name> field[;field] ...
1309 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1310 	 *      where 'field' = type field_name
1311 	 */
1312 
1313 	if (name[0] == '\0') {
1314 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1315 		return -EINVAL;
1316 	}
1317 
1318 	if (!is_good_name(name)) {
1319 		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1320 		return -EINVAL;
1321 	}
1322 
1323 	mutex_lock(&event_mutex);
1324 
1325 	event = find_synth_event(name);
1326 	if (event) {
1327 		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1328 		ret = -EEXIST;
1329 		goto err;
1330 	}
1331 
1332 	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1333 	if (!tmp_fields) {
1334 		ret = -ENOMEM;
1335 		goto err;
1336 	}
1337 
1338 	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1339 		argv = argv_split(GFP_KERNEL, field_str, &argc);
1340 		if (!argv) {
1341 			ret = -ENOMEM;
1342 			goto err;
1343 		}
1344 
1345 		if (!argc) {
1346 			argv_free(argv);
1347 			continue;
1348 		}
1349 
1350 		n_fields_this_loop = 0;
1351 		consumed = 0;
1352 		while (argc > consumed) {
1353 			int field_version;
1354 
1355 			field = parse_synth_field(argc - consumed,
1356 						  argv + consumed, &consumed,
1357 						  &field_version);
1358 			if (IS_ERR(field)) {
1359 				ret = PTR_ERR(field);
1360 				goto err_free_arg;
1361 			}
1362 
1363 			/*
1364 			 * Track the highest version of any field we
1365 			 * found in the command.
1366 			 */
1367 			if (field_version > cmd_version)
1368 				cmd_version = field_version;
1369 
1370 			/*
1371 			 * Now sort out what is and isn't valid for
1372 			 * each supported version.
1373 			 *
1374 			 * If we see more than 1 field per loop, it
1375 			 * means we have multiple fields between
1376 			 * semicolons, and that's something we no
1377 			 * longer support in a version 2 or greater
1378 			 * command.
1379 			 */
1380 			if (cmd_version > 1 && n_fields_this_loop >= 1) {
1381 				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1382 				ret = -EINVAL;
1383 				goto err_free_arg;
1384 			}
1385 
1386 			if (n_fields == SYNTH_FIELDS_MAX) {
1387 				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1388 				ret = -EINVAL;
1389 				goto err_free_arg;
1390 			}
1391 			fields[n_fields++] = field;
1392 
1393 			n_fields_this_loop++;
1394 		}
1395 		argv_free(argv);
1396 
1397 		if (consumed < argc) {
1398 			synth_err(SYNTH_ERR_INVALID_CMD, 0);
1399 			ret = -EINVAL;
1400 			goto err;
1401 		}
1402 
1403 	}
1404 
1405 	if (n_fields == 0) {
1406 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1407 		ret = -EINVAL;
1408 		goto err;
1409 	}
1410 
1411 	event = alloc_synth_event(name, n_fields, fields);
1412 	if (IS_ERR(event)) {
1413 		ret = PTR_ERR(event);
1414 		event = NULL;
1415 		goto err;
1416 	}
1417 	ret = register_synth_event(event);
1418 	if (!ret)
1419 		dyn_event_add(&event->devent, &event->call);
1420 	else
1421 		free_synth_event(event);
1422  out:
1423 	mutex_unlock(&event_mutex);
1424 
1425 	kfree(saved_fields);
1426 
1427 	return ret;
1428  err_free_arg:
1429 	argv_free(argv);
1430  err:
1431 	for (i = 0; i < n_fields; i++)
1432 		free_synth_field(fields[i]);
1433 
1434 	goto out;
1435 }
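/*
 * Illustrative sketch (not part of the original file): the command string
 * parsed above is what user space writes to the tracefs files, e.g.
 *
 *   # echo 'wakeup_lat pid_t pid; char[16] comm; u64 lat_ns' \
 *         >> /sys/kernel/tracing/synthetic_events
 *   # echo '!wakeup_lat' >> /sys/kernel/tracing/synthetic_events
 *
 * The event and field names are hypothetical; the dynamic_events file
 * accepts the same fields with an 's:' / '-:' prefix, as described in the
 * INVALID_DYN_CMD error text above.
 */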
1436 
1437 /**
1438  * synth_event_create - Create a new synthetic event
1439  * @name: The name of the new synthetic event
1440  * @fields: An array of type/name field descriptions
1441  * @n_fields: The number of field descriptions contained in the fields array
1442  * @mod: The module creating the event, NULL if not created from a module
1443  *
1444  * Create a new synthetic event with the given name under the
1445  * trace/events/synthetic/ directory.  The event fields that will be
1446  * defined for the event should be passed in as an array of struct
1447  * synth_field_desc, and the number of elements in the array passed in as
1448  * n_fields. Field ordering will retain the ordering given in the
1449  * fields array.
1450  *
1451  * If the new synthetic event is being created from a module, the mod
1452  * param must be non-NULL.  This will ensure that the trace buffer
1453  * won't contain unreadable events.
1454  *
1455  * The new synth event should be deleted using synth_event_delete()
1456  * function.  The new synthetic event can be generated from modules or
1457  * other kernel code using synth_event_trace() and related functions.
1458  *
1459  * Return: 0 if successful, error otherwise.
1460  */
1461 int synth_event_create(const char *name, struct synth_field_desc *fields,
1462 		       unsigned int n_fields, struct module *mod)
1463 {
1464 	struct dynevent_cmd cmd;
1465 	char *buf;
1466 	int ret;
1467 
1468 	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1469 	if (!buf)
1470 		return -ENOMEM;
1471 
1472 	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1473 
1474 	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1475 					      fields, n_fields);
1476 	if (ret)
1477 		goto out;
1478 
1479 	ret = synth_event_gen_cmd_end(&cmd);
1480  out:
1481 	kfree(buf);
1482 
1483 	return ret;
1484 }
1485 EXPORT_SYMBOL_GPL(synth_event_create);
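/*
 * Illustrative sketch (not part of the original file): one-shot creation of
 * a synthetic event from module or kernel code.  The event and field names
 * are hypothetical; the event would later be removed with
 * synth_event_delete("wakeup_lat").
 */
static int __maybe_unused example_synth_event_create(void)
{
	struct synth_field_desc fields[] = {
		{ .type = "pid_t",	.name = "pid"	 },
		{ .type = "char[16]",	.name = "comm"	 },
		{ .type = "u64",	.name = "lat_ns" },
	};

	return synth_event_create("wakeup_lat", fields, ARRAY_SIZE(fields),
				  THIS_MODULE);
}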
1486 
1487 static int destroy_synth_event(struct synth_event *se)
1488 {
1489 	int ret;
1490 
1491 	if (se->ref)
1492 		return -EBUSY;
1493 
1494 	if (trace_event_dyn_busy(&se->call))
1495 		return -EBUSY;
1496 
1497 	ret = unregister_synth_event(se);
1498 	if (!ret) {
1499 		dyn_event_remove(&se->devent);
1500 		free_synth_event(se);
1501 	}
1502 
1503 	return ret;
1504 }
1505 
1506 /**
1507  * synth_event_delete - Delete a synthetic event
1508  * @event_name: The name of the new synthetic event
1509  *
1510  * Delete a synthetic event that was created with synth_event_create().
1511  *
1512  * Return: 0 if successful, error otherwise.
1513  */
1514 int synth_event_delete(const char *event_name)
1515 {
1516 	struct synth_event *se = NULL;
1517 	struct module *mod = NULL;
1518 	int ret = -ENOENT;
1519 
1520 	mutex_lock(&event_mutex);
1521 	se = find_synth_event(event_name);
1522 	if (se) {
1523 		mod = se->mod;
1524 		ret = destroy_synth_event(se);
1525 	}
1526 	mutex_unlock(&event_mutex);
1527 
1528 	if (mod) {
1529 		/*
1530 		 * It is safest to reset the ring buffer if the module
1531 		 * being unloaded registered any events that were
1532 		 * used. The only worry is if a new module gets
1533 		 * loaded, and takes on the same id as the events of
1534 		 * this module. When printing out the buffer, traced
1535 		 * events left over from this module may be passed to
1536 		 * the new module events and unexpected results may
1537 		 * occur.
1538 		 */
1539 		tracing_reset_all_online_cpus();
1540 	}
1541 
1542 	return ret;
1543 }
1544 EXPORT_SYMBOL_GPL(synth_event_delete);
1545 
1546 static int check_command(const char *raw_command)
1547 {
1548 	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1549 	int argc, ret = 0;
1550 
1551 	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1552 	if (!cmd)
1553 		return -ENOMEM;
1554 
1555 	name_and_field = strsep(&cmd, ";");
1556 	if (!name_and_field) {
1557 		ret = -EINVAL;
1558 		goto free;
1559 	}
1560 
1561 	if (name_and_field[0] == '!')
1562 		goto free;
1563 
1564 	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1565 	if (!argv) {
1566 		ret = -ENOMEM;
1567 		goto free;
1568 	}
1569 	argv_free(argv);
1570 
1571 	if (argc < 3)
1572 		ret = -EINVAL;
1573 free:
1574 	kfree(saved_cmd);
1575 
1576 	return ret;
1577 }
1578 
1579 static int create_or_delete_synth_event(const char *raw_command)
1580 {
1581 	char *name = NULL, *fields, *p;
1582 	int ret = 0;
1583 
1584 	raw_command = skip_spaces(raw_command);
1585 	if (raw_command[0] == '\0')
1586 		return ret;
1587 
1588 	last_cmd_set(raw_command);
1589 
1590 	ret = check_command(raw_command);
1591 	if (ret) {
1592 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1593 		return ret;
1594 	}
1595 
1596 	p = strpbrk(raw_command, " \t");
1597 	if (!p && raw_command[0] != '!') {
1598 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1599 		ret = -EINVAL;
1600 		goto free;
1601 	}
1602 
1603 	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1604 	if (!name)
1605 		return -ENOMEM;
1606 
1607 	if (name[0] == '!') {
1608 		ret = synth_event_delete(name + 1);
1609 		goto free;
1610 	}
1611 
1612 	fields = skip_spaces(p);
1613 
1614 	ret = __create_synth_event(name, fields);
1615 free:
1616 	kfree(name);
1617 
1618 	return ret;
1619 }
1620 
1621 static int synth_event_run_command(struct dynevent_cmd *cmd)
1622 {
1623 	struct synth_event *se;
1624 	int ret;
1625 
1626 	ret = create_or_delete_synth_event(cmd->seq.buffer);
1627 	if (ret)
1628 		return ret;
1629 
1630 	se = find_synth_event(cmd->event_name);
1631 	if (WARN_ON(!se))
1632 		return -ENOENT;
1633 
1634 	se->mod = cmd->private_data;
1635 
1636 	return ret;
1637 }
1638 
1639 /**
1640  * synth_event_cmd_init - Initialize a synthetic event command object
1641  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1642  * @buf: A pointer to the buffer used to build the command
1643  * @maxlen: The length of the buffer passed in @buf
1644  *
1645  * Initialize a synthetic event command object.  Use this before
1646  * calling any of the other dynevent_cmd functions.
1647  */
1648 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1649 {
1650 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1651 			  synth_event_run_command);
1652 }
1653 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1654 
1655 static inline int
1656 __synth_event_trace_init(struct trace_event_file *file,
1657 			 struct synth_event_trace_state *trace_state)
1658 {
1659 	int ret = 0;
1660 
1661 	memset(trace_state, '\0', sizeof(*trace_state));
1662 
1663 	/*
1664 	 * Normal event tracing doesn't get called at all unless the
1665 	 * ENABLED bit is set (which attaches the probe thus allowing
1666 	 * this code to be called, etc).  Because this is called
1667 	 * directly by the user, we don't have that but we still need
1668 	 * to honor not logging when disabled.  For the iterated
1669 	 * trace case, we save the enabled state upon start and just
1670 	 * ignore the following data calls.
1671 	 */
1672 	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1673 	    trace_trigger_soft_disabled(file)) {
1674 		trace_state->disabled = true;
1675 		ret = -ENOENT;
1676 		goto out;
1677 	}
1678 
1679 	trace_state->event = file->event_call->data;
1680 out:
1681 	return ret;
1682 }
1683 
1684 static inline int
1685 __synth_event_trace_start(struct trace_event_file *file,
1686 			  struct synth_event_trace_state *trace_state,
1687 			  int dynamic_fields_size)
1688 {
1689 	int entry_size, fields_size = 0;
1690 	int ret = 0;
1691 
1692 	fields_size = trace_state->event->n_u64 * sizeof(u64);
1693 	fields_size += dynamic_fields_size;
1694 
1695 	/*
1696 	 * Avoid ring buffer recursion detection, as this event
1697 	 * is being performed within another event.
1698 	 */
1699 	trace_state->buffer = file->tr->array_buffer.buffer;
1700 	ring_buffer_nest_start(trace_state->buffer);
1701 
1702 	entry_size = sizeof(*trace_state->entry) + fields_size;
1703 	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1704 							file,
1705 							entry_size);
1706 	if (!trace_state->entry) {
1707 		ring_buffer_nest_end(trace_state->buffer);
1708 		ret = -EINVAL;
1709 	}
1710 
1711 	return ret;
1712 }
1713 
1714 static inline void
1715 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1716 {
1717 	trace_event_buffer_commit(&trace_state->fbuffer);
1718 
1719 	ring_buffer_nest_end(trace_state->buffer);
1720 }
1721 
1722 /**
1723  * synth_event_trace - Trace a synthetic event
1724  * @file: The trace_event_file representing the synthetic event
1725  * @n_vals: The number of values in vals
1726  * @...: Variable number of args containing the event values
1727  *
1728  * Trace a synthetic event using the values passed in the variable
1729  * argument list.
1730  *
1731  * The argument list should be a list of 'n_vals' u64 values.  The number
1732  * of vals must match the number of fields in the synthetic event, and
1733  * must be in the same order as the synthetic event fields.
1734  *
1735  * All vals should be cast to u64, and string vals are just pointers
1736  * to strings, cast to u64.  Strings will be copied into space
1737  * reserved in the event for the string, using these pointers.
1738  *
1739  * Return: 0 on success, err otherwise.
1740  */
1741 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1742 {
1743 	unsigned int i, n_u64, len, data_size = 0;
1744 	struct synth_event_trace_state state;
1745 	va_list args;
1746 	int ret;
1747 
1748 	ret = __synth_event_trace_init(file, &state);
1749 	if (ret) {
1750 		if (ret == -ENOENT)
1751 			ret = 0; /* just disabled, not really an error */
1752 		return ret;
1753 	}
1754 
1755 	if (state.event->n_dynamic_fields) {
1756 		va_start(args, n_vals);
1757 
1758 		for (i = 0; i < state.event->n_fields; i++) {
1759 			u64 val = va_arg(args, u64);
1760 
1761 			if (state.event->fields[i]->is_string &&
1762 			    state.event->fields[i]->is_dynamic) {
1763 				char *str_val = (char *)(long)val;
1764 
1765 				data_size += strlen(str_val) + 1;
1766 			}
1767 		}
1768 
1769 		va_end(args);
1770 	}
1771 
1772 	ret = __synth_event_trace_start(file, &state, data_size);
1773 	if (ret)
1774 		return ret;
1775 
1776 	if (n_vals != state.event->n_fields) {
1777 		ret = -EINVAL;
1778 		goto out;
1779 	}
1780 
1781 	data_size = 0;
1782 
1783 	va_start(args, n_vals);
1784 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1785 		u64 val;
1786 
1787 		val = va_arg(args, u64);
1788 
1789 		if (state.event->fields[i]->is_string) {
1790 			char *str_val = (char *)(long)val;
1791 
1792 			len = trace_string(state.entry, state.event, str_val,
1793 					   state.event->fields[i]->is_dynamic,
1794 					   data_size, &n_u64);
1795 			data_size += len; /* only dynamic string increments */
1796 		} else {
1797 			struct synth_field *field = state.event->fields[i];
1798 
1799 			switch (field->size) {
1800 			case 1:
1801 				state.entry->fields[n_u64].as_u8 = (u8)val;
1802 				break;
1803 
1804 			case 2:
1805 				state.entry->fields[n_u64].as_u16 = (u16)val;
1806 				break;
1807 
1808 			case 4:
1809 				state.entry->fields[n_u64].as_u32 = (u32)val;
1810 				break;
1811 
1812 			default:
1813 				state.entry->fields[n_u64].as_u64 = val;
1814 				break;
1815 			}
1816 			n_u64++;
1817 		}
1818 	}
1819 	va_end(args);
1820 out:
1821 	__synth_event_trace_end(&state);
1822 
1823 	return ret;
1824 }
1825 EXPORT_SYMBOL_GPL(synth_event_trace);
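/*
 * Illustrative sketch (not part of the original file): generating one
 * instance of the hypothetical "wakeup_lat" event created earlier.  The
 * trace_event_file would typically be looked up once beforehand, e.g. with
 * trace_get_event_file(NULL, "synthetic", "wakeup_lat") (assumed API).
 */
static int __maybe_unused example_synth_event_trace(struct trace_event_file *file)
{
	/* all values are passed as u64; strings as pointers cast to u64 */
	return synth_event_trace(file, 3,	   /* number of values */
				 (u64)444,	   /* pid */
				 (u64)(long)"cat", /* comm */
				 (u64)2000000);	   /* lat_ns */
}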
1826 
1827 /**
1828  * synth_event_trace_array - Trace a synthetic event from an array
1829  * @file: The trace_event_file representing the synthetic event
1830  * @vals: Array of values
1831  * @n_vals: The number of values in vals
1832  *
1833  * Trace a synthetic event using the values passed in as 'vals'.
1834  *
1835  * The 'vals' array is just an array of 'n_vals' u64.  The number of
1836  * vals must match the number of fields in the synthetic event, and
1837  * must be in the same order as the synthetic event fields.
1838  *
1839  * All vals should be cast to u64, and string vals are just pointers
1840  * to strings, cast to u64.  Strings will be copied into space
1841  * reserved in the event for the string, using these pointers.
1842  *
1843  * Return: 0 on success, err otherwise.
1844  */
1845 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1846 			    unsigned int n_vals)
1847 {
1848 	unsigned int i, n_u64, field_pos, len, data_size = 0;
1849 	struct synth_event_trace_state state;
1850 	char *str_val;
1851 	int ret;
1852 
1853 	ret = __synth_event_trace_init(file, &state);
1854 	if (ret) {
1855 		if (ret == -ENOENT)
1856 			ret = 0; /* just disabled, not really an error */
1857 		return ret;
1858 	}
1859 
1860 	if (state.event->n_dynamic_fields) {
1861 		for (i = 0; i < state.event->n_dynamic_fields; i++) {
1862 			field_pos = state.event->dynamic_fields[i]->field_pos;
1863 			str_val = (char *)(long)vals[field_pos];
1864 			len = strlen(str_val) + 1;
1865 			data_size += len;
1866 		}
1867 	}
1868 
1869 	ret = __synth_event_trace_start(file, &state, data_size);
1870 	if (ret)
1871 		return ret;
1872 
1873 	if (n_vals != state.event->n_fields) {
1874 		ret = -EINVAL;
1875 		goto out;
1876 	}
1877 
1878 	data_size = 0;
1879 
1880 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1881 		if (state.event->fields[i]->is_string) {
1882 			char *str_val = (char *)(long)vals[i];
1883 
1884 			len = trace_string(state.entry, state.event, str_val,
1885 					   state.event->fields[i]->is_dynamic,
1886 					   data_size, &n_u64);
1887 			data_size += len; /* only dynamic string increments */
1888 		} else {
1889 			struct synth_field *field = state.event->fields[i];
1890 			u64 val = vals[i];
1891 
1892 			switch (field->size) {
1893 			case 1:
1894 				state.entry->fields[n_u64].as_u8 = (u8)val;
1895 				break;
1896 
1897 			case 2:
1898 				state.entry->fields[n_u64].as_u16 = (u16)val;
1899 				break;
1900 
1901 			case 4:
1902 				state.entry->fields[n_u64].as_u32 = (u32)val;
1903 				break;
1904 
1905 			default:
1906 				state.entry->fields[n_u64].as_u64 = val;
1907 				break;
1908 			}
1909 			n_u64++;
1910 		}
1911 	}
1912 out:
1913 	__synth_event_trace_end(&state);
1914 
1915 	return ret;
1916 }
1917 EXPORT_SYMBOL_GPL(synth_event_trace_array);
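
/*
 * Illustrative sketch, not part of the original file: the same
 * hypothetical "wakeup_latency" event traced through the array variant.
 * The vals[] entries must be in the same order as the event's fields;
 * the helper name and the values are assumptions made for the example.
 */
static int wakeup_latency_trace_array_example(struct trace_event_file *file,
					      u64 lat_ns)
{
	u64 vals[3];

	vals[0] = lat_ns;		/* lat */
	vals[1] = (u64)current->pid;	/* pid */
	vals[2] = (u64)"cyclictest";	/* comm: string pointer cast to u64 */

	return synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
}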
1918 
1919 /**
1920  * synth_event_trace_start - Start piecewise synthetic event trace
1921  * @file: The trace_event_file representing the synthetic event
1922  * @trace_state: A pointer to an object tracking the piecewise trace state
1923  *
1924  * Start the trace of a synthetic event field-by-field rather than all
1925  * at once.
1926  *
1927  * This function 'opens' an event trace, which means space is reserved
1928  * for the event in the trace buffer, after which the event's
1929  * individual field values can be set through either
1930  * synth_event_add_next_val() or synth_event_add_val().
1931  *
1932  * A pointer to a trace_state object is passed in, which will keep
1933  * track of the current event trace state until the event trace is
1934  * closed (and the event finally traced) using
1935  * synth_event_trace_end().
1936  *
1937  * Note that synth_event_trace_end() must be called after all values
1938  * have been added for each event trace, regardless of whether adding
1939  * all field values succeeded or not.
1940  *
1941  * Note also that for a given event trace, all fields must be added
1942  * using either synth_event_add_next_val() or synth_event_add_val()
1943  * but not both together or interleaved.
1944  *
1945  * Return: 0 on success, err otherwise.
1946  */
1947 int synth_event_trace_start(struct trace_event_file *file,
1948 			    struct synth_event_trace_state *trace_state)
1949 {
1950 	int ret;
1951 
1952 	if (!trace_state)
1953 		return -EINVAL;
1954 
1955 	ret = __synth_event_trace_init(file, trace_state);
1956 	if (ret) {
1957 		if (ret == -ENOENT)
1958 			ret = 0; /* just disabled, not really an error */
1959 		return ret;
1960 	}
1961 
1962 	if (trace_state->event->n_dynamic_fields)
1963 		return -ENOTSUPP;
1964 
1965 	ret = __synth_event_trace_start(file, trace_state, 0);
1966 
1967 	return ret;
1968 }
1969 EXPORT_SYMBOL_GPL(synth_event_trace_start);
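
/*
 * Illustrative sketch, not part of the original file: the basic shape of
 * a piecewise trace.  As the code above shows, events with dynamic
 * (variable-length) string fields can't be traced piecewise (-ENOTSUPP),
 * and synth_event_trace_end() must be called once the trace has been
 * started, whether or not adding the values succeeds.  The helper name
 * is an assumption; field values would be added where indicated, as in
 * the examples following synth_event_add_next_val() and
 * synth_event_add_val() below.
 */
static int piecewise_trace_skeleton_example(struct trace_event_file *file)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(file, &state);
	if (ret)
		return ret;

	/* ... synth_event_add_next_val() or synth_event_add_val() calls ... */

	return synth_event_trace_end(&state);
}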
1970 
1971 static int __synth_event_add_val(const char *field_name, u64 val,
1972 				 struct synth_event_trace_state *trace_state)
1973 {
1974 	struct synth_field *field = NULL;
1975 	struct synth_trace_event *entry;
1976 	struct synth_event *event;
1977 	int i, ret = 0;
1978 
1979 	if (!trace_state) {
1980 		ret = -EINVAL;
1981 		goto out;
1982 	}
1983 
1984 	/* can't mix synth_event_add_next_val() with synth_event_add_val() */
1985 	if (field_name) {
1986 		if (trace_state->add_next) {
1987 			ret = -EINVAL;
1988 			goto out;
1989 		}
1990 		trace_state->add_name = true;
1991 	} else {
1992 		if (trace_state->add_name) {
1993 			ret = -EINVAL;
1994 			goto out;
1995 		}
1996 		trace_state->add_next = true;
1997 	}
1998 
1999 	if (trace_state->disabled)
2000 		goto out;
2001 
2002 	event = trace_state->event;
2003 	if (trace_state->add_name) {
2004 		for (i = 0; i < event->n_fields; i++) {
2005 			field = event->fields[i];
2006 			if (strcmp(field->name, field_name) == 0)
2007 				break;
2008 		}
2009 		if (!field) {
2010 			ret = -EINVAL;
2011 			goto out;
2012 		}
2013 	} else {
2014 		if (trace_state->cur_field >= event->n_fields) {
2015 			ret = -EINVAL;
2016 			goto out;
2017 		}
2018 		field = event->fields[trace_state->cur_field++];
2019 	}
2020 
2021 	entry = trace_state->entry;
2022 	if (field->is_string) {
2023 		char *str_val = (char *)(long)val;
2024 		char *str_field;
2025 
2026 		if (field->is_dynamic) { /* add_val can't do dynamic strings */
2027 			ret = -EINVAL;
2028 			goto out;
2029 		}
2030 
2031 		if (!str_val) {
2032 			ret = -EINVAL;
2033 			goto out;
2034 		}
2035 
2036 		str_field = (char *)&entry->fields[field->offset];
2037 		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
2038 	} else {
2039 		switch (field->size) {
2040 		case 1:
2041 			trace_state->entry->fields[field->offset].as_u8 = (u8)val;
2042 			break;
2043 
2044 		case 2:
2045 			trace_state->entry->fields[field->offset].as_u16 = (u16)val;
2046 			break;
2047 
2048 		case 4:
2049 			trace_state->entry->fields[field->offset].as_u32 = (u32)val;
2050 			break;
2051 
2052 		default:
2053 			trace_state->entry->fields[field->offset].as_u64 = val;
2054 			break;
2055 		}
2056 	}
2057  out:
2058 	return ret;
2059 }
2060 
2061 /**
2062  * synth_event_add_next_val - Add the next field's value to an open synth trace
2063  * @val: The value to set the next field to
2064  * @trace_state: A pointer to an object tracking the piecewise trace state
2065  *
2066  * Set the value of the next field in an event that's been opened by
2067  * synth_event_trace_start().
2068  *
2069  * The val param should be the value cast to u64.  If the value points
2070  * to a string, the val param should be a char * cast to u64.
2071  *
2072  * This function assumes all the fields in an event are to be set one
2073  * after another - successive calls to this function are made, one for
2074  * each field, in the order of the fields in the event, until all
2075  * fields have been set.  If you'd rather set each field individually
2076  * without regard to ordering, synth_event_add_val() can be used
2077  * instead.
2078  *
2079  * Note however that synth_event_add_next_val() and
2080  * synth_event_add_val() can't be intermixed for a given event trace -
2081  * one or the other but not both can be used at the same time.
2082  *
2083  * Note also that synth_event_trace_end() must be called after all
2084  * values have been added for each event trace, regardless of whether
2085  * adding all field values succeeded or not.
2086  *
2087  * Return: 0 on success, err otherwise.
2088  */
2089 int synth_event_add_next_val(u64 val,
2090 			     struct synth_event_trace_state *trace_state)
2091 {
2092 	return __synth_event_add_val(NULL, val, trace_state);
2093 }
2094 EXPORT_SYMBOL_GPL(synth_event_add_next_val);
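
/*
 * Illustrative sketch, not part of the original file: filling in the
 * hypothetical "wakeup_latency" fields in declaration order with
 * synth_event_add_next_val().  The helper name and values are
 * assumptions; note that synth_event_trace_end() is called even if one
 * of the adds fails.
 */
static int wakeup_latency_add_next_val_example(struct trace_event_file *file,
					       u64 lat_ns)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(file, &state);
	if (ret)
		return ret;

	ret = synth_event_add_next_val(lat_ns, &state);			/* lat */
	if (!ret)
		ret = synth_event_add_next_val((u64)current->pid, &state); /* pid */
	if (!ret)
		ret = synth_event_add_next_val((u64)"cyclictest", &state); /* comm */

	/* always close the trace, regardless of whether the adds succeeded */
	synth_event_trace_end(&state);

	return ret;
}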
2095 
2096 /**
2097  * synth_event_add_val - Add a named field's value to an open synth trace
2098  * @field_name: The name of the synthetic event field value to set
2099  * @val: The value to set the named field to
2100  * @trace_state: A pointer to an object tracking the piecewise trace state
2101  *
2102  * Set the value of the named field in an event that's been opened by
2103  * synth_event_trace_start().
2104  *
2105  * The val param should be the value cast to u64.  If the value points
2106  * to a string, the val param should be a char * cast to u64.
2107  *
2108  * This function looks up the field name, and if found, sets the field
2109  * to the specified value.  This lookup makes this function more
2110  * expensive than synth_event_add_next_val(), so use that or the
2111  * non-piecewise synth_event_trace() instead if efficiency is more
2112  * important.
2113  *
2114  * Note however that synth_event_add_next_val() and
2115  * synth_event_add_val() can't be intermixed for a given event trace -
2116  * one or the other but not both can be used at the same time.
2117  *
2118  * Note also that synth_event_trace_end() must be called after all
2119  * values have been added for each event trace, regardless of whether
2120  * adding all field values succeeded or not.
2121  *
2122  * Return: 0 on success, err otherwise.
2123  */
2124 int synth_event_add_val(const char *field_name, u64 val,
2125 			struct synth_event_trace_state *trace_state)
2126 {
2127 	return __synth_event_add_val(field_name, val, trace_state);
2128 }
2129 EXPORT_SYMBOL_GPL(synth_event_add_val);
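
/*
 * Illustrative sketch, not part of the original file: the same trace
 * written by field name rather than field order.  The name lookup makes
 * this slower than synth_event_add_next_val(), but the fields can be set
 * in any order.  The helper name, field names and values are assumptions
 * made for the example.
 */
static int wakeup_latency_add_val_example(struct trace_event_file *file,
					  u64 lat_ns)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(file, &state);
	if (ret)
		return ret;

	ret = synth_event_add_val("comm", (u64)"cyclictest", &state);
	if (!ret)
		ret = synth_event_add_val("pid", (u64)current->pid, &state);
	if (!ret)
		ret = synth_event_add_val("lat", lat_ns, &state);

	/* always close the trace, regardless of whether the adds succeeded */
	synth_event_trace_end(&state);

	return ret;
}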
2130 
2131 /**
2132  * synth_event_trace_end - End piecewise synthetic event trace
2133  * @trace_state: A pointer to an object tracking the piecewise trace state
2134  *
2135  * End the trace of a synthetic event opened by
2136  * synth_event_trace_start().
2137  *
2138  * This function 'closes' an event trace, which means it commits the
2139  * reserved event to the trace buffer and cleans up other loose ends.
2140  *
2141  * A pointer to a trace_state object is passed in, which will keep
2142  * track of the current event trace state opened with
2143  * synth_event_trace_start().
2144  *
2145  * Note that this function must be called after all values have been
2146  * added for each event trace, regardless of whether adding all field
2147  * values succeeded or not.
2148  *
2149  * Return: 0 on success, err otherwise.
2150  */
2151 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2152 {
2153 	if (!trace_state)
2154 		return -EINVAL;
2155 
2156 	__synth_event_trace_end(trace_state);
2157 
2158 	return 0;
2159 }
2160 EXPORT_SYMBOL_GPL(synth_event_trace_end);
2161 
2162 static int create_synth_event(const char *raw_command)
2163 {
2164 	char *fields, *p;
2165 	const char *name;
2166 	int len, ret = 0;
2167 
2168 	raw_command = skip_spaces(raw_command);
2169 	if (raw_command[0] == '\0')
2170 		return ret;
2171 
2172 	last_cmd_set(raw_command);
2173 
2174 	name = raw_command;
2175 
2176 	/* Don't try to process if not our system */
2177 	if (name[0] != 's' || name[1] != ':')
2178 		return -ECANCELED;
2179 	name += 2;
2180 
2181 	p = strpbrk(raw_command, " \t");
2182 	if (!p) {
2183 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2184 		return -EINVAL;
2185 	}
2186 
2187 	fields = skip_spaces(p);
2188 
2189 	/* This interface accepts a group name prefix */
2190 	if (strchr(name, '/')) {
2191 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
2192 		if (len == 0) {
2193 			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
2194 			return -EINVAL;
2195 		}
2196 		name += len;
2197 	}
2198 
2199 	len = name - raw_command;
2200 
2201 	ret = check_command(raw_command + len);
2202 	if (ret) {
2203 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2204 		return ret;
2205 	}
2206 
2207 	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
2208 	if (!name)
2209 		return -ENOMEM;
2210 
2211 	ret = __create_synth_event(name, fields);
2212 
2213 	kfree(name);
2214 
2215 	return ret;
2216 }
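
/*
 * For reference, illustrative commands this parser accepts through the
 * dynamic_events tracefs file (the event name and fields are
 * hypothetical):
 *
 *   echo 's:wakeup_latency u64 lat; pid_t pid; char comm[16]' >> dynamic_events
 *   echo 's:synthetic/wakeup_latency u64 lat; pid_t pid; char comm[16]' >> dynamic_events
 *
 * Removal uses the '-:' form handled by the generic dyn_event code,
 * e.g. "echo '-:synthetic/wakeup_latency' >> dynamic_events".
 */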
2217 
2218 static int synth_event_release(struct dyn_event *ev)
2219 {
2220 	struct synth_event *event = to_synth_event(ev);
2221 	int ret;
2222 
2223 	if (event->ref)
2224 		return -EBUSY;
2225 
2226 	if (trace_event_dyn_busy(&event->call))
2227 		return -EBUSY;
2228 
2229 	ret = unregister_synth_event(event);
2230 	if (ret)
2231 		return ret;
2232 
2233 	dyn_event_remove(ev);
2234 	free_synth_event(event);
2235 	return 0;
2236 }
2237 
2238 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2239 {
2240 	struct synth_field *field;
2241 	unsigned int i;
2242 	char *type, *t;
2243 
2244 	seq_printf(m, "%s\t", event->name);
2245 
2246 	for (i = 0; i < event->n_fields; i++) {
2247 		field = event->fields[i];
2248 
2249 		type = field->type;
2250 		t = strstr(type, "__data_loc");
2251 		if (t) { /* __data_loc belongs in format but not event desc */
2252 			t += sizeof("__data_loc");
2253 			type = t;
2254 		}
2255 
2256 		/* parameter values */
2257 		seq_printf(m, "%s %s%s", type, field->name,
2258 			   i == event->n_fields - 1 ? "" : "; ");
2259 	}
2260 
2261 	seq_putc(m, '\n');
2262 
2263 	return 0;
2264 }
2265 
2266 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2267 {
2268 	struct synth_event *event = to_synth_event(ev);
2269 
2270 	seq_printf(m, "s:%s/", event->class.system);
2271 
2272 	return __synth_event_show(m, event);
2273 }
2274 
2275 static int synth_events_seq_show(struct seq_file *m, void *v)
2276 {
2277 	struct dyn_event *ev = v;
2278 
2279 	if (!is_synth_event(ev))
2280 		return 0;
2281 
2282 	return __synth_event_show(m, to_synth_event(ev));
2283 }
2284 
2285 static const struct seq_operations synth_events_seq_op = {
2286 	.start	= dyn_event_seq_start,
2287 	.next	= dyn_event_seq_next,
2288 	.stop	= dyn_event_seq_stop,
2289 	.show	= synth_events_seq_show,
2290 };
2291 
2292 static int synth_events_open(struct inode *inode, struct file *file)
2293 {
2294 	int ret;
2295 
2296 	ret = security_locked_down(LOCKDOWN_TRACEFS);
2297 	if (ret)
2298 		return ret;
2299 
2300 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2301 		ret = dyn_events_release_all(&synth_event_ops);
2302 		if (ret < 0)
2303 			return ret;
2304 	}
2305 
2306 	return seq_open(file, &synth_events_seq_op);
2307 }
2308 
2309 static ssize_t synth_events_write(struct file *file,
2310 				  const char __user *buffer,
2311 				  size_t count, loff_t *ppos)
2312 {
2313 	return trace_parse_run_command(file, buffer, count, ppos,
2314 				       create_or_delete_synth_event);
2315 }
2316 
2317 static const struct file_operations synth_events_fops = {
2318 	.open           = synth_events_open,
2319 	.write		= synth_events_write,
2320 	.read           = seq_read,
2321 	.llseek         = seq_lseek,
2322 	.release        = seq_release,
2323 };
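
/*
 * Illustrative usage of the synthetic_events tracefs file served by the
 * fops above (the event name and fields are hypothetical):
 *
 *   echo 'wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events
 *   cat synthetic_events
 *   echo '!wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events
 *
 * Opening the file for writing with O_TRUNC ('>' rather than '>>')
 * releases all existing synthetic events first, as handled in
 * synth_events_open() above.
 */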
2324 
2325 /*
2326  * Register dynevent at core_initcall. This allows the kernel to set up
2327  * kprobe events in postcore_initcall without tracefs.
2328  */
2329 static __init int trace_events_synth_init_early(void)
2330 {
2331 	int err = 0;
2332 
2333 	err = dyn_event_register(&synth_event_ops);
2334 	if (err)
2335 		pr_warn("Could not register synth_event_ops\n");
2336 
2337 	return err;
2338 }
2339 core_initcall(trace_events_synth_init_early);
2340 
2341 static __init int trace_events_synth_init(void)
2342 {
2343 	struct dentry *entry = NULL;
2344 	int err = 0;
2345 	err = tracing_init_dentry();
2346 	if (err)
2347 		goto err;
2348 
2349 	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
2350 				    NULL, NULL, &synth_events_fops);
2351 	if (!entry) {
2352 		err = -ENODEV;
2353 		goto err;
2354 	}
2355 
2356 	return err;
2357  err:
2358 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2359 
2360 	return err;
2361 }
2362 
2363 fs_initcall(trace_events_synth_init);
2364