// SPDX-License-Identifier: GPL-2.0
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <linux/xarray.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static DEFINE_XARRAY(syscalls_metadata_sparse);
static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow 32-bit applications
 * to run on a 64-bit kernel do not map the syscalls for
 * 32-bit tasks the same way they do for 64-bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore them.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

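/*
 * Look up the compiled-in syscall metadata for the handler at address
 * @syscall: resolve the symbol name via kallsyms, then scan the
 * __syscalls_metadata section for a matching entry. Unimplemented
 * slots (sys_ni_syscall) return NULL.
 */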
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

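/*
 * Map a syscall number to its metadata. Architectures with sparse
 * syscall numbers (CONFIG_HAVE_SPARSE_SYSCALL_NR) use an xarray;
 * all others use a flat NR_syscalls-sized array.
 */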
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR))
		return xa_load(&syscalls_metadata_sparse, (unsigned long)nr);

	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

const char *get_syscall_name(int syscall)
{
	struct syscall_metadata *entry;

	entry = syscall_nr_to_meta(syscall);
	if (!entry)
		return NULL;

	return entry->name;
}

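/*
 * Format a sys_enter event for human-readable trace output, e.g.
 * (illustrative):
 *
 *	sys_read(fd: 3, buf: 7ffc8e3f2000, count: 400)
 *
 * With the "verbose" trace option set, each value is also preceded by
 * its parameter type.
 */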
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	trace_seq_printf(s, "%s(", entry->name);

	for (i = 0; i < entry->nb_args; i++) {
		if (trace_seq_has_overflowed(s))
			goto end;

		/* parameter types */
		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", entry->types[i]);

		/* parameter values */
		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				 trace->args[i],
				 i == entry->nb_args - 1 ? "" : ", ");
	}

	trace_seq_putc(s, ')');
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		goto out;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
				trace->ret);

 out:
	return trace_handle_return(s);
}

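/*
 * Declare a trace_event_fields entry for a plain scalar field;
 * SYSCALL_FIELD(int, __syscall_nr), for example, expands to
 * { .type = "int", .name = "__syscall_nr", .size = sizeof(int), ... }.
 */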
#define SYSCALL_FIELD(_type, _name) {					\
	.type = #_type, .name = #_name,					\
	.size = sizeof(_type), .align = __alignof__(_type),		\
	.is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }

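/*
 * Build the print_fmt string for a syscall entry event. On a 64-bit
 * kernel, a two-argument syscall would produce something like
 * (illustrative):
 *
 *	"fd: 0x%08lx, count: 0x%08lx",
 *	((unsigned long)(REC->fd)), ((unsigned long)(REC->count))
 */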
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int offset = offsetof(typeof(trace), args);
	int ret = 0;
	int i;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		if (ret)
			break;
		offset += sizeof(unsigned long);
	}

	return ret;
}

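/*
 * Probe attached to the raw sys_enter tracepoint, with the owning
 * trace_array as @data. Records the syscall number and its arguments
 * into that trace_array's ring buffer whenever the corresponding
 * entry event is enabled.
 */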
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct trace_event_buffer fbuffer;
	unsigned long args[6];
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside the tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
	if (!entry)
		return;

	entry = ring_buffer_event_data(fbuffer.event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, args);
	memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);

	trace_event_buffer_commit(&fbuffer);
}

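/*
 * Exit-side counterpart of ftrace_syscall_enter(): records only the
 * syscall number and return value.
 */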
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct trace_event_buffer fbuffer;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside the tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
	if (!entry)
		return;

	entry = ring_buffer_event_data(fbuffer.event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	trace_event_buffer_commit(&fbuffer);
}

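/*
 * The four helpers below refcount the sys_enter/sys_exit tracepoint
 * probes per trace_array: a probe is registered when the first syscall
 * event in that trace_array is enabled, and unregistered when the last
 * one is disabled.
 */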
static int reg_event_syscall_enter(struct trace_event_file *file,
				   struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct trace_event_file *file,
				      struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct trace_event_file *file,
				  struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct trace_event_file *file,
				     struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int __init init_syscall_trace(struct trace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);
	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

static struct trace_event_fields __refdata syscall_enter_fields_array[] = {
	SYSCALL_FIELD(int, __syscall_nr),
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = syscall_enter_define_fields },
	{}
};

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct trace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.fields_array	= syscall_enter_fields_array,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct trace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.fields_array	= (struct trace_event_fields[]){
		SYSCALL_FIELD(int, __syscall_nr),
		SYSCALL_FIELD(long, ret),
		{}
	},
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

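/*
 * Boot-time setup: walk the syscall table and bind each syscall number
 * to its metadata, via find_syscall_meta() on the handler address.
 */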
void __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;
	void *ret;

	if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
		syscalls_metadata = kcalloc(NR_syscalls,
					sizeof(*syscalls_metadata),
					GFP_KERNEL);
		if (!syscalls_metadata) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;

		if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
			syscalls_metadata[i] = meta;
		} else {
			ret = xa_store(&syscalls_metadata_sparse, i, meta,
					GFP_KERNEL);
			WARN(xa_is_err(ret),
				"Syscall memory allocation failed\n");
		}
	}
}

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

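/*
 * Hand a syscall entry record to the BPF programs attached to the
 * event. The on-stack syscall_tp_t mirrors the tracepoint context
 * layout a BPF program expects, with the pt_regs pointer stored at
 * the front (over the trace_entry slot).
 */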
static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *regs,
			       struct syscall_metadata *sys_data,
			       struct syscall_trace_enter *rec)
{
	struct syscall_tp_t {
		struct trace_entry ent;
		int syscall_nr;
		unsigned long args[SYSCALL_DEFINE_MAXARGS];
	} __aligned(8) param;
	int i;

	BUILD_BUG_ON(sizeof(param.ent) < sizeof(void *));

	/* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */
	perf_fetch_caller_regs(regs);
	*(struct pt_regs **)&param = regs;
	param.syscall_nr = rec->nr;
	for (i = 0; i < sys_data->nb_args; i++)
		param.args[i] = rec->args[i];
	return trace_call_bpf(call, &param);
}

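/*
 * sys_enter probe for perf: builds a syscall_trace_enter record in the
 * per-CPU perf trace buffer, runs any attached BPF programs on it, and
 * submits it to the perf events enabled on this CPU.
 */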
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct pt_regs *fake_regs;
	struct hlist_head *head;
	unsigned long args[6];
	bool valid_prog_array;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
	if (!valid_prog_array && hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, &fake_regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, args);
	memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);

	if ((valid_prog_array &&
	     !perf_call_bpf_enter(sys_data->enter_event, fake_regs, sys_data, rec)) ||
	    hlist_empty(head)) {
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx,
			      sys_data->enter_event->event.type, 1, regs,
			      head, NULL);
}

static int perf_sysenter_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point\n");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *regs,
			      struct syscall_trace_exit *rec)
{
	struct syscall_tp_t {
		struct trace_entry ent;
		int syscall_nr;
		unsigned long ret;
	} __aligned(8) param;

	/* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */
	perf_fetch_caller_regs(regs);
	*(struct pt_regs **)&param = regs;
	param.syscall_nr = rec->nr;
	param.ret = rec->ret;
	return trace_call_bpf(call, &param);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct pt_regs *fake_regs;
	struct hlist_head *head;
	bool valid_prog_array;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
	if (!valid_prog_array && hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, &fake_regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	if ((valid_prog_array &&
	     !perf_call_bpf_exit(sys_data->exit_event, fake_regs, rec)) ||
	    hlist_empty(head)) {
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
			      1, regs, head, NULL);
}

static int perf_sysexit_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point\n");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

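/*
 * Class ->reg() callbacks: dispatch ftrace and perf (un)registration
 * requests for syscall entry/exit events.
 */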
static int syscall_enter_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}