xref: /linux/kernel/trace/fgraph.c (revision 151ebcf0797b1a3ba53c8843dc21748c80e098c7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Infrastructure to hook into function calls and returns.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  * Highly modified by Steven Rostedt (VMware).
9  */
10 #include <linux/jump_label.h>
11 #include <linux/suspend.h>
12 #include <linux/ftrace.h>
13 #include <linux/slab.h>
14 
15 #include <trace/events/sched.h>
16 
17 #include "ftrace_internal.h"
18 #include "trace.h"
19 
20 #ifdef CONFIG_DYNAMIC_FTRACE
21 #define ASSIGN_OPS_HASH(opsname, val) \
22 	.func_hash		= val, \
23 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
24 #else
25 #define ASSIGN_OPS_HASH(opsname, val)
26 #endif
27 
28 DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
29 int ftrace_graph_active;
30 
31 /* Enabled by default (can be cleared by a function_graph tracer flag) */
32 static bool fgraph_sleep_time = true;
33 
34 #ifdef CONFIG_DYNAMIC_FTRACE
35 /*
36  * archs can override this function if they must do something
37  * to enable hook for graph tracer.
38  */
39 int __weak ftrace_enable_ftrace_graph_caller(void)
40 {
41 	return 0;
42 }
43 
44 /*
45  * archs can override this function if they must do something
46  * to disable hook for graph tracer.
47  */
48 int __weak ftrace_disable_ftrace_graph_caller(void)
49 {
50 	return 0;
51 }
52 #endif
53 
54 /**
55  * ftrace_graph_stop - set to permanently disable function graph tracing
56  *
57  * In case of an error in function graph tracing, this is called
58  * to try to keep function graph tracing from causing any more harm.
59  * Usually this is pretty severe and this is called to try to at least
60  * get a warning out to the user.
61  */
62 void ftrace_graph_stop(void)
63 {
64 	static_branch_enable(&kill_ftrace_graph);
65 }
66 
67 /* Add a function return address to the current task's return trace stack. */
68 static int
69 ftrace_push_return_trace(unsigned long ret, unsigned long func,
70 			 unsigned long frame_pointer, unsigned long *retp)
71 {
72 	unsigned long long calltime;
73 	int index;
74 
75 	if (unlikely(ftrace_graph_is_dead()))
76 		return -EBUSY;
77 
78 	if (!current->ret_stack)
79 		return -EBUSY;
80 
81 	/*
82 	 * We must make sure the ret_stack is tested before we read
83 	 * anything else.
84 	 */
85 	smp_rmb();
86 
87 	/* The return trace stack is full */
88 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
89 		atomic_inc(&current->trace_overrun);
90 		return -EBUSY;
91 	}
92 
93 	calltime = trace_clock_local();
94 
95 	index = ++current->curr_ret_stack;
96 	barrier();
97 	current->ret_stack[index].ret = ret;
98 	current->ret_stack[index].func = func;
99 	current->ret_stack[index].calltime = calltime;
100 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
101 	current->ret_stack[index].fp = frame_pointer;
102 #endif
103 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
104 	current->ret_stack[index].retp = retp;
105 #endif
106 	return 0;
107 }
108 
109 /*
110  * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
111  * functions. But those archs currently don't support direct functions
112  * anyway, and ftrace_find_rec_direct() is just a stub for them.
113  * Define MCOUNT_INSN_SIZE to keep those archs compiling.
114  */
115 #ifndef MCOUNT_INSN_SIZE
116 /* Make sure this only works without direct calls */
117 # ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
118 #  error MCOUNT_INSN_SIZE not defined with direct calls enabled
119 # endif
120 # define MCOUNT_INSN_SIZE 0
121 #endif
122 
123 int function_graph_enter(unsigned long ret, unsigned long func,
124 			 unsigned long frame_pointer, unsigned long *retp)
125 {
126 	struct ftrace_graph_ent trace;
127 
128 	trace.func = func;
129 	trace.depth = ++current->curr_ret_depth;
130 
131 	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
132 		goto out;
133 
134 	/* Only trace if the calling function expects to */
135 	if (!ftrace_graph_entry(&trace))
136 		goto out_ret;
137 
138 	return 0;
139  out_ret:
140 	current->curr_ret_stack--;
141  out:
142 	current->curr_ret_depth--;
143 	return -EBUSY;
144 }
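
/*
 * Illustrative sketch (not part of this file): an architecture's
 * mcount/fentry hook is expected to call function_graph_enter() with the
 * location of the saved return address and, if the call is accepted,
 * redirect that return address to return_to_handler so the exit can be
 * traced as well.  This is loosely modeled on what per-arch code such as
 * prepare_ftrace_return() does; the function name and the way 'parent'
 * is located are assumptions for the example.
 */
#if 0	/* example only */
static void example_prepare_ftrace_return(unsigned long ip, unsigned long *parent,
					  unsigned long frame_pointer)
{
	unsigned long old = *parent;	/* original return address */

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* On success, hijack the return address so we regain control on exit. */
	if (!function_graph_enter(old, ip, frame_pointer, parent))
		*parent = (unsigned long)&return_to_handler;
}
#endif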
145 
146 /* Retrieve a function return address from the current task's return trace stack. */
147 static void
148 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
149 			unsigned long frame_pointer)
150 {
151 	int index;
152 
153 	index = current->curr_ret_stack;
154 
155 	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
156 		ftrace_graph_stop();
157 		WARN_ON(1);
158 		/* Might as well panic, otherwise we have nowhere to go */
159 		*ret = (unsigned long)panic;
160 		return;
161 	}
162 
163 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
164 	/*
165 	 * The arch may choose to record the frame pointer used
166 	 * and check it here to make sure that it is what we expect it
167 	 * to be. If gcc does not set the placeholder of the return
168 	 * address in the frame pointer, and does a copy instead, then
169 	 * the function graph trace will fail. This test detects this
170 	 * case.
171 	 *
172 	 * Currently, x86_32 with optimization for size (-Os) makes the latest
173 	 * gcc do the above.
174 	 *
175 	 * Note, -mfentry does not use frame pointers, and this test
176 	 *  is not needed if CC_USING_FENTRY is set.
177 	 */
178 	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
179 		ftrace_graph_stop();
180 		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
181 		     "  from func %ps return to %lx\n",
182 		     current->ret_stack[index].fp,
183 		     frame_pointer,
184 		     (void *)current->ret_stack[index].func,
185 		     current->ret_stack[index].ret);
186 		*ret = (unsigned long)panic;
187 		return;
188 	}
189 #endif
190 
191 	*ret = current->ret_stack[index].ret;
192 	trace->func = current->ret_stack[index].func;
193 	trace->calltime = current->ret_stack[index].calltime;
194 	trace->overrun = atomic_read(&current->trace_overrun);
195 	trace->depth = current->curr_ret_depth--;
196 	/*
197 	 * We still want to trace interrupts coming in if
198 	 * max_depth is set to 1. Make sure the decrement is
199 	 * seen before ftrace_graph_return.
200 	 */
201 	barrier();
202 }
203 
204 /*
205  * Hibernation protection.
206  * The state of the current task is too unstable during
207  * suspend/restore to disk. We want to protect against that.
208  */
209 static int
210 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
211 							void *unused)
212 {
213 	switch (state) {
214 	case PM_HIBERNATION_PREPARE:
215 		pause_graph_tracing();
216 		break;
217 
218 	case PM_POST_HIBERNATION:
219 		unpause_graph_tracing();
220 		break;
221 	}
222 	return NOTIFY_DONE;
223 }
224 
225 static struct notifier_block ftrace_suspend_notifier = {
226 	.notifier_call = ftrace_suspend_notifier_call,
227 };
228 
229 /* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
230 struct fgraph_ret_regs;
231 
232 /*
233  * Send the trace to the ring-buffer.
234  * @return the original return address.
235  */
236 static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
237 						unsigned long frame_pointer)
238 {
239 	struct ftrace_graph_ret trace;
240 	unsigned long ret;
241 
242 	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
243 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
244 	trace.retval = fgraph_ret_regs_return_value(ret_regs);
245 #endif
246 	trace.rettime = trace_clock_local();
247 	ftrace_graph_return(&trace);
248 	/*
249 	 * The ftrace_graph_return() may still access the current
250 	 * ret_stack structure; we need to make sure the update of
251 	 * curr_ret_stack is after that.
252 	 */
253 	barrier();
254 	current->curr_ret_stack--;
255 
256 	if (unlikely(!ret)) {
257 		ftrace_graph_stop();
258 		WARN_ON(1);
259 		/* Might as well panic. What else to do? */
260 		ret = (unsigned long)panic;
261 	}
262 
263 	return ret;
264 }
265 
266 /*
267  * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
268  * leave only ftrace_return_to_handler(ret_regs).
269  */
270 #ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
271 unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
272 {
273 	return __ftrace_return_to_handler(ret_regs,
274 				fgraph_ret_regs_frame_pointer(ret_regs));
275 }
276 #else
277 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
278 {
279 	return __ftrace_return_to_handler(NULL, frame_pointer);
280 }
281 #endif
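
/*
 * Illustrative sketch (not part of this file) of what an architecture
 * selecting CONFIG_HAVE_FUNCTION_GRAPH_RETVAL supplies in its asm/ftrace.h:
 * a struct that the return_to_handler trampoline fills with the saved
 * return-value registers and frame pointer, plus the two accessors used
 * above.  The field names and layout below are assumptions for the example;
 * each arch defines its own.
 */
#if 0	/* example only */
struct fgraph_ret_regs {
	unsigned long	ret0;	/* first return-value register */
	unsigned long	ret1;	/* second return-value register */
	unsigned long	fp;	/* frame pointer at function exit */
};

static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
{
	return ret_regs->ret0;
}

static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
{
	return ret_regs->fp;
}
#endif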
282 
283 /**
284  * ftrace_graph_get_ret_stack - return the entry of the shadow stack
285  * @task: The task to read the shadow stack from
286  * @idx: Index down the shadow stack
287  *
288  * Return the ret_stack entry on the shadow stack of @task at
289  * call graph index @idx, counting from zero. If @idx is zero, it
290  * will return the most recently saved ret_stack entry. If @idx is
291  * greater than zero, it will return the entry that many levels
292  * further down the stack of saved return addresses.
293  */
294 struct ftrace_ret_stack *
295 ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
296 {
297 	idx = task->curr_ret_stack - idx;
298 
299 	if (idx >= 0 && idx <= task->curr_ret_stack)
300 		return &task->ret_stack[idx];
301 
302 	return NULL;
303 }
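
/*
 * Illustrative sketch (not part of this file) of how a caller might walk a
 * task's shadow stack with ftrace_graph_get_ret_stack(): index 0 is the most
 * recent entry and NULL marks the end.  The function name and the pr_info()
 * output are assumptions for the example.
 */
#if 0	/* example only */
static void example_dump_shadow_stack(struct task_struct *task)
{
	struct ftrace_ret_stack *ret_stack;
	int i = 0;

	while ((ret_stack = ftrace_graph_get_ret_stack(task, i))) {
		pr_info("depth %d: %ps will return to %pS\n",
			i, (void *)ret_stack->func, (void *)ret_stack->ret);
		i++;
	}
}
#endif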
304 
305 /**
306  * ftrace_graph_ret_addr - convert a potentially modified stack return address
307  *			   to its original value
308  *
309  * This function can be called by stack unwinding code to convert a found stack
310  * return address ('ret') to its original value, in case the function graph
311  * tracer has modified it to be 'return_to_handler'.  If the address hasn't
312  * been modified, the unchanged value of 'ret' is returned.
313  *
314  * 'idx' is a state variable which should be initialized by the caller to zero
315  * before the first call.
316  *
317  * 'retp' is a pointer to the return address on the stack.  It's ignored if
318  * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
319  */
320 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
321 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
322 				    unsigned long ret, unsigned long *retp)
323 {
324 	int index = task->curr_ret_stack;
325 	int i;
326 
327 	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
328 		return ret;
329 
330 	if (index < 0)
331 		return ret;
332 
333 	for (i = 0; i <= index; i++)
334 		if (task->ret_stack[i].retp == retp)
335 			return task->ret_stack[i].ret;
336 
337 	return ret;
338 }
339 #else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
340 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
341 				    unsigned long ret, unsigned long *retp)
342 {
343 	int task_idx;
344 
345 	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
346 		return ret;
347 
348 	task_idx = task->curr_ret_stack;
349 
350 	if (!task->ret_stack || task_idx < *idx)
351 		return ret;
352 
353 	task_idx -= *idx;
354 	(*idx)++;
355 
356 	return task->ret_stack[task_idx].ret;
357 }
358 #endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
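
/*
 * Illustrative sketch (not part of this file) of the calling convention a
 * stack unwinder is expected to follow: one state integer, zeroed before the
 * unwind starts, is threaded through every call, and every return address
 * pulled off the real stack is filtered through ftrace_graph_ret_addr() so
 * that return_to_handler is mapped back to the original caller.  The names
 * below are assumptions for the example.
 */
#if 0	/* example only */
static unsigned long example_unwind_recover_ret_addr(struct task_struct *task,
						     int *graph_idx,
						     unsigned long addr,
						     unsigned long *addr_location)
{
	/* *graph_idx must have been initialized to 0 before the first frame. */
	return ftrace_graph_ret_addr(task, graph_idx, addr, addr_location);
}
#endif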
359 
360 static struct ftrace_ops graph_ops = {
361 	.func			= ftrace_graph_func,
362 	.flags			= FTRACE_OPS_FL_INITIALIZED |
363 				   FTRACE_OPS_FL_PID |
364 				   FTRACE_OPS_GRAPH_STUB,
365 #ifdef FTRACE_GRAPH_TRAMP_ADDR
366 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
367 	/* trampoline_size is only needed for dynamically allocated tramps */
368 #endif
369 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
370 };
371 
372 void ftrace_graph_sleep_time_control(bool enable)
373 {
374 	fgraph_sleep_time = enable;
375 }
376 
377 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
378 {
379 	return 0;
380 }
381 
382 /*
383  * Simply points to ftrace_stub, but with the proper protocol.
384  * Defined by the linker script in linux/vmlinux.lds.h
385  */
386 extern void ftrace_stub_graph(struct ftrace_graph_ret *);
387 
388 /* The callbacks that hook a function */
389 trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
390 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
391 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
392 
393 /* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
394 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
395 {
396 	int i;
397 	int ret = 0;
398 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
399 	struct task_struct *g, *t;
400 
401 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
402 		ret_stack_list[i] =
403 			kmalloc_array(FTRACE_RETFUNC_DEPTH,
404 				      sizeof(struct ftrace_ret_stack),
405 				      GFP_KERNEL);
406 		if (!ret_stack_list[i]) {
407 			start = 0;
408 			end = i;
409 			ret = -ENOMEM;
410 			goto free;
411 		}
412 	}
413 
414 	rcu_read_lock();
415 	for_each_process_thread(g, t) {
416 		if (start == end) {
417 			ret = -EAGAIN;
418 			goto unlock;
419 		}
420 
421 		if (t->ret_stack == NULL) {
422 			atomic_set(&t->trace_overrun, 0);
423 			t->curr_ret_stack = -1;
424 			t->curr_ret_depth = -1;
425 			/* Make sure the tasks see the -1 first: */
426 			smp_wmb();
427 			t->ret_stack = ret_stack_list[start++];
428 		}
429 	}
430 
431 unlock:
432 	rcu_read_unlock();
433 free:
434 	for (i = start; i < end; i++)
435 		kfree(ret_stack_list[i]);
436 	return ret;
437 }
438 
439 static void
440 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
441 				struct task_struct *prev,
442 				struct task_struct *next,
443 				unsigned int prev_state)
444 {
445 	unsigned long long timestamp;
446 	int index;
447 
448 	/*
449 	 * Does the user want to count the time a function was asleep?
450 	 * If so, do not update the time stamps.
451 	 */
452 	if (fgraph_sleep_time)
453 		return;
454 
455 	timestamp = trace_clock_local();
456 
457 	prev->ftrace_timestamp = timestamp;
458 
459 	/* only process tasks that we timestamped */
460 	if (!next->ftrace_timestamp)
461 		return;
462 
463 	/*
464 	 * Update all the counters in next to make up for the
465 	 * time next was sleeping.
466 	 */
467 	timestamp -= next->ftrace_timestamp;
468 
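	/*
	 * For example, if next was asleep for 2ms, every entry still pending
	 * on its shadow stack gets its calltime advanced by 2ms, so the
	 * rettime - calltime reported on exit excludes the sleep.
	 */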
469 	for (index = next->curr_ret_stack; index >= 0; index--)
470 		next->ret_stack[index].calltime += timestamp;
471 }
472 
473 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
474 {
475 	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
476 		return 0;
477 	return __ftrace_graph_entry(trace);
478 }
479 
480 /*
481  * The function graph tracer should only trace the functions defined
482  * by set_ftrace_filter and set_ftrace_notrace. If another function
483  * tracer ops is registered, the graph tracer must test each
484  * function against the global ops, and not just trace any function
485  * that some other ftrace_ops has registered.
486  */
487 void update_function_graph_func(void)
488 {
489 	struct ftrace_ops *op;
490 	bool do_test = false;
491 
492 	/*
493 	 * The graph and global ops share the same set of functions
494 	 * to test. If any other ops is on the list, then
495 	 * the graph tracer needs to test if it's the function
496 	 * it should call.
497 	 */
498 	do_for_each_ftrace_op(op, ftrace_ops_list) {
499 		if (op != &global_ops && op != &graph_ops &&
500 		    op != &ftrace_list_end) {
501 			do_test = true;
502 			/* in double loop, break out with goto */
503 			goto out;
504 		}
505 	} while_for_each_ftrace_op(op);
506  out:
507 	if (do_test)
508 		ftrace_graph_entry = ftrace_graph_entry_test;
509 	else
510 		ftrace_graph_entry = __ftrace_graph_entry;
511 }
512 
513 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
514 
515 static void
516 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
517 {
518 	atomic_set(&t->trace_overrun, 0);
519 	t->ftrace_timestamp = 0;
520 	/* make curr_ret_stack visible before we add the ret_stack */
521 	smp_wmb();
522 	t->ret_stack = ret_stack;
523 }
524 
525 /*
526  * Allocate a return stack for the idle task. May be the first
527  * time through, or it may be done when a CPU is brought online by hotplug.
528  */
529 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
530 {
531 	t->curr_ret_stack = -1;
532 	t->curr_ret_depth = -1;
533 	/*
534 	 * The idle task has no parent; it either has its own
535 	 * stack or no stack at all.
536 	 */
537 	if (t->ret_stack)
538 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
539 
540 	if (ftrace_graph_active) {
541 		struct ftrace_ret_stack *ret_stack;
542 
543 		ret_stack = per_cpu(idle_ret_stack, cpu);
544 		if (!ret_stack) {
545 			ret_stack =
546 				kmalloc_array(FTRACE_RETFUNC_DEPTH,
547 					      sizeof(struct ftrace_ret_stack),
548 					      GFP_KERNEL);
549 			if (!ret_stack)
550 				return;
551 			per_cpu(idle_ret_stack, cpu) = ret_stack;
552 		}
553 		graph_init_task(t, ret_stack);
554 	}
555 }
556 
557 /* Allocate a return stack for a newly created task */
558 void ftrace_graph_init_task(struct task_struct *t)
559 {
560 	/* Make sure we do not use the parent ret_stack */
561 	t->ret_stack = NULL;
562 	t->curr_ret_stack = -1;
563 	t->curr_ret_depth = -1;
564 
565 	if (ftrace_graph_active) {
566 		struct ftrace_ret_stack *ret_stack;
567 
568 		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
569 					  sizeof(struct ftrace_ret_stack),
570 					  GFP_KERNEL);
571 		if (!ret_stack)
572 			return;
573 		graph_init_task(t, ret_stack);
574 	}
575 }
576 
577 void ftrace_graph_exit_task(struct task_struct *t)
578 {
579 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
580 
581 	t->ret_stack = NULL;
582 	/* NULL must become visible to IRQs before we free it: */
583 	barrier();
584 
585 	kfree(ret_stack);
586 }
587 
588 /* Allocate a return stack for each task */
589 static int start_graph_tracing(void)
590 {
591 	struct ftrace_ret_stack **ret_stack_list;
592 	int ret, cpu;
593 
594 	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
595 				       sizeof(struct ftrace_ret_stack *),
596 				       GFP_KERNEL);
597 
598 	if (!ret_stack_list)
599 		return -ENOMEM;
600 
601 	/* The cpu_boot init_task->ret_stack will never be freed */
602 	for_each_online_cpu(cpu) {
603 		if (!idle_task(cpu)->ret_stack)
604 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
605 	}
606 
607 	do {
608 		ret = alloc_retstack_tasklist(ret_stack_list);
609 	} while (ret == -EAGAIN);
610 
611 	if (!ret) {
612 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
613 		if (ret)
614 			pr_info("ftrace_graph: Couldn't activate tracepoint"
615 				" probe to kernel_sched_switch\n");
616 	}
617 
618 	kfree(ret_stack_list);
619 	return ret;
620 }
621 
622 int register_ftrace_graph(struct fgraph_ops *gops)
623 {
624 	int ret = 0;
625 
626 	mutex_lock(&ftrace_lock);
627 
628 	/* we currently allow only one tracer registered at a time */
629 	if (ftrace_graph_active) {
630 		ret = -EBUSY;
631 		goto out;
632 	}
633 
634 	register_pm_notifier(&ftrace_suspend_notifier);
635 
636 	ftrace_graph_active++;
637 	ret = start_graph_tracing();
638 	if (ret) {
639 		ftrace_graph_active--;
640 		goto out;
641 	}
642 
643 	ftrace_graph_return = gops->retfunc;
644 
645 	/*
646 	 * Update the indirect function to the entryfunc, and the
647 	 * function that gets called to the entry_test first. Then
648 	 * call the update fgraph entry function to determine if
649 	 * the entryfunc should be called directly or not.
650 	 */
651 	__ftrace_graph_entry = gops->entryfunc;
652 	ftrace_graph_entry = ftrace_graph_entry_test;
653 	update_function_graph_func();
654 
655 	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
656 out:
657 	mutex_unlock(&ftrace_lock);
658 	return ret;
659 }
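
/*
 * Illustrative sketch (not part of this file) of a minimal user of this
 * interface: fill in a struct fgraph_ops with an entry and a return callback
 * and register it.  The callback bodies and names are assumptions for the
 * example; real users such as the function_graph tracer record the events
 * into the ring buffer instead of printing them.
 */
#if 0	/* example only */
static int example_graph_entry(struct ftrace_graph_ent *trace)
{
	/* Returning non-zero asks for the matching return event as well. */
	return 1;
}

static void example_graph_return(struct ftrace_graph_ret *trace)
{
	pr_info("%ps took %llu ns\n", (void *)trace->func,
		trace->rettime - trace->calltime);
}

static struct fgraph_ops example_gops = {
	.entryfunc	= example_graph_entry,
	.retfunc	= example_graph_return,
};

static int __init example_graph_init(void)
{
	/* Only one graph tracer may be registered at a time. */
	return register_ftrace_graph(&example_gops);
}
#endif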
660 
661 void unregister_ftrace_graph(struct fgraph_ops *gops)
662 {
663 	mutex_lock(&ftrace_lock);
664 
665 	if (unlikely(!ftrace_graph_active))
666 		goto out;
667 
668 	ftrace_graph_active--;
669 	ftrace_graph_return = ftrace_stub_graph;
670 	ftrace_graph_entry = ftrace_graph_entry_stub;
671 	__ftrace_graph_entry = ftrace_graph_entry_stub;
672 	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
673 	unregister_pm_notifier(&ftrace_suspend_notifier);
674 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
675 
676  out:
677 	mutex_unlock(&ftrace_lock);
678 }
679