xref: /linux/kernel/trace/fgraph.c (revision 6ca80638b90cec66547011ee1ef79e534589989a)
// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/jump_label.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"
#include "trace.h"

#ifdef CONFIG_DYNAMIC_FTRACE
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define ASSIGN_OPS_HASH(opsname, val)
#endif

DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
int ftrace_graph_active;

/* Enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * archs can override this function if they must do something
 * to enable hook for graph tracer.
 */
int __weak ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * to disable hook for graph tracer.
 */
int __weak ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif
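
/*
 * For example, on x86 these patch the jump at the ftrace_graph_call
 * site inside the ftrace trampoline to turn the graph entry hook on
 * or off; see the arch's ftrace.c for the real implementation.
 */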

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	static_branch_enable(&kill_ftrace_graph);
}

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}
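
/*
 * Illustration (field values are examples only): with foo() calling a
 * traced bar(), the shadow stack after both entries were pushed looks
 * roughly like:
 *
 *	curr_ret_stack = 1
 *	ret_stack[1] = { .ret = <addr in foo after call>, .func = bar, ... }
 *	ret_stack[0] = { .ret = <addr in foo's caller>,   .func = foo, ... }
 *
 * The saved .ret is what lets return_to_handler hand control back to
 * the original caller once the return hook has run.
 */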

/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif

int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
	/*
	 * Skip graph tracing if the return location is served by direct trampoline,
	 * since call sequence and return addresses are unpredictable anyway.
	 * Ex: BPF trampoline may call original function and may skip frame
	 * depending on type of BPF programs attached.
	 */
	if (ftrace_direct_func_count &&
	    ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
		return -EBUSY;
#endif
	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}
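
/*
 * Rough call flow: the arch entry trampoline (mcount/fentry) calls
 * function_graph_enter(); if it succeeds, the arch code overwrites the
 * function's real return address on the stack (or in the link register)
 * with return_to_handler, so the return side lands in
 * ftrace_return_to_handler() below, which pops the entry pushed here.
 */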

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

/* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
struct fgraph_ret_regs;

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
						unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	trace.retval = fgraph_ret_regs_return_value(ret_regs);
#endif
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure, we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/*
 * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
 * leave only ftrace_return_to_handler(ret_regs).
 */
#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
{
	return __ftrace_return_to_handler(ret_regs,
				fgraph_ret_regs_frame_pointer(ret_regs));
}
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	return __ftrace_return_to_handler(NULL, frame_pointer);
}
#endif

/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from
 * @idx: Index down the shadow stack
 *
 * Return the ret_stack entry on the shadow stack of the @task at the
 * call graph at @idx starting with zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for the depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	idx = task->curr_ret_stack - idx;

	if (idx >= 0 && idx <= task->curr_ret_stack)
		return &task->ret_stack[idx];

	return NULL;
}
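
/*
 * Example usage (a sketch, not part of the original file): walk a
 * task's saved return addresses from the most recent one down, relying
 * on the NULL return once @idx runs off the end of the shadow stack:
 *
 *	struct ftrace_ret_stack *ret_stack;
 *	int i = 0;
 *
 *	while ((ret_stack = ftrace_graph_get_ret_stack(task, i++)))
 *		pr_info("traced func: %ps\n", (void *)ret_stack->func);
 */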

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'.  If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
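
/*
 * Typical caller (a sketch): a stack unwinder keeps one state variable
 * across a walk and filters every return address it finds, so that
 * return_to_handler never shows up in a backtrace:
 *
 *	int graph_idx = 0;
 *	...
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, addr, retp);
 */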

static struct ftrace_ops graph_ops = {
	.func			= ftrace_graph_func,
	.flags			= FTRACE_OPS_FL_INITIALIZED |
				   FTRACE_OPS_FL_PID |
				   FTRACE_OPS_GRAPH_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};
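
/*
 * Note: ASSIGN_OPS_HASH() above makes graph_ops share global_ops'
 * filter hash, which is why set_ftrace_filter/set_ftrace_notrace
 * also steer the function graph tracer.
 */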

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
extern void ftrace_stub_graph(struct ftrace_graph_ret *);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] =
			kmalloc_array(FTRACE_RETFUNC_DEPTH,
				      sizeof(struct ftrace_ret_stack),
				      GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	}

unlock:
	rcu_read_unlock();
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
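
/*
 * Callers loop on -EAGAIN (see start_graph_tracing()): if the batch of
 * FTRACE_RETSTACK_ALLOC_SIZE stacks runs out while tasks still lack a
 * ret_stack, a fresh batch is allocated and the tasklist walk retried
 * until every task is covered.
 */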

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned int prev_state)
{
	unsigned long long timestamp;
	int index;

	/*
	 * If the user wants to count the time a function was asleep,
	 * do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
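
/*
 * Worked example: if next slept for 2ms, every calltime on its shadow
 * stack is moved forward by 2ms, so durations later computed as
 * rettime - calltime no longer include the time spent sleeping.
 */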

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops has registered.
 */
void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}

static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack =
				kmalloc_array(FTRACE_RETFUNC_DEPTH,
					      sizeof(struct ftrace_ret_stack),
					      GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
					  sizeof(struct ftrace_ret_stack),
					  GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
				       sizeof(struct ftrace_ret_stack *),
				       GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

int register_ftrace_graph(struct fgraph_ops *gops)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = gops->retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = gops->entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
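
/*
 * Usage sketch (my_entry/my_return are hypothetical callbacks matching
 * trace_func_graph_ent_t and trace_func_graph_ret_t):
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	ret = register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */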

void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = ftrace_stub_graph;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}