// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the fly.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <trace/syscall.h>

#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

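/*
 * Set while a batched text-patching sequence is in flight (between the
 * prepare() and post_process() callbacks below); it makes the direct
 * modification path queue its pokes instead of patching immediately.
 */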
static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race: module loading and
	 * live kernel patching must not change the text permissions while
	 * ftrace has them set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and the text_poke_queue() work they
	 * queue up must be finished here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting itself.
	 */
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
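
/*
 * For reference, the two 5-byte sequences exchanged above (a sketch;
 * the call bytes are generated by text_gen_insn(), not hand-built here):
 *
 *	call:	e8 <rel32>	rel32 = addr - (ip + CALL_INSN_SIZE)
 *	nop:	x86_nops[5]	one 5-byte NOP
 *
 * Both are MCOUNT_INSN_SIZE bytes, which is what makes them freely
 * interchangeable at a patched mcount/fentry site.
 */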

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with copy_from_kernel_nofault(),
	 * and make sure what we read is what we expected it to be before
	 * modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		ftrace_expected = old_code;
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when a module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  It is only called by __ftrace_replace_code(), which is reached from
 *  ftrace_replace_code() (overridden by x86) and from ftrace_update_code().
 *  Those convert mcount calls into nops, or nops into function calls, but
 *  never convert a function from not using regs to one that uses regs,
 *  which is what ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				 unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

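/*
 * Redirect the ftrace_call and ftrace_regs_call sites inside the default
 * trampolines to @func. text_poke_bp() is used because other CPUs may be
 * executing those trampolines while they are patched.
 */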
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

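/*
 * Convert all ftrace call sites for @enable in two passes: first verify
 * that every site contains the expected old instruction and bail out
 * before touching anything if one does not, then queue the new
 * instructions and flush them as a batch via text_poke_finish().
 */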
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_expected = old;
			ftrace_bug(ret, rec);
			ftrace_expected = NULL;
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-function_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};
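
/*
 * Illustration of how the union overlays the 7-byte instruction (the
 * displacement bytes shown are arbitrary; the real value is computed in
 * create_trampoline() below):
 *
 *	48 8b 15 78 56 34 12	movq 0x12345678(%rip), %rdx
 *	\______/ \_________/
 *	 op[3]     offset
 */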

#define RET_SIZE		(IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))

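/*
 * Layout of the buffer that create_trampoline() assembles below (a
 * sketch derived from the code, not a separate structure):
 *
 *	trampoline:	copy of ftrace_(regs_)caller	(size bytes)
 *	+size:		return thunk, or retq; int3	(RET_SIZE bytes)
 *	+size+RET_SIZE:	pointer to this ftrace_ops	(sizeof(void *))
 */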
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip, *dest;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the return instruction, as well as the address of the
	 * ftrace_ops this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;
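
	/*
	 * Standard RIP-relative addressing: the displacement is measured
	 * from the end of the 7-byte movq, since %rip already points to
	 * the next instruction when the operand is evaluated.
	 */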

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting before the call already.
	 */
	dest = ftrace_ops_get_func(ops);
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
	       CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_memory_rox((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

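/*
 * Walk all registered ftrace_ops and apply read-only protection to every
 * trampoline this file allocated (static trampolines are left to their
 * owners).
 */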
void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

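/*
 * Create the ops' trampoline on first use, or live-patch the call site
 * inside an already allocated trampoline to the ops' current callback.
 */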
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * The function graph tracer is the only caller we know
		 * of that sets a static trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it onto the stack of return
 * addresses kept in the current task.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably.  Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
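/*
 * With DYNAMIC_FTRACE_WITH_ARGS the function graph tracer runs as a
 * regular ftrace callback: the slot holding the traced function's
 * return address sits at the top of the stack, so pass its location
 * to prepare_ftrace_return() for hooking.
 */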
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
667