// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the fly.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/execmem.h>

#include <trace/syscall.h>

#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Take text_mutex to prevent a race with module loading or live
	 * kernel patching changing the text permissions while ftrace has
	 * them set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
}
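
/*
 * Typical update sequence, as driven by the ftrace core (sketch):
 *
 *	ftrace_arch_code_modify_prepare()	// take text_mutex, poke late
 *	  ftrace_make_call()/ftrace_make_nop()	// patches are queued
 *	ftrace_arch_code_modify_post_process()	// flush queue, drop mutex
 *
 * With ftrace_poke_late set, individual pokes are batched via
 * text_poke_queue() and committed in one go by text_poke_finish().
 */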

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting itself.
	 */
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
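
/*
 * The generated instruction is a 5-byte near call. For illustration,
 * with example values ip = 0xffffffff81000100, addr = 0xffffffff81000200:
 *
 *	e8 fb 00 00 00		call +0xfb
 *
 * The rel32 displacement is addr - (ip + CALL_INSN_SIZE), i.e. it is
 * relative to the instruction *following* the call.
 */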

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with copy_from_kernel_nofault(),
	 * and make sure what we read is what we expected it to be before
	 * modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		ftrace_expected = old_code;
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late) {
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	} else {
		mutex_lock(&text_mutex);
		text_poke((void *)ip, new_code, MCOUNT_INSN_SIZE);
		mutex_unlock(&text_mutex);
	}
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when a module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  It is only called by __ftrace_replace_code(), which is called by
 *  ftrace_replace_code() (which x86 overrides) and by
 *  ftrace_update_code(), which turns mcount calls into nops or nops
 *  into function calls, but never converts a function from not using
 *  regs to one that uses regs -- and that conversion is what
 *  ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				 unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}
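
/*
 * text_poke_bp() patches live code safely: it writes an INT3 over the
 * first byte, syncs all cores, writes the tail of the new instruction,
 * then replaces the INT3 with the first byte of the new call. A CPU
 * that hits the INT3 mid-update has the call emulated instead of
 * executing a half-written instruction.
 */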

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_expected = old;
			ftrace_bug(ret, rec);
			ftrace_expected = NULL;
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}
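
/*
 * Note the two-pass structure above: the first loop only *verifies*
 * that every site still contains the expected old instruction, so a
 * corrupted site aborts the run before anything is touched. Only then
 * does the second loop queue all the new instructions, which are
 * committed in one batch by text_poke_finish().
 */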

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

static inline void *alloc_tramp(unsigned long size)
{
	return execmem_alloc(EXECMEM_FTRACE, size);
}

static inline void tramp_free(void *tramp)
{
	execmem_free(tramp);
}

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-function_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};
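
/*
 * Byte layout of the patched instruction, for illustration:
 *
 *	code[0..2] = op[]	= 48 8b 15	movq ...(%rip), %rdx
 *	code[3..6] = offset			32-bit displacement
 *
 * Writing ->offset through the union rewrites only the displacement
 * of the movq, leaving the opcode bytes intact.
 */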

#define RET_SIZE \
	(IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_MITIGATION_SLS))
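/*
 * RET_SIZE works out to:
 *  - 5 bytes with retpolines: a "jmp" to the return thunk (e9 + rel32);
 *  - 1 byte otherwise: a bare "ret" (c3), plus one more byte for a
 *    trailing int3 (cc) when the straight-line-speculation mitigation
 *    is enabled.
 */
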
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip, *dest;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	void *ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough space to store the ftrace_caller code, the
	 * return instruction, and the address of the ftrace_ops this
	 * trampoline is used for.
	 */
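	/*
	 * Resulting layout of the allocation (sketch):
	 *
	 *	trampoline + 0:			copy of ftrace_(regs_)caller
	 *	trampoline + size:		return (or jmp to return thunk)
	 *	trampoline + size + RET_SIZE:	pointer to this ftrace_ops
	 */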
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = text_poke_copy(trampoline, (void *)start_offset, size);
	if (WARN_ON(!ret))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
	else
		text_poke_copy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		if (!text_poke_copy(ip, x86_nops[2], 2))
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	text_poke_copy(ptr, &ops, sizeof(unsigned long));

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;
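	/*
	 * x86 RIP-relative addressing is relative to the end of the
	 * current instruction, hence the OP_REF_SIZE term above: the
	 * displacement is ptr - (address of the movq + 7).
	 */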

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	text_poke_copy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting before the call already.
	 */
	dest = ftrace_ops_get_func(ops);
	text_poke_copy_locked(trampoline + call_offset,
	      text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
	      CALL_INSN_SIZE, false);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_memory_rox((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}
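
/*
 * Since trampolines are byte-for-byte copies of the default callers,
 * the offset of ftrace_(regs_)call within the caller is the same in
 * every trampoline; adding it to a trampoline's base yields the call
 * instruction to inspect or re-patch.
 */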

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}
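
/*
 * Example: decoding "e8 fb 00 00 00" at ptr yields
 * ptr + CALL_INSN_SIZE + 0xfb as the call target -- the inverse of
 * the encoding done by ftrace_call_replace().
 */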

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * The function graph tracer is the only one we know of
		 * that sets a static trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it on the return-address stack
 * of the current task.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably.  Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}
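
/*
 * On success, function_graph_enter() has pushed the original *parent
 * onto the task's return stack, so when the traced function returns it
 * lands in return_to_handler, which runs the exit handler and then
 * jumps back to the saved address.
 */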

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */