xref: /linux/arch/powerpc/kernel/trace/ftrace.c (revision 7d40aff8213c92e64a1576ba9dfebcd201c0564d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for replacing ftrace calls with jumps.
4  *
5  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6  *
7  * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
8  *
9  * Added function graph tracer code, taken from x86 that was written
10  * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
11  *
12  */
13 
14 #define pr_fmt(fmt) "ftrace-powerpc: " fmt
15 
16 #include <linux/spinlock.h>
17 #include <linux/hardirq.h>
18 #include <linux/uaccess.h>
19 #include <linux/module.h>
20 #include <linux/ftrace.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/list.h>
24 
25 #include <asm/cacheflush.h>
26 #include <asm/code-patching.h>
27 #include <asm/ftrace.h>
28 #include <asm/syscall.h>
29 #include <asm/inst.h>
30 
31 
32 #ifdef CONFIG_DYNAMIC_FTRACE
33 
34 /*
35  * We generally only have a single long_branch tramp and at most 2 or 3 plt
36  * tramps generated. But, we don't use the plt tramps currently. We also allot
37  * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
38  * tramps in total. Set aside 8 just to be sure.
39  */
40 #define	NUM_FTRACE_TRAMPS	8
41 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
42 
43 static ppc_inst_t
44 ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
45 {
46 	ppc_inst_t op;
47 
48 	addr = ppc_function_entry((void *)addr);
49 
50 	/* if (link) set op to 'bl' else 'b' */
51 	create_branch(&op, (u32 *)ip, addr, link ? 1 : 0);
52 
53 	return op;
54 }
55 
56 static inline int
57 ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
58 {
59 	ppc_inst_t replaced;
60 
61 	/*
62 	 * Note:
63 	 * We are paranoid about modifying text, as if a bug was to happen, it
64 	 * could cause us to read or write to someplace that could cause harm.
65 	 * Carefully read and modify the code with probe_kernel_*(), and make
66 	 * sure what we read is what we expected it to be before modifying it.
67 	 */
68 
69 	/* read the text we want to modify */
70 	if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
71 		return -EFAULT;
72 
73 	/* Make sure it is what we expect it to be */
74 	if (!ppc_inst_equal(replaced, old)) {
75 		pr_err("%p: replaced (%s) != old (%s)",
76 		(void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old));
77 		return -EINVAL;
78 	}
79 
80 	/* replace the text with the new text */
81 	return patch_instruction((u32 *)ip, new);
82 }
83 
84 /*
85  * Helper functions that are the same for both PPC64 and PPC32.
86  */
/* Can a direct 24-bit branch at @ip reach the entry point of @addr? */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	unsigned long target = ppc_function_entry((void *)addr);

	return is_offset_in_branch_range(target - ip);
}
93 
94 static int is_bl_op(ppc_inst_t op)
95 {
96 	return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
97 }
98 
99 static int is_b_op(ppc_inst_t op)
100 {
101 	return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
102 }
103 
104 static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
105 {
106 	int offset;
107 
108 	offset = (ppc_inst_val(op) & 0x03fffffc);
109 	/* make it signed */
110 	if (offset & 0x02000000)
111 		offset |= 0xfe000000;
112 
113 	return ip + (long)offset;
114 }
115 
116 #ifdef CONFIG_MODULES
117 #ifdef CONFIG_PPC64
/*
 * Turn the module call site at rec->ip (a 'bl' to a module ftrace
 * trampoline) back into an effective nop (PPC64 variant). On ELFv1 the
 * instruction after the bl restores the TOC, so a 'b +8' is used instead
 * of a plain nop; see the comment below. Returns 0 or a negative errno.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

#ifdef CONFIG_MPROFILE_KERNEL
	/* When using -mprofile-kernel there is no TOC load to jump over */
	pop = ppc_inst(PPC_RAW_NOP());

	/* Sanity-check the instruction *before* the bl as well */
	if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
		return -EFAULT;
	}

	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
	    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
		pr_err("Unexpected instruction %s around bl _mcount\n",
		       ppc_inst_as_str(op));
		return -EINVAL;
	}
#else
	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */

	pop = ppc_inst(PPC_INST_BRANCH | 8);	/* b +8 */

	/*
	 * Check what is in the next instruction. We can see ld r2,40(r1), but
	 * on first pass after boot we will see mflr r0.
	 */
	if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
		pr_err("Fetching op failed.\n");
		return -EFAULT;
	}

	if (!ppc_inst_equal(op,  ppc_inst(PPC_INST_LD_TOC))) {
		pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
		return -EINVAL;
	}
#endif /* CONFIG_MPROFILE_KERNEL */

	/* All checks passed; write out the replacement instruction */
	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
212 
213 #else /* !PPC64 */
214 static int
215 __ftrace_make_nop(struct module *mod,
216 		  struct dyn_ftrace *rec, unsigned long addr)
217 {
218 	ppc_inst_t op;
219 	unsigned long ip = rec->ip;
220 	unsigned long tramp, ptr;
221 
222 	if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
223 		return -EFAULT;
224 
225 	/* Make sure that that this is still a 24bit jump */
226 	if (!is_bl_op(op)) {
227 		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
228 		return -EINVAL;
229 	}
230 
231 	/* lets find where the pointer goes */
232 	tramp = find_bl_target(ip, op);
233 
234 	/* Find where the trampoline jumps to */
235 	if (module_trampoline_target(mod, tramp, &ptr)) {
236 		pr_err("Failed to get trampoline target\n");
237 		return -EFAULT;
238 	}
239 
240 	if (ptr != addr) {
241 		pr_err("Trampoline location %08lx does not match addr\n",
242 		       tramp);
243 		return -EINVAL;
244 	}
245 
246 	op = ppc_inst(PPC_RAW_NOP());
247 
248 	if (patch_instruction((u32 *)ip, op))
249 		return -EPERM;
250 
251 	return 0;
252 }
253 #endif /* PPC64 */
254 #endif /* CONFIG_MODULES */
255 
256 static unsigned long find_ftrace_tramp(unsigned long ip)
257 {
258 	int i;
259 
260 	/*
261 	 * We have the compiler generated long_branch tramps at the end
262 	 * and we prefer those
263 	 */
264 	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
265 		if (!ftrace_tramps[i])
266 			continue;
267 		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
268 			return ftrace_tramps[i];
269 
270 	return 0;
271 }
272 
273 static int add_ftrace_tramp(unsigned long tramp)
274 {
275 	int i;
276 
277 	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
278 		if (!ftrace_tramps[i]) {
279 			ftrace_tramps[i] = tramp;
280 			return 0;
281 		}
282 
283 	return -1;
284 }
285 
286 /*
287  * If this is a compiler generated long_branch trampoline (essentially, a
288  * trampoline that has a branch to _mcount()), we re-write the branch to
289  * instead go to ftrace_[regs_]caller() and note down the location of this
290  * trampoline.
291  */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	ppc_inst_t op;
	unsigned long ptr;
	/*
	 * Rejected plt tramps, cached so we don't re-probe them.
	 * NOTE(review): nothing visible in this file ever populates this
	 * array — confirm whether plt tramp tracking is still wired up.
	 */
	static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];

	/* Is this a known long jump tramp? Already patched — nothing to do. */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i])
			break;
		else if (ftrace_tramps[i] == tramp)
			return 0;

	/* Is this a known plt tramp? Previously rejected — fail fast. */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_plt_tramps[i])
			break;
		else if (ftrace_plt_tramps[i] == tramp)
			return -1;

	/* New trampoline -- read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* lets find where the pointer goes (decoding works for 'b' too) */
	ptr = find_bl_target(tramp, op);

	/* Only _mcount trampolines may be re-purposed */
	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	ptr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	if (patch_branch((u32 *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	/* Remember this tramp so future call sites can reuse it */
	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}
351 
/*
 * Nop out a call site in core kernel text. The bl there goes via a
 * compiler generated long_branch trampoline; re-purpose that trampoline
 * for ftrace (or verify another registered tramp is reachable) before
 * patching the call site itself to a nop.
 */
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
					(void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
390 
/*
 * ftrace entry point: convert the call at rec->ip back into a nop.
 * Dispatches on where the call site lives: direct-branch range, core
 * kernel text, or a module (which requires the module's tramp info).
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_nop_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		/* Cache the module on the record for later calls */
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
438 
439 #ifdef CONFIG_MODULES
440 #ifdef CONFIG_PPC64
441 /*
442  * Examine the existing instructions for __ftrace_make_call.
443  * They should effectively be a NOP, and follow formal constraints,
444  * depending on the ABI. Return false if they don't.
445  */
446 #ifndef CONFIG_MPROFILE_KERNEL
447 static int
448 expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
449 {
450 	/*
451 	 * We expect to see:
452 	 *
453 	 * b +8
454 	 * ld r2,XX(r1)
455 	 *
456 	 * The load offset is different depending on the ABI. For simplicity
457 	 * just mask it out when doing the compare.
458 	 */
459 	if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) ||
460 	    (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000)
461 		return 0;
462 	return 1;
463 }
464 #else
465 static int
466 expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
467 {
468 	/* look for patched "NOP" on ppc64 with -mprofile-kernel */
469 	if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
470 		return 0;
471 	return 1;
472 }
473 #endif
474 
/*
 * Patch a module call site from its "nop" sequence to a 'bl' to the
 * module's ftrace trampoline (the _regs variant if the record requests
 * register saving). PPC64 variant. Returns 0 or a negative errno.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op[2];
	void *ip = (void *)rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(op, ip))
		return -EFAULT;

	/* op[1] is only inspected by the ELFv1 expected_nop_sequence() */
	if (copy_inst_from_kernel_nofault(op + 1, ip + 4))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %s %s\n",
		ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1]));
		return -EINVAL;
	}

	/* If we never set up ftrace trampoline(s), then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;

	/* The trampoline must ultimately reach the requested function */
	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
534 
535 #else  /* !CONFIG_PPC64: */
/*
 * Patch a module call site from a nop to a 'bl' to the module's ftrace
 * trampoline. PPC32 variant: only a single nop is expected at the site.
 * Returns 0 or a negative errno.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int err;
	ppc_inst_t op;
	u32 *ip = (u32 *)rec->ip;
	struct module *mod = rec->arch.mod;
	unsigned long tramp;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, ip))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (!ppc_inst_equal(op,  ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* Pick the regs-saving tramp when the record asks for it */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;
	/* create the branch to the trampoline */
	err = create_branch(&op, ip, tramp, BRANCH_SET_LINK);
	if (err) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction(ip, op))
		return -EPERM;

	return 0;
}
585 #endif /* CONFIG_PPC64 */
586 #endif /* CONFIG_MODULES */
587 
/*
 * Patch a core-kernel call site (currently a nop) to branch-and-link to
 * a reachable ftrace trampoline. @addr must resolve to ftrace_caller
 * (or ftrace_regs_caller with DYNAMIC_FTRACE_WITH_REGS).
 */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry) {
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);
		if (ptr != entry) {
#endif
			pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
			return -EINVAL;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		}
#endif
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* Find a trampoline within direct-branch range of this site */
	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}
634 
/*
 * ftrace entry point: convert the nop at rec->ip into a call to @addr.
 * Dispatches like ftrace_make_nop(): direct branch when in range, else
 * core-kernel or module trampoline handling.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_call_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
670 
671 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
672 #ifdef CONFIG_MODULES
/*
 * Redirect an existing module call site from @old_addr to @addr
 * (e.g. switching between ftrace_caller and ftrace_regs_caller).
 * Verifies the current bl really targets @old_addr (directly or via a
 * trampoline) before patching. Returns 0 or a negative errno.
 */
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
					unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	/* Out of range: go via the appropriate module trampoline */
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
759 #endif
760 
/*
 * ftrace entry point: retarget the call at rec->ip from @old_addr to
 * @addr. Core kernel sites always go via the regs variant already, so
 * only in-range and module sites need patching.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
800 #endif
801 
/*
 * Point the patched call inside ftrace_caller (at ftrace_call) — and,
 * with DYNAMIC_FTRACE_WITH_REGS, the one at ftrace_regs_call — at @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/* Also update the regs callback function */
	if (!ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func, 1);
		ret = ftrace_modify_code(ip, old, new);
	}
#endif

	return ret;
}
824 
825 /*
826  * Use the default ftrace_modify_all_code, but without
827  * stop_machine().
828  */
void arch_ftrace_update_code(int command)
{
	/* Plain call — no stop_machine() serialization on powerpc */
	ftrace_modify_all_code(command);
}
833 
834 #ifdef CONFIG_PPC64
835 #define PACATOC offsetof(struct paca_struct, kernel_toc)
836 
837 extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
838 
/*
 * Boot-time setup (PPC64): fill the two reserved stub areas (after .text
 * and after .init.text) with a TOC-relative jump to ftrace_[regs_]caller
 * and register them as ftrace trampolines.
 */
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	u32 stub_insns[] = {
		0xe98d0000 | PACATOC,	/* ld      r12,PACATOC(r13)	*/
		0x3d8c0000,		/* addis   r12,r12,<high>	*/
		0x398c0000,		/* addi    r12,r12,<low>	*/
		0x7d8903a6,		/* mtctr   r12			*/
		0x4e800420,		/* bctr				*/
	};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	long reladdr = addr - kernel_toc_addr();

	/*
	 * The stub materializes addr as kernel_toc + reladdr, so the
	 * offset must fit in a signed 32 bits.
	 */
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		/* patch the addis/addi immediates with the split offset */
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
872 #else
int __init ftrace_dyn_arch_init(void)
{
	/* Nothing to set up on 32-bit: no stub trampolines to initialize */
	return 0;
}
877 #endif
878 #endif /* CONFIG_DYNAMIC_FTRACE */
879 
880 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
881 
882 extern void ftrace_graph_call(void);
883 extern void ftrace_graph_stub(void);
884 
885 static int ftrace_modify_ftrace_graph_caller(bool enable)
886 {
887 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
888 	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
889 	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
890 	ppc_inst_t old, new;
891 
892 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
893 		return 0;
894 
895 	old = ftrace_call_replace(ip, enable ? stub : addr, 0);
896 	new = ftrace_call_replace(ip, enable ? addr : stub, 0);
897 
898 	return ftrace_modify_code(ip, old, new);
899 }
900 
/* Route ftrace_graph_call to ftrace_graph_caller (enable graph tracing) */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}
905 
/* Route ftrace_graph_call back to the stub (disable graph tracing) */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}
910 
911 /*
912  * Hook the return address and push it in the stack of return addrs
913  * in current thread info. Return the address we want to divert to.
914  */
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
	unsigned long return_hooker;
	int bit;

	/* Graph tracing has been shut down: leave the return address alone */
	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	/* Don't recurse if we re-enter from within the tracer itself */
	bit = ftrace_test_recursion_trylock(ip, parent);
	if (bit < 0)
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	/* On success, divert the function's return through return_to_handler */
	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
out:
	return parent;
}
940 
941 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Rewrite the saved LR so the traced function returns via the hook */
	fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
}
947 #else
/* Thin wrapper around __prepare_ftrace_return(); presumably invoked from
 * the ftrace asm stubs — confirm against the callers. */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp)
{
	return __prepare_ftrace_return(parent, ip, sp);
}
953 #endif
954 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
955 
956 #ifdef CONFIG_PPC64_ELF_ABI_V1
/*
 * On the ELFv1 ABI, text symbols carry a leading dot (the undotted name
 * is the function descriptor). Skip the dot so a user-supplied pattern
 * without one still matches.
 */
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	if (str[0] != '.' || search[0] == '.')
		return str;

	return str + 1;
}
964 #endif /* CONFIG_PPC64_ELF_ABI_V1 */
965