xref: /linux/arch/powerpc/kernel/trace/ftrace_64_pg.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go out to P.A. Semi, Inc. for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86, which was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>

/*
 * We generally only have a single long_branch tramp and at most 2 or 3 plt
 * tramps generated. But, we don't use the plt tramps currently. We also allot
 * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
 * tramps in total. Set aside 8 just to be sure.
 */
#define	NUM_FTRACE_TRAMPS	8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

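/*
 * The address recorded by ftrace is used directly as the patch site;
 * no adjustment is needed with the profiling ABIs handled by this file.
 */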
unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}

static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	ppc_inst_t op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);

	return op;
}

static inline int
ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
	ppc_inst_t replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read the code with copy_inst_from_kernel_nofault(), and
	 * make sure what we read is what we expected it to be before
	 * modifying it.
	 */

	/* read the text we want to modify */
	if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (!ppc_inst_equal(replaced, old)) {
		pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
		       ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old));
		return -EINVAL;
	}

	/* replace the text with the new text */
	return patch_instruction((u32 *)ip, new);
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	return is_offset_in_branch_range(addr - ip);
}

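/*
 * A branch instruction is recognised by masking off its LI offset field
 * and comparing the remaining opcode bits against a bare 'bl' or 'b'.
 */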
static int is_bl_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}

static int is_b_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
}

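/*
 * Compute the target of a relative branch. PPC_LI() extracts the 26-bit
 * byte offset (the 24-bit LI field with its two implied low zero bits);
 * bit 0x02000000 is its sign bit, so negative offsets are sign-extended
 * before being added to ip.
 */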
static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
{
	int offset;

	offset = PPC_LI(ppc_inst_val(op));
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}

#ifdef CONFIG_MODULES
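/*
 * Find the module that contains a patch site. __module_text_address()
 * must be called with preemption disabled so that the module cannot be
 * unloaded while we are looking at it.
 */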
static struct module *ftrace_lookup_module(struct dyn_ftrace *rec)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address(rec->ip);
	preempt_enable();

	if (!mod)
		pr_err("No module loaded at addr=%lx\n", rec->ip);

	return mod;
}

static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	if (!mod) {
		mod = ftrace_lookup_module(rec);
		if (!mod)
			return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
			return -EFAULT;
		}

		/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
		if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
		    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
			pr_err("Unexpected instruction %08lx around bl _mcount\n",
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		/*
		 * Check what is in the next instruction. We can see ld r2,40(r1), but
		 * on first pass after boot we will see mflr r0.
		 */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
			pr_err("Fetching op failed.\n");
			return -EFAULT;
		}

		if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
			pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	}

	/*
	 * When using -mprofile-kernel or PPC32 there is no load to jump over.
	 *
	 * Otherwise our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we cannot simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
		pop = ppc_inst(PPC_RAW_NOP());
	else
		pop = ppc_inst(PPC_RAW_BRANCH(8));	/* b +8 */

	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
#else
static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static unsigned long find_ftrace_tramp(unsigned long ip)
{
	int i;

	/*
	 * We have the compiler generated long_branch tramps at the end
	 * and we prefer those
	 */
	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
		if (ftrace_tramps[i] &&
		    is_offset_in_branch_range(ftrace_tramps[i] - ip))
			return ftrace_tramps[i];

	return 0;
}

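/* Record a trampoline address in the first free slot; -1 if the table is full. */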
static int add_ftrace_tramp(unsigned long tramp)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i]) {
			ftrace_tramps[i] = tramp;
			return 0;
		}

	return -1;
}

/*
 * If this is a compiler generated long_branch trampoline (essentially, a
 * trampoline that has a branch to _mcount()), we re-write the branch to
 * instead go to ftrace_[regs_]caller() and note down the location of this
 * trampoline.
 */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	ppc_inst_t op;
	unsigned long ptr;

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (ftrace_tramps[i] == tramp)
			return 0;

	/* New trampoline -- read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24-bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* let's find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		ptr = ppc_global_function_entry((void *)ftrace_caller);

	if (patch_branch((u32 *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}

static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
					(void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_nop_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		return -EINVAL;
	}

	return __ftrace_make_nop(mod, rec, addr);
}

#ifdef CONFIG_MODULES
/*
 * Examine the existing instructions at the call site for
 * __ftrace_make_call(). They should effectively be a nop and follow the
 * formal constraints of the ABI in use. Return false if they don't.
 */
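/*
 * With DYNAMIC_FTRACE_WITH_REGS a disabled call site is a single nop;
 * otherwise it is the "b +8; ld r2,XX(r1)" pair installed by
 * __ftrace_make_nop().
 */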
static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
	else
		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
		       ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
}

static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op[2];
	void *ip = (void *)rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = ftrace_lookup_module(rec);

	if (!mod)
		return -EINVAL;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(op, ip))
		return -EFAULT;

	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
	    copy_inst_from_kernel_nofault(op + 1, ip + 4))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
		       ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
		return -EINVAL;
	}

	/* If we never set up ftrace trampoline(s), then bail */
	if (!mod->arch.tramp ||
	    (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MODULES */

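/*
 * Enable tracing at a patch site in core kernel text: check that addr
 * is one of the known ftrace entry points, that the site currently
 * holds a nop, then patch in a 'bl' to a reachable trampoline.
 */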
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);

	if (ptr != entry) {
		pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
		return -EINVAL;
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %08lx\n",
		       ip, ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_call_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#ifdef CONFIG_MODULES
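/*
 * Redirect a module call site from old_addr to addr in one step (for
 * example when switching between the regs and non-regs ftrace callers)
 * without passing through a disabled state.
 */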
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
					unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = ftrace_lookup_module(rec);

	if (!mod)
		return -EINVAL;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	return 0;
}
#endif

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
}
#endif

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	/* Also update the regs callback function */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func, 1);
		ret = ftrace_modify_code(ip, old, new);
	}

	return ret;
}

/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
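 *
 * A single instruction can be patched atomically with
 * patch_instruction() on this platform, so other CPUs may keep
 * executing while the text is updated.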
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#ifdef CONFIG_PPC64
#define PACATOC offsetof(struct paca_struct, kernel_toc)

extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];

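/*
 * The trampoline placed after .init.text disappears when initmem is
 * freed; drop it from the table so find_ftrace_tramp() can no longer
 * hand it out.
 */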
void ftrace_free_init_tramp(void)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
			ftrace_tramps[i] = 0;
			return;
		}
}

int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
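	/*
	 * Stub trampoline: load the kernel TOC pointer from the paca,
	 * form the address of ftrace_[regs_]caller as a TOC-relative
	 * offset (the addis/addi immediates are filled in below), then
	 * branch there via CTR.
	 */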
	u32 stub_insns[] = {
		PPC_RAW_LD(_R12, _R13, PACATOC),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
	};
	unsigned long addr;
	long reladdr;

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		addr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		addr = ppc_global_function_entry((void *)ftrace_caller);

	reladdr = addr - kernel_toc_addr();

	if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

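/*
 * The ftrace_graph_call site inside the ftrace caller branches either
 * to ftrace_graph_caller (graph tracing enabled) or to the
 * ftrace_graph_stub (disabled); flip it between the two.
 */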
static int ftrace_modify_ftrace_graph_caller(bool enable)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	ppc_inst_t old, new;

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
		return 0;

	old = ftrace_call_replace(ip, enable ? stub : addr, 0);
	new = ftrace_call_replace(ip, enable ? addr : stub, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}

/*
 * Hook the return address and push it onto the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 */
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
	unsigned long return_hooker;
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	bit = ftrace_test_recursion_trylock(ip, parent);
	if (bit < 0)
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
out:
	return parent;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	arch_ftrace_regs(fregs)->regs.link = __prepare_ftrace_return(parent_ip, ip, arch_ftrace_regs(fregs)->regs.gpr[1]);
}
#else
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp)
{
	return __prepare_ftrace_return(parent, ip, sp);
}
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_PPC64_ELF_ABI_V1
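/*
 * Under the ELFv1 ABI a function's text symbol carries a leading dot
 * (".foo", as opposed to the descriptor symbol "foo"); skip it so that
 * plain function names still match.
 */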
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	if (str[0] == '.' && search[0] != '.')
		return str + 1;
	else
		return str;
}
#endif /* CONFIG_PPC64_ELF_ABI_V1 */
839