xref: /linux/arch/powerpc/kernel/module_64.c (revision ae30cc05bed2fd7eb05e4fb53f412783f05ccb7b)
1 /*  Kernel module help for PPC64.
2     Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
3 
4     This program is free software; you can redistribute it and/or modify
5     it under the terms of the GNU General Public License as published by
6     the Free Software Foundation; either version 2 of the License, or
7     (at your option) any later version.
8 
9     This program is distributed in the hope that it will be useful,
10     but WITHOUT ANY WARRANTY; without even the implied warranty of
11     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12     GNU General Public License for more details.
13 
14     You should have received a copy of the GNU General Public License
15     along with this program; if not, write to the Free Software
16     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
17 */
18 
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 
21 #include <linux/module.h>
22 #include <linux/elf.h>
23 #include <linux/moduleloader.h>
24 #include <linux/err.h>
25 #include <linux/vmalloc.h>
26 #include <linux/ftrace.h>
27 #include <linux/bug.h>
28 #include <linux/uaccess.h>
29 #include <asm/module.h>
30 #include <asm/firmware.h>
31 #include <asm/code-patching.h>
32 #include <linux/sort.h>
33 #include <asm/setup.h>
34 #include <asm/sections.h>
35 
36 /* FIXME: We don't do .init separately.  To do this, we'd need to have
37    a separate r2 value in the init and core section, and stub between
38    them, too.
39 
40    Using a magic allocator which places modules within 32MB solves
41    this, and makes other things simpler.  Anton?
42    --RR.  */
43 
44 #ifdef PPC64_ELF_ABI_v2
45 
46 /* An address is simply the address of the function. */
47 typedef unsigned long func_desc_t;
48 
49 static func_desc_t func_desc(unsigned long addr)
50 {
51 	return addr;
52 }
53 static unsigned long func_addr(unsigned long addr)
54 {
55 	return addr;
56 }
57 static unsigned long stub_func_addr(func_desc_t func)
58 {
59 	return func;
60 }
61 
62 /* PowerPC64 specific values for the Elf64_Sym st_other field.  */
63 #define STO_PPC64_LOCAL_BIT	5
64 #define STO_PPC64_LOCAL_MASK	(7 << STO_PPC64_LOCAL_BIT)
65 #define PPC64_LOCAL_ENTRY_OFFSET(other)					\
66  (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)
67 
68 static unsigned int local_entry_offset(const Elf64_Sym *sym)
69 {
70 	/* sym->st_other indicates the offset to the local entry point
71 	 * (otherwise the callee will assume r12 is the address of the
72 	 * start of the function and try to derive r2 from it). */
73 	return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
74 }
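
/*
 * For reference (the encoding comes from the ELFv2 ABI, so treat this table
 * as informative): the three-bit field value v maps to the offset
 * ((1 << v) >> 2) << 2, i.e.
 *
 *	v = 0 or 1  ->  0	(no separate local entry point)
 *	v = 2       ->  4
 *	v = 3       ->  8	(the common case: the local entry skips the
 *				 two-instruction r2 setup at the global entry)
 *	v = 4..7    ->  16, 32, 64, 128
 */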
75 #else
76 
77 /* An address is the address of the OPD entry, which contains the function's address. */
78 typedef struct ppc64_opd_entry func_desc_t;
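
/*
 * For reference, struct ppc64_opd_entry (asm/elf.h) models the first two
 * doublewords of an ELFv1 function descriptor, roughly:
 *
 *	unsigned long funcaddr;		address of the function's code
 *	unsigned long r2;		TOC pointer the function expects
 *
 * (the ABI also defines a third, environment-pointer doubleword that the
 * kernel doesn't need).  So "the address of a function" here is really the
 * address of this little record, and the stub below loads both fields from it.
 */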
79 
80 static func_desc_t func_desc(unsigned long addr)
81 {
82 	return *(struct ppc64_opd_entry *)addr;
83 }
84 static unsigned long func_addr(unsigned long addr)
85 {
86 	return func_desc(addr).funcaddr;
87 }
88 static unsigned long stub_func_addr(func_desc_t func)
89 {
90 	return func.funcaddr;
91 }
92 static unsigned int local_entry_offset(const Elf64_Sym *sym)
93 {
94 	return 0;
95 }
96 
97 void *dereference_module_function_descriptor(struct module *mod, void *ptr)
98 {
99 	if (ptr < (void *)mod->arch.start_opd ||
100 			ptr >= (void *)mod->arch.end_opd)
101 		return ptr;
102 
103 	return dereference_function_descriptor(ptr);
104 }
105 #endif
106 
107 #define STUB_MAGIC 0x73747562 /* stub */
108 
109 /* Like PPC32, we need little trampolines to make jumps beyond the reach
110    of a 24-bit relative branch (e.g. into the kernel itself).  But on PPC64,
111    these need to be used for every jump, actually, to reset r2 (TOC+0x8000). */
112 struct ppc64_stub_entry
113 {
114 	/* 28 byte jump instruction sequence (7 instructions). We only
115 	 * need 6 instructions on ABIv2 but we always allocate 7 so
116 	 * we don't have to modify the trampoline load instruction. */
117 	u32 jump[7];
118 	/* Used by ftrace to identify stubs */
119 	u32 magic;
120 	/* Data for the above code */
121 	func_desc_t funcdata;
122 };
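
/*
 * Resulting layout (byte offsets), which is what the fixed displacements in
 * ppc64_stub_insns below rely on:
 *
 *	0x00-0x1b  jump[0..6]	the trampoline code
 *	0x1c       magic	STUB_MAGIC, so ftrace can recognise a stub
 *	0x20       funcdata	target address (ELFv2), or a copy of the OPD
 *				entry (ELFv1: funcaddr at 0x20, r2 at 0x28)
 *
 * Hence "ld r12,32(r11)" below fetches the target address, and on ELFv1
 * "ld r2,40(r11)" fetches the target's TOC pointer.
 */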
123 
124 /*
125  * PPC64 uses 24-bit jumps, but we need to jump into other modules or
126  * the kernel, which may be further away.  So we jump to a stub.
127  *
128  * For ELFv1 we need to use this to set up the new r2 value (aka TOC
129  * pointer).  For ELFv2 it's the callee's responsibility to set up the
130  * new r2, but for both we need to save the old r2.
131  *
132  * We could simply patch the new r2 value and function pointer into
133  * the stub, but it's significantly shorter to put these values at the
134  * end of the stub code, and patch the stub address (32 bits relative
135  * to the TOC ptr, r2) into the stub.
136  */
137 
138 static u32 ppc64_stub_insns[] = {
139 	0x3d620000,			/* addis   r11,r2, <high> */
140 	0x396b0000,			/* addi    r11,r11, <low> */
141 	/* Save current r2 value in magic place on the stack. */
142 	0xf8410000|R2_STACK_OFFSET,	/* std     r2,R2_STACK_OFFSET(r1) */
143 	0xe98b0020,			/* ld      r12,32(r11) */
144 #ifdef PPC64_ELF_ABI_v1
145 	/* Set up new r2 from function descriptor */
146 	0xe84b0028,			/* ld      r2,40(r11) */
147 #endif
148 	0x7d8903a6,			/* mtctr   r12 */
149 	0x4e800420			/* bctr */
150 };
151 
152 #ifdef CONFIG_DYNAMIC_FTRACE
153 int module_trampoline_target(struct module *mod, unsigned long addr,
154 			     unsigned long *target)
155 {
156 	struct ppc64_stub_entry *stub;
157 	func_desc_t funcdata;
158 	u32 magic;
159 
160 	if (!within_module_core(addr, mod)) {
161 		pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
162 		return -EFAULT;
163 	}
164 
165 	stub = (struct ppc64_stub_entry *)addr;
166 
167 	if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
168 		pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
169 		return -EFAULT;
170 	}
171 
172 	if (magic != STUB_MAGIC) {
173 		pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
174 		return -EFAULT;
175 	}
176 
177 	if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
178 		pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
179 		return -EFAULT;
180 	}
181 
182 	*target = stub_func_addr(funcdata);
183 
184 	return 0;
185 }
186 #endif
187 
188 /* Count how many different 24-bit relocations (different symbol,
189    different addend) there are. */
190 static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
191 {
192 	unsigned int i, r_info, r_addend, _count_relocs;
193 
194 	/* FIXME: Only count external ones --RR */
195 	_count_relocs = 0;
196 	r_info = 0;
197 	r_addend = 0;
198 	for (i = 0; i < num; i++)
199 		/* Only count 24-bit relocs, others don't need stubs */
200 		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
201 		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
202 		     r_addend != rela[i].r_addend)) {
203 			_count_relocs++;
204 			r_info = ELF64_R_SYM(rela[i].r_info);
205 			r_addend = rela[i].r_addend;
206 		}
207 
208 	return _count_relocs;
209 }
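
/*
 * Worked example: for 24-bit relocations against the (symbol, addend) pairs
 *
 *	(A,0) (A,0) (A,0x10) (B,0) (B,0)
 *
 * count_relocs() returns 3: one stub per distinct pair.  Note it only
 * compares each entry with the previous one, so it relies on the caller
 * having sorted the table first (see get_stubs_size() below).
 */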
210 
211 static int relacmp(const void *_x, const void *_y)
212 {
213 	const Elf64_Rela *x, *y;
214 
215 	y = (Elf64_Rela *)_x;
216 	x = (Elf64_Rela *)_y;
217 
218 	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
219 	 * make the comparison cheaper/faster. It won't affect the sorting or
220 	 * the counting algorithms' performance
221 	 */
222 	if (x->r_info < y->r_info)
223 		return -1;
224 	else if (x->r_info > y->r_info)
225 		return 1;
226 	else if (x->r_addend < y->r_addend)
227 		return -1;
228 	else if (x->r_addend > y->r_addend)
229 		return 1;
230 	else
231 		return 0;
232 }
233 
234 static void relaswap(void *_x, void *_y, int size)
235 {
236 	uint64_t *x, *y, tmp;
237 	int i;
238 
239 	y = (uint64_t *)_x;
240 	x = (uint64_t *)_y;
241 
242 	for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
243 		tmp = x[i];
244 		x[i] = y[i];
245 		y[i] = tmp;
246 	}
247 }
248 
249 /* Get size of potential trampolines required. */
250 static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
251 				    const Elf64_Shdr *sechdrs)
252 {
253 	/* One extra reloc so it's always 0-funcaddr terminated */
254 	unsigned long relocs = 1;
255 	unsigned i;
256 
257 	/* Every relocated section... */
258 	for (i = 1; i < hdr->e_shnum; i++) {
259 		if (sechdrs[i].sh_type == SHT_RELA) {
260 			pr_debug("Found relocations in section %u\n", i);
261 			pr_debug("Ptr: %p.  Number: %Lu\n",
262 			       (void *)sechdrs[i].sh_addr,
263 			       sechdrs[i].sh_size / sizeof(Elf64_Rela));
264 
265 			/* Sort the relocation information based on a symbol and
266 			 * addend key. This is a stable O(n*log n) complexity
267 			 * algorithm, but it reduces the complexity of
268 			 * count_relocs() to linear complexity O(n).
269 			 */
270 			sort((void *)sechdrs[i].sh_addr,
271 			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
272 			     sizeof(Elf64_Rela), relacmp, relaswap);
273 
274 			relocs += count_relocs((void *)sechdrs[i].sh_addr,
275 					       sechdrs[i].sh_size
276 					       / sizeof(Elf64_Rela));
277 		}
278 	}
279 
280 #ifdef CONFIG_DYNAMIC_FTRACE
281 	/* make a trampoline to ftrace_caller */
282 	relocs++;
283 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
284 	/* an additional one for ftrace_regs_caller */
285 	relocs++;
286 #endif
287 #endif
288 
289 	pr_debug("Looks like a total of %lu stubs, max\n", relocs);
290 	return relocs * sizeof(struct ppc64_stub_entry);
291 }
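
/*
 * Sizing example (numbers are illustrative): 10 distinct 24-bit branch
 * targets, plus the zero terminator, plus two ftrace stubs (with
 * DYNAMIC_FTRACE and DYNAMIC_FTRACE_WITH_REGS both enabled) gives 13
 * entries.  At 40 bytes per ELFv2 stub (28 of code, 4 of magic, 8 of
 * funcdata) that reserves 520 bytes of .stubs; ELFv1 stubs are 8 bytes
 * bigger because funcdata is a full OPD entry.
 */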
292 
293 /* Still needed for ELFv2, for .TOC. */
294 static void dedotify_versions(struct modversion_info *vers,
295 			      unsigned long size)
296 {
297 	struct modversion_info *end;
298 
299 	for (end = (void *)vers + size; vers < end; vers++)
300 		if (vers->name[0] == '.') {
301 			memmove(vers->name, vers->name+1, strlen(vers->name));
302 		}
303 }
304 
305 /*
306  * Undefined symbols which refer to .funcname are hacked to refer to
307  * funcname instead. Make .TOC. seem to be defined (value set later).
308  */
309 static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
310 {
311 	unsigned int i;
312 
313 	for (i = 1; i < numsyms; i++) {
314 		if (syms[i].st_shndx == SHN_UNDEF) {
315 			char *name = strtab + syms[i].st_name;
316 			if (name[0] == '.') {
317 				if (strcmp(name+1, "TOC.") == 0)
318 					syms[i].st_shndx = SHN_ABS;
319 				syms[i].st_name++;
320 			}
321 		}
322 	}
323 }
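
/*
 * Example: an ELFv1 object's call to ".memcpy" (ELFv1 convention: the dot
 * symbol names the code entry point) is left undefined by the module build;
 * dedotify() bumps st_name past the dot so it resolves against the kernel's
 * "memcpy" export.  The special undefined ".TOC." symbol is additionally
 * marked SHN_ABS so that find_dot_toc() below can find it and give it this
 * module's r2 value at relocation time.
 */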
324 
325 static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
326 			       const char *strtab,
327 			       unsigned int symindex)
328 {
329 	unsigned int i, numsyms;
330 	Elf64_Sym *syms;
331 
332 	syms = (Elf64_Sym *)sechdrs[symindex].sh_addr;
333 	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
334 
335 	for (i = 1; i < numsyms; i++) {
336 		if (syms[i].st_shndx == SHN_ABS
337 		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
338 			return &syms[i];
339 	}
340 	return NULL;
341 }
342 
343 int module_frob_arch_sections(Elf64_Ehdr *hdr,
344 			      Elf64_Shdr *sechdrs,
345 			      char *secstrings,
346 			      struct module *me)
347 {
348 	unsigned int i;
349 
350 	/* Find .toc and .stubs sections, symtab and strtab */
351 	for (i = 1; i < hdr->e_shnum; i++) {
352 		char *p;
353 		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
354 			me->arch.stubs_section = i;
355 		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0) {
356 			me->arch.toc_section = i;
357 			if (sechdrs[i].sh_addralign < 8)
358 				sechdrs[i].sh_addralign = 8;
359 		}
360 		else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
361 			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
362 					  sechdrs[i].sh_size);
363 		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".opd")) {
364 			me->arch.start_opd = sechdrs[i].sh_addr;
365 			me->arch.end_opd = sechdrs[i].sh_addr +
366 					   sechdrs[i].sh_size;
367 		}
368 
369 		/* We don't handle .init for the moment: rename to _init */
370 		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
371 			p[0] = '_';
372 
373 		if (sechdrs[i].sh_type == SHT_SYMTAB)
374 			dedotify((void *)hdr + sechdrs[i].sh_offset,
375 				 sechdrs[i].sh_size / sizeof(Elf64_Sym),
376 				 (void *)hdr
377 				 + sechdrs[sechdrs[i].sh_link].sh_offset);
378 	}
379 
380 	if (!me->arch.stubs_section) {
381 		pr_err("%s: doesn't contain .stubs.\n", me->name);
382 		return -ENOEXEC;
383 	}
384 
385 	/* If we don't have a .toc, just use .stubs.  We need to set r2
386 	   to some reasonable value in case the module calls out to
387 	   other functions via a stub, or if a function pointer escapes
388 	   the module by some means.  */
389 	if (!me->arch.toc_section)
390 		me->arch.toc_section = me->arch.stubs_section;
391 
392 	/* Override the stubs size */
393 	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
394 	return 0;
395 }
396 
397 /*
398  * r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives
399  * the maximum span for instructions which use a signed 16-bit offset). Round down
400  * to a 256 byte boundary for the odd case where we are setting up r2 without a
401  * .toc section.
402  */
403 static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
404 {
405 	return (sechdrs[me->arch.toc_section].sh_addr & ~0xfful) + 0x8000;
406 }
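
/*
 * Example (address made up): if this module's .toc section landed at
 * 0xd000000002348010 then
 *
 *	my_r2() = (0xd000000002348010 & ~0xff) + 0x8000
 *	        = 0xd000000002348000 + 0x8000
 *	        = 0xd000000002350000
 *
 * so r2 sits 0x8000 past a 256-byte-aligned base, and a signed 16-bit
 * displacement from r2 can reach the whole 64k around the start of the TOC.
 */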
407 
408 /* Both low and high 16 bits are added as SIGNED additions, so if the low
409    16 bits have the high bit set, the high 16 bits must be adjusted.  These
410    macros do that (stolen from binutils). */
411 #define PPC_LO(v) ((v) & 0xffff)
412 #define PPC_HI(v) (((v) >> 16) & 0xffff)
413 #define PPC_HA(v) PPC_HI ((v) + 0x8000)
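
/*
 * Worked example: to add 0x12348765 to a base register using addis/addi,
 * PPC_LO(0x12348765) = 0x8765, which addi sign-extends to -0x789b, so the
 * high half must be pre-adjusted: PPC_HA(0x12348765) =
 * PPC_HI(0x12348765 + 0x8000) = 0x1235, and 0x12350000 - 0x789b gets us
 * back to 0x12348765.
 */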
414 
415 /* Patch stub to reference function and correct r2 value. */
416 static inline int create_stub(const Elf64_Shdr *sechdrs,
417 			      struct ppc64_stub_entry *entry,
418 			      unsigned long addr,
419 			      struct module *me)
420 {
421 	long reladdr;
422 
423 	memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
424 
425 	/* Stub uses address relative to r2. */
426 	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
427 	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
428 		pr_err("%s: Address %p of stub out of range of %p.\n",
429 		       me->name, (void *)entry, (void *)my_r2(sechdrs, me));
430 		return 0;
431 	}
432 	pr_debug("Stub %p gets data from reladdr %li\n", entry, reladdr);
433 
434 	entry->jump[0] |= PPC_HA(reladdr);
435 	entry->jump[1] |= PPC_LO(reladdr);
436 	entry->funcdata = func_desc(addr);
437 	entry->magic = STUB_MAGIC;
438 
439 	return 1;
440 }
441 
442 /* Create a stub to jump to the function described by this OPD entry or
443    pointer: we need the stub to set up the TOC ptr (r2) for the function. */
444 static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
445 				   unsigned long addr,
446 				   struct module *me)
447 {
448 	struct ppc64_stub_entry *stubs;
449 	unsigned int i, num_stubs;
450 
451 	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);
452 
453 	/* Find this stub, or if that fails, the next avail. entry */
454 	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
455 	for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
456 		if (WARN_ON(i >= num_stubs))
457 			return 0;
458 
459 		if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
460 			return (unsigned long)&stubs[i];
461 	}
462 
463 	if (!create_stub(sechdrs, &stubs[i], addr, me))
464 		return 0;
465 
466 	return (unsigned long)&stubs[i];
467 }
468 
469 #ifdef CC_USING_MPROFILE_KERNEL
470 static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction)
471 {
472 	if (strcmp("_mcount", name))
473 		return false;
474 
475 	/*
476 	 * Check if this is one of the -mprofile-kernel sequences.
477 	 */
478 	if (instruction[-1] == PPC_INST_STD_LR &&
479 	    instruction[-2] == PPC_INST_MFLR)
480 		return true;
481 
482 	if (instruction[-1] == PPC_INST_MFLR)
483 		return true;
484 
485 	return false;
486 }
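
/*
 * For reference, the call sites matched above look roughly like
 *
 *	mflr	r0
 *	std	r0,16(r1)
 *	bl	_mcount
 *
 * or, with later -mprofile-kernel compilers, just
 *
 *	mflr	r0
 *	bl	_mcount
 *
 * (the exact sequence is up to the toolchain).  "instruction" points at the
 * bl, so the checks look one and two instructions back for those prologues.
 */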
487 
488 /*
489  * In case of _mcount calls, do not save the current callee's TOC (in r2) into
490  * the original caller's stack frame. If we did we would clobber the saved TOC
491  * value of the original caller.
492  */
493 static void squash_toc_save_inst(const char *name, unsigned long addr)
494 {
495 	struct ppc64_stub_entry *stub = (struct ppc64_stub_entry *)addr;
496 
497 	/* Only for calls to _mcount */
498 	if (strcmp("_mcount", name) != 0)
499 		return;
500 
501 	stub->jump[2] = PPC_INST_NOP;
502 }
503 #else
504 static void squash_toc_save_inst(const char *name, unsigned long addr) { }
505 
506 static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction)
507 {
508 	return false;
509 }
510 #endif
511 
512 /* We expect a nop next: if it is, replace it with an instruction to
513    restore r2. */
514 static int restore_r2(const char *name, u32 *instruction, struct module *me)
515 {
516 	u32 *prev_insn = instruction - 1;
517 
518 	if (is_mprofile_mcount_callsite(name, prev_insn))
519 		return 1;
520 
521 	/*
522 	 * Make sure the branch isn't a sibling call.  Sibling calls aren't
523 	 * "link" branches and they don't return, so they don't need the r2
524 	 * restore afterwards.
525 	 */
526 	if (!instr_is_relative_link_branch(*prev_insn))
527 		return 1;
528 
529 	if (*instruction != PPC_INST_NOP) {
530 		pr_err("%s: Expected nop after call, got %08x at %pS\n",
531 			me->name, *instruction, instruction);
532 		return 0;
533 	}
534 	/* ld r2,R2_STACK_OFFSET(r1) */
535 	*instruction = PPC_INST_LD_TOC;
536 	return 1;
537 }
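
/*
 * Call-site example ("some_func" is a placeholder): the compiler emits
 *
 *	bl	some_func
 *	nop
 *
 * and after apply_relocate_add() below has done its job this becomes
 *
 *	bl	<stub for some_func>
 *	ld	r2,R2_STACK_OFFSET(r1)
 *
 * so the caller's TOC, saved on the stack by the stub, is restored as soon
 * as the callee returns.
 */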
538 
539 int apply_relocate_add(Elf64_Shdr *sechdrs,
540 		       const char *strtab,
541 		       unsigned int symindex,
542 		       unsigned int relsec,
543 		       struct module *me)
544 {
545 	unsigned int i;
546 	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
547 	Elf64_Sym *sym;
548 	unsigned long *location;
549 	unsigned long value;
550 
551 	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
552 	       sechdrs[relsec].sh_info);
553 
554 	/* First time we're called, we can fix up .TOC. */
555 	if (!me->arch.toc_fixed) {
556 		sym = find_dot_toc(sechdrs, strtab, symindex);
557 		/* It's theoretically possible that a module doesn't want a
558 		 * .TOC., so don't fail it just for that. */
559 		if (sym)
560 			sym->st_value = my_r2(sechdrs, me);
561 		me->arch.toc_fixed = true;
562 	}
563 
564 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
565 		/* This is where to make the change */
566 		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
567 			+ rela[i].r_offset;
568 		/* This is the symbol it is referring to */
569 		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
570 			+ ELF64_R_SYM(rela[i].r_info);
571 
572 		pr_debug("RELOC at %p: %li-type as %s (0x%lx) + %li\n",
573 		       location, (long)ELF64_R_TYPE(rela[i].r_info),
574 		       strtab + sym->st_name, (unsigned long)sym->st_value,
575 		       (long)rela[i].r_addend);
576 
577 		/* `Everything is relative'. */
578 		value = sym->st_value + rela[i].r_addend;
579 
580 		switch (ELF64_R_TYPE(rela[i].r_info)) {
581 		case R_PPC64_ADDR32:
582 			/* Simply set it */
583 			*(u32 *)location = value;
584 			break;
585 
586 		case R_PPC64_ADDR64:
587 			/* Simply set it */
588 			*(unsigned long *)location = value;
589 			break;
590 
591 		case R_PPC64_TOC:
592 			*(unsigned long *)location = my_r2(sechdrs, me);
593 			break;
594 
595 		case R_PPC64_TOC16:
596 			/* Subtract TOC pointer */
597 			value -= my_r2(sechdrs, me);
598 			if (value + 0x8000 > 0xffff) {
599 				pr_err("%s: bad TOC16 relocation (0x%lx)\n",
600 				       me->name, value);
601 				return -ENOEXEC;
602 			}
603 			*((uint16_t *) location)
604 				= (*((uint16_t *) location) & ~0xffff)
605 				| (value & 0xffff);
606 			break;
607 
608 		case R_PPC64_TOC16_LO:
609 			/* Subtract TOC pointer */
610 			value -= my_r2(sechdrs, me);
611 			*((uint16_t *) location)
612 				= (*((uint16_t *) location) & ~0xffff)
613 				| (value & 0xffff);
614 			break;
615 
616 		case R_PPC64_TOC16_DS:
617 			/* Subtract TOC pointer */
618 			value -= my_r2(sechdrs, me);
619 			if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
620 				pr_err("%s: bad TOC16_DS relocation (0x%lx)\n",
621 				       me->name, value);
622 				return -ENOEXEC;
623 			}
624 			*((uint16_t *) location)
625 				= (*((uint16_t *) location) & ~0xfffc)
626 				| (value & 0xfffc);
627 			break;
628 
629 		case R_PPC64_TOC16_LO_DS:
630 			/* Subtract TOC pointer */
631 			value -= my_r2(sechdrs, me);
632 			if ((value & 3) != 0) {
633 				pr_err("%s: bad TOC16_LO_DS relocation (0x%lx)\n",
634 				       me->name, value);
635 				return -ENOEXEC;
636 			}
637 			*((uint16_t *) location)
638 				= (*((uint16_t *) location) & ~0xfffc)
639 				| (value & 0xfffc);
640 			break;
641 
642 		case R_PPC64_TOC16_HA:
643 			/* Subtract TOC pointer */
644 			value -= my_r2(sechdrs, me);
645 			value = ((value + 0x8000) >> 16);
646 			*((uint16_t *) location)
647 				= (*((uint16_t *) location) & ~0xffff)
648 				| (value & 0xffff);
649 			break;
650 
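		/*
		 * R_PPC_REL24 below patches a 24-bit PC-relative branch
		 * (b/bl): the LI field lives in the 0x03fffffc bits, is
		 * word-aligned and sign-extended, so its reach is +/- 32MB;
		 * hence the "value + 0x2000000 > 0x3ffffff" range check and
		 * the stubs for anything further away.
		 */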
651 		case R_PPC_REL24:
652 			/* FIXME: Handle weak symbols here --RR */
653 			if (sym->st_shndx == SHN_UNDEF ||
654 			    sym->st_shndx == SHN_LIVEPATCH) {
655 				/* External: go via stub */
656 				value = stub_for_addr(sechdrs, value, me);
657 				if (!value)
658 					return -ENOENT;
659 				if (!restore_r2(strtab + sym->st_name,
660 							(u32 *)location + 1, me))
661 					return -ENOEXEC;
662 
663 				squash_toc_save_inst(strtab + sym->st_name, value);
664 			} else
665 				value += local_entry_offset(sym);
666 
667 			/* Convert value to relative */
668 			value -= (unsigned long)location;
669 			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
670 				pr_err("%s: REL24 %li out of range!\n",
671 				       me->name, (long int)value);
672 				return -ENOEXEC;
673 			}
674 
675 			/* Only replace bits 2 through 26 */
676 			*(uint32_t *)location
677 				= (*(uint32_t *)location & ~0x03fffffc)
678 				| (value & 0x03fffffc);
679 			break;
680 
681 		case R_PPC64_REL64:
682 			/* 64 bits relative (used by features fixups) */
683 			*location = value - (unsigned long)location;
684 			break;
685 
686 		case R_PPC64_REL32:
687 			/* 32 bits relative (used by relative exception tables) */
688 			*(u32 *)location = value - (unsigned long)location;
689 			break;
690 
691 		case R_PPC64_TOCSAVE:
692 			/*
693 			 * Marker reloc indicates we don't have to save r2.
694 			 * That would only save us one instruction, so ignore
695 			 * it.
696 			 */
697 			break;
698 
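		/*
		 * For the R_PPC64_ENTRY case below, the magic constants
		 * decode (for reference) as:
		 *	0xe84c0000  ld    r2,0(r12)	(offset bits masked)
		 *	0x7c426214  add   r2,r2,r12
		 *	0x3c4c0000  addis r2,r12,0	(@ha patched in)
		 *	0x38420000  addi  r2,r2,0	(@l patched in)
		 */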
699 		case R_PPC64_ENTRY:
700 			/*
701 			 * Optimize ELFv2 large code model entry point if
702 			 * the TOC is within 2GB range of current location.
703 			 */
704 			value = my_r2(sechdrs, me) - (unsigned long)location;
705 			if (value + 0x80008000 > 0xffffffff)
706 				break;
707 			/*
708 			 * Check for the large code model prolog sequence:
709 			 *	ld r2, ...(r12)
710 			 *	add r2, r2, r12
711 			 */
712 			if ((((uint32_t *)location)[0] & ~0xfffc)
713 			    != 0xe84c0000)
714 				break;
715 			if (((uint32_t *)location)[1] != 0x7c426214)
716 				break;
717 			/*
718 			 * If found, replace it with:
719 			 *	addis r2, r12, (.TOC.-func)@ha
720 			 *	addi r2, r2, (.TOC.-func)@l
721 			 */
722 			((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
723 			((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
724 			break;
725 
726 		case R_PPC64_REL16_HA:
727 			/* Subtract location pointer */
728 			value -= (unsigned long)location;
729 			value = ((value + 0x8000) >> 16);
730 			*((uint16_t *) location)
731 				= (*((uint16_t *) location) & ~0xffff)
732 				| (value & 0xffff);
733 			break;
734 
735 		case R_PPC64_REL16_LO:
736 			/* Subtract location pointer */
737 			value -= (unsigned long)location;
738 			*((uint16_t *) location)
739 				= (*((uint16_t *) location) & ~0xffff)
740 				| (value & 0xffff);
741 			break;
742 
743 		default:
744 			pr_err("%s: Unknown ADD relocation: %lu\n",
745 			       me->name,
746 			       (unsigned long)ELF64_R_TYPE(rela[i].r_info));
747 			return -ENOEXEC;
748 		}
749 	}
750 
751 	return 0;
752 }
753 
754 #ifdef CONFIG_DYNAMIC_FTRACE
755 
756 #ifdef CC_USING_MPROFILE_KERNEL
757 
758 #define PACATOC offsetof(struct paca_struct, kernel_toc)
759 
760 /*
761  * For mprofile-kernel we use a special stub for ftrace_caller() because we
762  * can't rely on r2 containing this module's TOC when we enter the stub.
763  *
764  * That can happen if the function calling us didn't need to use the toc. In
765  * that case it won't have set up r2, and the r2 value will be either the
766  * kernel's toc, or possibly another module's toc.
767  *
768  * To deal with that, this stub uses the kernel toc, which is always accessible
769  * via the paca (in r13). The target (ftrace_caller()) is responsible for
770  * saving and restoring the toc before returning.
771  */
772 static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs,
773 				struct module *me, unsigned long addr)
774 {
775 	struct ppc64_stub_entry *entry;
776 	unsigned int i, num_stubs;
777 	static u32 stub_insns[] = {
778 		0xe98d0000 | PACATOC, 	/* ld      r12,PACATOC(r13)	*/
779 		0x3d8c0000,		/* addis   r12,r12,<high>	*/
780 		0x398c0000, 		/* addi    r12,r12,<low>	*/
781 		0x7d8903a6, 		/* mtctr   r12			*/
782 		0x4e800420, 		/* bctr				*/
783 	};
784 	long reladdr;
785 
786 	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*entry);
787 
788 	/* Find the next available stub entry */
789 	entry = (void *)sechdrs[me->arch.stubs_section].sh_addr;
790 	for (i = 0; i < num_stubs && stub_func_addr(entry->funcdata); i++, entry++);
791 
792 	if (i >= num_stubs) {
793 		pr_err("%s: Unable to find a free slot for ftrace stub.\n", me->name);
794 		return 0;
795 	}
796 
797 	memcpy(entry->jump, stub_insns, sizeof(stub_insns));
798 
799 	/* Stub uses address relative to kernel toc (from the paca) */
800 	reladdr = addr - kernel_toc_addr();
801 	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
802 		pr_err("%s: Address of %ps out of range of kernel_toc.\n",
803 							me->name, (void *)addr);
804 		return 0;
805 	}
806 
807 	entry->jump[1] |= PPC_HA(reladdr);
808 	entry->jump[2] |= PPC_LO(reladdr);
809 
810 	/* Even though we don't use funcdata in the stub, it's needed elsewhere. */
811 	entry->funcdata = func_desc(addr);
812 	entry->magic = STUB_MAGIC;
813 
814 	return (unsigned long)entry;
815 }
816 #else
817 static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs,
818 				struct module *me, unsigned long addr)
819 {
820 	return stub_for_addr(sechdrs, addr, me);
821 }
822 #endif
823 
824 int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
825 {
826 	mod->arch.toc = my_r2(sechdrs, mod);
827 	mod->arch.tramp = create_ftrace_stub(sechdrs, mod,
828 					(unsigned long)ftrace_caller);
829 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
830 	mod->arch.tramp_regs = create_ftrace_stub(sechdrs, mod,
831 					(unsigned long)ftrace_regs_caller);
832 	if (!mod->arch.tramp_regs)
833 		return -ENOENT;
834 #endif
835 
836 	if (!mod->arch.tramp)
837 		return -ENOENT;
838 
839 	return 0;
840 }
841 #endif
842