xref: /linux/arch/powerpc/kernel/module_64.c (revision 6dee6ae9d62642e81def4d461d71f13a6496ab59)
1 /*  Kernel module help for PPC64.
2     Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
3 
4     This program is free software; you can redistribute it and/or modify
5     it under the terms of the GNU General Public License as published by
6     the Free Software Foundation; either version 2 of the License, or
7     (at your option) any later version.
8 
9     This program is distributed in the hope that it will be useful,
10     but WITHOUT ANY WARRANTY; without even the implied warranty of
11     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12     GNU General Public License for more details.
13 
14     You should have received a copy of the GNU General Public License
15     along with this program; if not, write to the Free Software
16     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
17 */
18 
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 
21 #include <linux/module.h>
22 #include <linux/elf.h>
23 #include <linux/moduleloader.h>
24 #include <linux/err.h>
25 #include <linux/vmalloc.h>
26 #include <linux/ftrace.h>
27 #include <linux/bug.h>
28 #include <linux/uaccess.h>
29 #include <asm/module.h>
30 #include <asm/firmware.h>
31 #include <asm/code-patching.h>
32 #include <linux/sort.h>
33 #include <asm/setup.h>
34 #include <asm/sections.h>
35 
36 /* FIXME: We don't do .init separately.  To do this, we'd need to have
37    a separate r2 value in the init and core section, and stub between
38    them, too.
39 
40    Using a magic allocator which places modules within 32MB solves
41    this, and makes other things simpler.  Anton?
42    --RR.  */
43 
44 #ifdef PPC64_ELF_ABI_v2
45 
46 /* An address is simply the address of the function. */
47 typedef unsigned long func_desc_t;
48 
49 static func_desc_t func_desc(unsigned long addr)
50 {
51 	return addr;
52 }
53 static unsigned long func_addr(unsigned long addr)
54 {
55 	return addr;
56 }
57 static unsigned long stub_func_addr(func_desc_t func)
58 {
59 	return func;
60 }
61 
62 /* PowerPC64 specific values for the Elf64_Sym st_other field.  */
63 #define STO_PPC64_LOCAL_BIT	5
64 #define STO_PPC64_LOCAL_MASK	(7 << STO_PPC64_LOCAL_BIT)
65 #define PPC64_LOCAL_ENTRY_OFFSET(other)					\
66  (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)
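
/*
 * Worked example, derived from the macro above: the usual ELFv2 global
 * entry prologue is two instructions (addis r2,r12,...; addi r2,r2,...),
 * which toolchains typically record as 3 in the STO_PPC64_LOCAL bits, so
 *	PPC64_LOCAL_ENTRY_OFFSET(3 << STO_PPC64_LOCAL_BIT)
 *		= ((1 << 3) >> 2) << 2 = 8
 * i.e. the local entry point is 8 bytes past the global one.  Values of
 * 0 or 1 map to an offset of 0.
 */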
67 
68 static unsigned int local_entry_offset(const Elf64_Sym *sym)
69 {
70 	/* sym->st_other indicates offset to local entry point
71 	 * (otherwise it will assume r12 is the address of the start
72 	 * of function and try to derive r2 from it). */
73 	return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
74 }
75 #else
76 
77 /* An address is address of the OPD entry, which contains address of fn. */
78 typedef struct ppc64_opd_entry func_desc_t;
79 
80 static func_desc_t func_desc(unsigned long addr)
81 {
82 	return *(struct ppc64_opd_entry *)addr;
83 }
84 static unsigned long func_addr(unsigned long addr)
85 {
86 	return func_desc(addr).funcaddr;
87 }
88 static unsigned long stub_func_addr(func_desc_t func)
89 {
90 	return func.funcaddr;
91 }
92 static unsigned int local_entry_offset(const Elf64_Sym *sym)
93 {
94 	return 0;
95 }
96 
97 void *dereference_module_function_descriptor(struct module *mod, void *ptr)
98 {
99 	if (ptr < (void *)mod->arch.start_opd ||
100 			ptr >= (void *)mod->arch.end_opd)
101 		return ptr;
102 
103 	return dereference_function_descriptor(ptr);
104 }
105 #endif
106 
107 #define STUB_MAGIC 0x73747562 /* stub */
108 
109 /* Like PPC32, we need little trampolines to do > 24-bit jumps (into
110    the kernel itself).  But on PPC64, these need to be used for every
111    jump, actually, to reset r2 (TOC+0x8000). */
112 struct ppc64_stub_entry
113 {
114 	/* 28 byte jump instruction sequence (7 instructions). We only
115 	 * need 6 instructions on ABIv2 but we always allocate 7 so
116 	 * we don't have to modify the trampoline load instruction. */
117 	u32 jump[7];
118 	/* Used by ftrace to identify stubs */
119 	u32 magic;
120 	/* Data for the above code */
121 	func_desc_t funcdata;
122 };
123 
124 /*
125  * PPC64 uses 24 bit jumps, but we need to jump into other modules or
126  * the kernel which may be further.  So we jump to a stub.
127  *
128  * For ELFv1 we need to use this to set up the new r2 value (aka TOC
129  * pointer).  For ELFv2 it's the callee's responsibility to set up the
130  * new r2, but for both we need to save the old r2.
131  *
132  * We could simply patch the new r2 value and function pointer into
133  * the stub, but it's significantly shorter to put these values at the
134  * end of the stub code, and patch the stub address (32-bits relative
135  * to the TOC ptr, r2) into the stub.
136  */
137 
138 static u32 ppc64_stub_insns[] = {
139 	0x3d620000,			/* addis   r11,r2, <high> */
140 	0x396b0000,			/* addi    r11,r11, <low> */
141 	/* Save current r2 value in magic place on the stack. */
142 	0xf8410000|R2_STACK_OFFSET,	/* std     r2,R2_STACK_OFFSET(r1) */
143 	0xe98b0020,			/* ld      r12,32(r11) */
144 #ifdef PPC64_ELF_ABI_v1
145 	/* Set up new r2 from function descriptor */
146 	0xe84b0028,			/* ld      r2,40(r11) */
147 #endif
148 	0x7d8903a6,			/* mtctr   r12 */
149 	0x4e800420			/* bctr */
150 };
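
/*
 * A layout sketch tying the hard-coded displacements above to struct
 * ppc64_stub_entry: jump[7] occupies bytes 0-27 and magic bytes 28-31,
 * so funcdata starts at offset 32 in both ABIs, which is what
 * "ld r12,32(r11)" fetches.  On ELFv1, funcdata is an OPD entry whose
 * TOC value is the second doubleword, hence "ld r2,40(r11)".
 */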
151 
152 #ifdef CONFIG_DYNAMIC_FTRACE
153 int module_trampoline_target(struct module *mod, unsigned long addr,
154 			     unsigned long *target)
155 {
156 	struct ppc64_stub_entry *stub;
157 	func_desc_t funcdata;
158 	u32 magic;
159 
160 	if (!within_module_core(addr, mod)) {
161 		pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
162 		return -EFAULT;
163 	}
164 
165 	stub = (struct ppc64_stub_entry *)addr;
166 
167 	if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
168 		pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
169 		return -EFAULT;
170 	}
171 
172 	if (magic != STUB_MAGIC) {
173 		pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
174 		return -EFAULT;
175 	}
176 
177 	if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
178 		pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
179 		return -EFAULT;
180 	}
181 
182 	*target = stub_func_addr(funcdata);
183 
184 	return 0;
185 }
186 #endif
187 
188 /* Count how many different 24-bit relocations there are (different
189    symbol, different addend). */
190 static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
191 {
192 	unsigned int i, r_info, r_addend, _count_relocs;
193 
194 	/* FIXME: Only count external ones --RR */
195 	_count_relocs = 0;
196 	r_info = 0;
197 	r_addend = 0;
198 	for (i = 0; i < num; i++)
199 		/* Only count 24-bit relocs, others don't need stubs */
200 		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
201 		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
202 		     r_addend != rela[i].r_addend)) {
203 			_count_relocs++;
204 			r_info = ELF64_R_SYM(rela[i].r_info);
205 			r_addend = rela[i].r_addend;
206 		}
207 
208 	return _count_relocs;
209 }
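
/*
 * Note that the loop above only compares each entry with the one
 * immediately before it, so duplicates are noticed only when they are
 * adjacent.  For example (illustrative), the 24-bit relocations
 * (symA,0), (symB,8), (symA,0) would be counted as three, whereas the
 * sorted order (symA,0), (symA,0), (symB,8) counts as two.  That is
 * why get_stubs_size() sorts each RELA section before counting.
 */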
210 
211 static int relacmp(const void *_x, const void *_y)
212 {
213 	const Elf64_Rela *x, *y;
214 
215 	y = (Elf64_Rela *)_x;
216 	x = (Elf64_Rela *)_y;
217 
218 	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
219 	 * make the comparison cheaper/faster. It won't affect the sorting or
220 	 * the counting algorithms' performance
221 	 */
222 	if (x->r_info < y->r_info)
223 		return -1;
224 	else if (x->r_info > y->r_info)
225 		return 1;
226 	else if (x->r_addend < y->r_addend)
227 		return -1;
228 	else if (x->r_addend > y->r_addend)
229 		return 1;
230 	else
231 		return 0;
232 }
233 
234 static void relaswap(void *_x, void *_y, int size)
235 {
236 	uint64_t *x, *y, tmp;
237 	int i;
238 
239 	y = (uint64_t *)_x;
240 	x = (uint64_t *)_y;
241 
242 	for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
243 		tmp = x[i];
244 		x[i] = y[i];
245 		y[i] = tmp;
246 	}
247 }
248 
249 /* Get size of potential trampolines required. */
250 static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
251 				    const Elf64_Shdr *sechdrs)
252 {
253 	/* One extra reloc so it's always 0-funcaddr terminated */
254 	unsigned long relocs = 1;
255 	unsigned i;
256 
257 	/* Every relocated section... */
258 	for (i = 1; i < hdr->e_shnum; i++) {
259 		if (sechdrs[i].sh_type == SHT_RELA) {
260 			pr_debug("Found relocations in section %u\n", i);
261 			pr_debug("Ptr: %p.  Number: %Lu\n",
262 			       (void *)sechdrs[i].sh_addr,
263 			       sechdrs[i].sh_size / sizeof(Elf64_Rela));
264 
265 			/* Sort the relocation information based on a symbol and
266 			 * addend key. This is a stable O(n*log n) complexity
267 			 * algorithm but it will reduce the complexity of
268 			 * count_relocs() to linear complexity O(n)
269 			 */
270 			sort((void *)sechdrs[i].sh_addr,
271 			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
272 			     sizeof(Elf64_Rela), relacmp, relaswap);
273 
274 			relocs += count_relocs((void *)sechdrs[i].sh_addr,
275 					       sechdrs[i].sh_size
276 					       / sizeof(Elf64_Rela));
277 		}
278 	}
279 
280 #ifdef CONFIG_DYNAMIC_FTRACE
281 	/* make the trampoline to the ftrace_caller */
282 	relocs++;
283 #endif
284 
285 	pr_debug("Looks like a total of %lu stubs, max\n", relocs);
286 	return relocs * sizeof(struct ppc64_stub_entry);
287 }
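
/*
 * So, as a rough worked example, a module with ten distinct 24-bit
 * relocation targets reserves ten stub slots plus the terminating entry
 * (plus one more for ftrace if CONFIG_DYNAMIC_FTRACE is set), each of
 * sizeof(struct ppc64_stub_entry) bytes.
 */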
288 
289 /* Still needed for ELFv2, for .TOC. */
290 static void dedotify_versions(struct modversion_info *vers,
291 			      unsigned long size)
292 {
293 	struct modversion_info *end;
294 
295 	for (end = (void *)vers + size; vers < end; vers++)
296 		if (vers->name[0] == '.') {
297 			memmove(vers->name, vers->name+1, strlen(vers->name));
298 		}
299 }
300 
301 /*
302  * Undefined symbols which refer to .funcname are hacked to refer to
303  * funcname.  Make .TOC. seem to be defined (its value is set later).
304  */
305 static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
306 {
307 	unsigned int i;
308 
309 	for (i = 1; i < numsyms; i++) {
310 		if (syms[i].st_shndx == SHN_UNDEF) {
311 			char *name = strtab + syms[i].st_name;
312 			if (name[0] == '.') {
313 				if (strcmp(name+1, "TOC.") == 0)
314 					syms[i].st_shndx = SHN_ABS;
315 				syms[i].st_name++;
316 			}
317 		}
318 	}
319 }
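
/*
 * For example (illustrative): an object referring to the undefined
 * dot-symbol ".printk" has its st_name bumped past the dot so it
 * resolves against the kernel's "printk", while an undefined ".TOC."
 * becomes an absolute "TOC." whose value apply_relocate_add() later
 * fills in via find_dot_toc() and my_r2().
 */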
320 
321 static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
322 			       const char *strtab,
323 			       unsigned int symindex)
324 {
325 	unsigned int i, numsyms;
326 	Elf64_Sym *syms;
327 
328 	syms = (Elf64_Sym *)sechdrs[symindex].sh_addr;
329 	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
330 
331 	for (i = 1; i < numsyms; i++) {
332 		if (syms[i].st_shndx == SHN_ABS
333 		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
334 			return &syms[i];
335 	}
336 	return NULL;
337 }
338 
339 int module_frob_arch_sections(Elf64_Ehdr *hdr,
340 			      Elf64_Shdr *sechdrs,
341 			      char *secstrings,
342 			      struct module *me)
343 {
344 	unsigned int i;
345 
346 	/* Find .toc and .stubs sections, symtab and strtab */
347 	for (i = 1; i < hdr->e_shnum; i++) {
348 		char *p;
349 		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
350 			me->arch.stubs_section = i;
351 		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0) {
352 			me->arch.toc_section = i;
353 			if (sechdrs[i].sh_addralign < 8)
354 				sechdrs[i].sh_addralign = 8;
355 		}
356 		else if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0)
357 			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
358 					  sechdrs[i].sh_size);
359 		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".opd")) {
360 			me->arch.start_opd = sechdrs[i].sh_addr;
361 			me->arch.end_opd = sechdrs[i].sh_addr +
362 					   sechdrs[i].sh_size;
363 		}
364 
365 		/* We don't handle .init for the moment: rename to _init */
366 		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
367 			p[0] = '_';
368 
369 		if (sechdrs[i].sh_type == SHT_SYMTAB)
370 			dedotify((void *)hdr + sechdrs[i].sh_offset,
371 				 sechdrs[i].sh_size / sizeof(Elf64_Sym),
372 				 (void *)hdr
373 				 + sechdrs[sechdrs[i].sh_link].sh_offset);
374 	}
375 
376 	if (!me->arch.stubs_section) {
377 		pr_err("%s: doesn't contain .stubs.\n", me->name);
378 		return -ENOEXEC;
379 	}
380 
381 	/* If we don't have a .toc, just use .stubs.  We need to set r2
382 	   to some reasonable value in case the module calls out to
383 	   other functions via a stub, or if a function pointer escapes
384 	   the module by some means.  */
385 	if (!me->arch.toc_section)
386 		me->arch.toc_section = me->arch.stubs_section;
387 
388 	/* Override the stubs size */
389 	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
390 	return 0;
391 }
392 
393 /*
394  * r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives the
395  * maximum span for an instruction which uses a signed 16-bit offset). Round down
396  * to a 256 byte boundary for the odd case where we are setting up r2 without a
397  * .toc section.
398  */
399 static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
400 {
401 	return (sechdrs[me->arch.toc_section].sh_addr & ~0xfful) + 0x8000;
402 }
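
/*
 * As a worked example of the bias above: with the TOC base rounded down
 * to a 256 byte boundary and r2 set to base + 0x8000, a signed 16-bit
 * displacement of -0x8000..0x7fff reaches everything from the rounded
 * base up to base + 0xffff, instead of only half that range either side
 * of the base itself.
 */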
403 
404 /* Both low and high 16 bits are added as SIGNED additions, so if the low
405    16 bits have the high bit set, the high 16 bits must be adjusted.  These
406    macros do that (stolen from binutils). */
407 #define PPC_LO(v) ((v) & 0xffff)
408 #define PPC_HI(v) (((v) >> 16) & 0xffff)
409 #define PPC_HA(v) PPC_HI ((v) + 0x8000)
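
/*
 * Worked example (illustrative value): for reladdr = 0x1234f678 the low
 * half 0xf678 is negative when sign-extended, so PPC_HA() pre-biases by
 * 0x8000 and yields 0x1235; the stub's "addis r11,r2,0x1235" followed
 * by "addi r11,r11,-0x988" then reconstructs 0x12350000 - 0x988 ==
 * 0x1234f678 exactly.
 */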
410 
411 /* Patch stub to reference function and correct r2 value. */
412 static inline int create_stub(const Elf64_Shdr *sechdrs,
413 			      struct ppc64_stub_entry *entry,
414 			      unsigned long addr,
415 			      struct module *me)
416 {
417 	long reladdr;
418 
419 	memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
420 
421 	/* Stub uses address relative to r2. */
422 	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
423 	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
424 		pr_err("%s: Address %p of stub out of range of %p.\n",
425 		       me->name, (void *)reladdr, (void *)my_r2(sechdrs, me));
426 		return 0;
427 	}
428 	pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
429 
430 	entry->jump[0] |= PPC_HA(reladdr);
431 	entry->jump[1] |= PPC_LO(reladdr);
432 	entry->funcdata = func_desc(addr);
433 	entry->magic = STUB_MAGIC;
434 
435 	return 1;
436 }
437 
438 /* Create stub to jump to function described in this OPD/ptr: we need the
439    stub to set up the TOC ptr (r2) for the function. */
440 static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
441 				   unsigned long addr,
442 				   struct module *me)
443 {
444 	struct ppc64_stub_entry *stubs;
445 	unsigned int i, num_stubs;
446 
447 	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);
448 
449 	/* Find this stub, or if that fails, the next avail. entry */
450 	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
451 	for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
452 		if (WARN_ON(i >= num_stubs))
453 			return 0;
454 
455 		if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
456 			return (unsigned long)&stubs[i];
457 	}
458 
459 	if (!create_stub(sechdrs, &stubs[i], addr, me))
460 		return 0;
461 
462 	return (unsigned long)&stubs[i];
463 }
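
/*
 * Repeated calls to stub_for_addr() for the same target therefore hand
 * back the same stub rather than allocating a new one, and the extra
 * zeroed entry reserved by get_stubs_size() keeps the funcdata scan
 * terminated even when every counted stub is in use.
 */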
464 
465 #ifdef CC_USING_MPROFILE_KERNEL
466 static bool is_early_mcount_callsite(u32 *instruction)
467 {
468 	/*
469 	 * Check if this is one of the -mprofile-kernel sequences.
470 	 */
471 	if (instruction[-1] == PPC_INST_STD_LR &&
472 	    instruction[-2] == PPC_INST_MFLR)
473 		return true;
474 
475 	if (instruction[-1] == PPC_INST_MFLR)
476 		return true;
477 
478 	return false;
479 }
480 
481 /*
482  * In case of _mcount calls, do not save the current callee's TOC (in r2) into
483  * the original caller's stack frame. If we did we would clobber the saved TOC
484  * value of the original caller.
485  */
486 static void squash_toc_save_inst(const char *name, unsigned long addr)
487 {
488 	struct ppc64_stub_entry *stub = (struct ppc64_stub_entry *)addr;
489 
490 	/* Only for calls to _mcount */
491 	if (strcmp("_mcount", name) != 0)
492 		return;
493 
494 	stub->jump[2] = PPC_INST_NOP;
495 }
496 #else
497 static void squash_toc_save_inst(const char *name, unsigned long addr) { }
498 
499 /* without -mprofile-kernel, mcount calls are never early */
500 static bool is_early_mcount_callsite(u32 *instruction)
501 {
502 	return false;
503 }
504 #endif
505 
506 /* We expect a nop next: if it is, replace it with an instruction to
507    restore r2. */
508 static int restore_r2(u32 *instruction, struct module *me)
509 {
510 	u32 *prev_insn = instruction - 1;
511 
512 	if (is_early_mcount_callsite(prev_insn))
513 		return 1;
514 
515 	/*
516 	 * Make sure the branch isn't a sibling call.  Sibling calls aren't
517 	 * "link" branches and they don't return, so they don't need the r2
518 	 * restore afterwards.
519 	 */
520 	if (!instr_is_relative_link_branch(*prev_insn))
521 		return 1;
522 
523 	if (*instruction != PPC_INST_NOP) {
524 		pr_err("%s: Expected nop after call, got %08x at %pS\n",
525 			me->name, *instruction, instruction);
526 		return 0;
527 	}
528 	/* ld r2,R2_STACK_OFFSET(r1) */
529 	*instruction = PPC_INST_LD_TOC;
530 	return 1;
531 }
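
/*
 * Putting the pieces together, a cross-module call that the compiler
 * emitted as
 *	bl	target
 *	nop
 * ends up after relocation as a call through the stub, with the TOC
 * reload patched in behind it:
 *	bl	<stub for target>
 *	ld	r2,R2_STACK_OFFSET(r1)
 * (a sketch of the common case; sibling calls and the early
 * -mprofile-kernel _mcount sites handled above are left untouched).
 */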
532 
533 int apply_relocate_add(Elf64_Shdr *sechdrs,
534 		       const char *strtab,
535 		       unsigned int symindex,
536 		       unsigned int relsec,
537 		       struct module *me)
538 {
539 	unsigned int i;
540 	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
541 	Elf64_Sym *sym;
542 	unsigned long *location;
543 	unsigned long value;
544 
545 	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
546 	       sechdrs[relsec].sh_info);
547 
548 	/* First time we're called, we can fix up .TOC. */
549 	if (!me->arch.toc_fixed) {
550 		sym = find_dot_toc(sechdrs, strtab, symindex);
551 		/* It's theoretically possible that a module doesn't want a
552 		 * .TOC. so don't fail it just for that. */
553 		if (sym)
554 			sym->st_value = my_r2(sechdrs, me);
555 		me->arch.toc_fixed = true;
556 	}
557 
558 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
559 		/* This is where to make the change */
560 		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
561 			+ rela[i].r_offset;
562 		/* This is the symbol it is referring to */
563 		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
564 			+ ELF64_R_SYM(rela[i].r_info);
565 
566 		pr_debug("RELOC at %p: %li-type as %s (0x%lx) + %li\n",
567 		       location, (long)ELF64_R_TYPE(rela[i].r_info),
568 		       strtab + sym->st_name, (unsigned long)sym->st_value,
569 		       (long)rela[i].r_addend);
570 
571 		/* `Everything is relative'. */
572 		value = sym->st_value + rela[i].r_addend;
573 
574 		switch (ELF64_R_TYPE(rela[i].r_info)) {
575 		case R_PPC64_ADDR32:
576 			/* Simply set it */
577 			*(u32 *)location = value;
578 			break;
579 
580 		case R_PPC64_ADDR64:
581 			/* Simply set it */
582 			*(unsigned long *)location = value;
583 			break;
584 
585 		case R_PPC64_TOC:
586 			*(unsigned long *)location = my_r2(sechdrs, me);
587 			break;
588 
589 		case R_PPC64_TOC16:
590 			/* Subtract TOC pointer */
591 			value -= my_r2(sechdrs, me);
592 			if (value + 0x8000 > 0xffff) {
593 				pr_err("%s: bad TOC16 relocation (0x%lx)\n",
594 				       me->name, value);
595 				return -ENOEXEC;
596 			}
597 			*((uint16_t *) location)
598 				= (*((uint16_t *) location) & ~0xffff)
599 				| (value & 0xffff);
600 			break;
601 
602 		case R_PPC64_TOC16_LO:
603 			/* Subtract TOC pointer */
604 			value -= my_r2(sechdrs, me);
605 			*((uint16_t *) location)
606 				= (*((uint16_t *) location) & ~0xffff)
607 				| (value & 0xffff);
608 			break;
609 
610 		case R_PPC64_TOC16_DS:
611 			/* Subtract TOC pointer */
612 			value -= my_r2(sechdrs, me);
613 			if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
614 				pr_err("%s: bad TOC16_DS relocation (0x%lx)\n",
615 				       me->name, value);
616 				return -ENOEXEC;
617 			}
618 			*((uint16_t *) location)
619 				= (*((uint16_t *) location) & ~0xfffc)
620 				| (value & 0xfffc);
621 			break;
622 
623 		case R_PPC64_TOC16_LO_DS:
624 			/* Subtract TOC pointer */
625 			value -= my_r2(sechdrs, me);
626 			if ((value & 3) != 0) {
627 				pr_err("%s: bad TOC16_LO_DS relocation (0x%lx)\n",
628 				       me->name, value);
629 				return -ENOEXEC;
630 			}
631 			*((uint16_t *) location)
632 				= (*((uint16_t *) location) & ~0xfffc)
633 				| (value & 0xfffc);
634 			break;
635 
636 		case R_PPC64_TOC16_HA:
637 			/* Subtract TOC pointer */
638 			value -= my_r2(sechdrs, me);
639 			value = ((value + 0x8000) >> 16);
640 			*((uint16_t *) location)
641 				= (*((uint16_t *) location) & ~0xffff)
642 				| (value & 0xffff);
643 			break;
644 
645 		case R_PPC_REL24:
646 			/* FIXME: Handle weak symbols here --RR */
647 			if (sym->st_shndx == SHN_UNDEF ||
648 			    sym->st_shndx == SHN_LIVEPATCH) {
649 				/* External: go via stub */
650 				value = stub_for_addr(sechdrs, value, me);
651 				if (!value)
652 					return -ENOENT;
653 				if (!restore_r2((u32 *)location + 1, me))
654 					return -ENOEXEC;
655 
656 				squash_toc_save_inst(strtab + sym->st_name, value);
657 			} else
658 				value += local_entry_offset(sym);
659 
660 			/* Convert value to relative */
661 			value -= (unsigned long)location;
662 			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0){
663 				pr_err("%s: REL24 %li out of range!\n",
664 				       me->name, (long int)value);
665 				return -ENOEXEC;
666 			}
667 
668 			/* Only replace bits 2 through 26 */
669 			*(uint32_t *)location
670 				= (*(uint32_t *)location & ~0x03fffffc)
671 				| (value & 0x03fffffc);
672 			break;
673 
674 		case R_PPC64_REL64:
675 			/* 64 bits relative (used by features fixups) */
676 			*location = value - (unsigned long)location;
677 			break;
678 
679 		case R_PPC64_REL32:
680 			/* 32 bits relative (used by relative exception tables) */
681 			*(u32 *)location = value - (unsigned long)location;
682 			break;
683 
684 		case R_PPC64_TOCSAVE:
685 			/*
686 			 * Marker reloc indicates we don't have to save r2.
687 			 * That would only save us one instruction, so ignore
688 			 * it.
689 			 */
690 			break;
691 
692 		case R_PPC64_ENTRY:
693 			/*
694 			 * Optimize ELFv2 large code model entry point if
695 			 * the TOC is within 2GB range of current location.
696 			 */
697 			value = my_r2(sechdrs, me) - (unsigned long)location;
698 			if (value + 0x80008000 > 0xffffffff)
699 				break;
700 			/*
701 			 * Check for the large code model prolog sequence:
702 			 *	ld r2, ...(r12)
703 			 *	add r2, r2, r12
704 			 */
705 			if ((((uint32_t *)location)[0] & ~0xfffc)
706 			    != 0xe84c0000)
707 				break;
708 			if (((uint32_t *)location)[1] != 0x7c426214)
709 				break;
710 			/*
711 			 * If found, replace it with:
712 			 *	addis r2, r12, (.TOC.-func)@ha
713 			 *	addi r2, r12, (.TOC.-func)@l
714 			 */
715 			((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
716 			((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
717 			break;
718 
719 		case R_PPC64_REL16_HA:
720 			/* Subtract location pointer */
721 			value -= (unsigned long)location;
722 			value = ((value + 0x8000) >> 16);
723 			*((uint16_t *) location)
724 				= (*((uint16_t *) location) & ~0xffff)
725 				| (value & 0xffff);
726 			break;
727 
728 		case R_PPC64_REL16_LO:
729 			/* Subtract location pointer */
730 			value -= (unsigned long)location;
731 			*((uint16_t *) location)
732 				= (*((uint16_t *) location) & ~0xffff)
733 				| (value & 0xffff);
734 			break;
735 
736 		default:
737 			pr_err("%s: Unknown ADD relocation: %lu\n",
738 			       me->name,
739 			       (unsigned long)ELF64_R_TYPE(rela[i].r_info));
740 			return -ENOEXEC;
741 		}
742 	}
743 
744 	return 0;
745 }
746 
747 #ifdef CONFIG_DYNAMIC_FTRACE
748 
749 #ifdef CC_USING_MPROFILE_KERNEL
750 
751 #define PACATOC offsetof(struct paca_struct, kernel_toc)
752 
753 /*
754  * For mprofile-kernel we use a special stub for ftrace_caller() because we
755  * can't rely on r2 containing this module's TOC when we enter the stub.
756  *
757  * That can happen if the function calling us didn't need to use the toc. In
758  * that case it won't have set up r2, and the r2 value will be either the
759  * kernel's toc, or possibly another module's toc.
760  *
761  * To deal with that this stub uses the kernel toc, which is always accessible
762  * via the paca (in r13). The target (ftrace_caller()) is responsible for
763  * saving and restoring the toc before returning.
764  */
765 static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me)
766 {
767 	struct ppc64_stub_entry *entry;
768 	unsigned int i, num_stubs;
769 	static u32 stub_insns[] = {
770 		0xe98d0000 | PACATOC, 	/* ld      r12,PACATOC(r13)	*/
771 		0x3d8c0000,		/* addis   r12,r12,<high>	*/
772 		0x398c0000, 		/* addi    r12,r12,<low>	*/
773 		0x7d8903a6, 		/* mtctr   r12			*/
774 		0x4e800420, 		/* bctr				*/
775 	};
776 	long reladdr;
777 
778 	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*entry);
779 
780 	/* Find the next available stub entry */
781 	entry = (void *)sechdrs[me->arch.stubs_section].sh_addr;
782 	for (i = 0; i < num_stubs && stub_func_addr(entry->funcdata); i++, entry++);
783 
784 	if (i >= num_stubs) {
785 		pr_err("%s: Unable to find a free slot for ftrace stub.\n", me->name);
786 		return 0;
787 	}
788 
789 	memcpy(entry->jump, stub_insns, sizeof(stub_insns));
790 
791 	/* Stub uses address relative to kernel toc (from the paca) */
792 	reladdr = (unsigned long)ftrace_caller - kernel_toc_addr();
793 	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
794 		pr_err("%s: Address of ftrace_caller out of range of kernel_toc.\n", me->name);
795 		return 0;
796 	}
797 
798 	entry->jump[1] |= PPC_HA(reladdr);
799 	entry->jump[2] |= PPC_LO(reladdr);
800 
801 	/* Even though we don't use funcdata in the stub, it's needed elsewhere. */
802 	entry->funcdata = func_desc((unsigned long)ftrace_caller);
803 	entry->magic = STUB_MAGIC;
804 
805 	return (unsigned long)entry;
806 }
807 #else
808 static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me)
809 {
810 	return stub_for_addr(sechdrs, (unsigned long)ftrace_caller, me);
811 }
812 #endif
813 
814 int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
815 {
816 	mod->arch.toc = my_r2(sechdrs, mod);
817 	mod->arch.tramp = create_ftrace_stub(sechdrs, mod);
818 
819 	if (!mod->arch.tramp)
820 		return -ENOENT;
821 
822 	return 0;
823 }
824 #endif
825