/* Kernel module help for PPC64.
   Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/bug.h>
#include <linux/uaccess.h>
#include <asm/module.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/sections.h>

/* FIXME: We don't do .init separately.  To do this, we'd need to have
   a separate r2 value in the init and core section, and stub between
   them, too.

   Using a magic allocator which places modules within 32MB solves
   this, and makes other things simpler.  Anton?
   --RR. */

#ifdef PPC64_ELF_ABI_v2

/* An address is simply the address of the function. */
typedef unsigned long func_desc_t;

static func_desc_t func_desc(unsigned long addr)
{
	return addr;
}
static unsigned long func_addr(unsigned long addr)
{
	return addr;
}
static unsigned long stub_func_addr(func_desc_t func)
{
	return func;
}

/* PowerPC64 specific values for the Elf64_Sym st_other field. */
#define STO_PPC64_LOCAL_BIT	5
#define STO_PPC64_LOCAL_MASK	(7 << STO_PPC64_LOCAL_BIT)
#define PPC64_LOCAL_ENTRY_OFFSET(other)					\
 (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)

static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
	/* sym->st_other indicates offset to local entry point
	 * (otherwise it will assume r12 is the address of the start
	 * of function and try to derive r2 from it). */
	return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
}
#else

/* An address is the address of the OPD entry, which contains the address of the fn. */
typedef struct ppc64_opd_entry func_desc_t;

static func_desc_t func_desc(unsigned long addr)
{
	return *(struct ppc64_opd_entry *)addr;
}
static unsigned long func_addr(unsigned long addr)
{
	return func_desc(addr).funcaddr;
}
static unsigned long stub_func_addr(func_desc_t func)
{
	return func.funcaddr;
}
static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
	return 0;
}
#endif

#define STUB_MAGIC 0x73747562 /* stub */

/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
   the kernel itself).  But on PPC64, these need to be used for every
   jump, actually, to reset r2 (TOC+0x8000). */
struct ppc64_stub_entry
{
	/* 28 byte jump instruction sequence (7 instructions).  We only
	 * need 6 instructions on ABIv2 but we always allocate 7 so
	 * we don't have to modify the trampoline load instruction.
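	 *
	 * Note the resulting layout: "magic" lands at offset 28 and
	 * "funcdata" at offset 32, which is why ppc64_stub_insns below
	 * loads the target with "ld r12,32(r11)" and, on ELFv1, the new
	 * TOC with "ld r2,40(r11)" (the r2 field of the function
	 * descriptor).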
	 */
	u32 jump[7];

	/* Used by ftrace to identify stubs */
	u32 magic;

	/* Data for the above code */
	func_desc_t funcdata;
};

/*
 * PPC64 uses 24 bit jumps, but we need to jump into other modules or
 * the kernel which may be further.  So we jump to a stub.
 *
 * For ELFv1 we need to use this to set up the new r2 value (aka TOC
 * pointer).  For ELFv2 it's the callee's responsibility to set up the
 * new r2, but for both we need to save the old r2.
 *
 * We could simply patch the new r2 value and function pointer into
 * the stub, but it's significantly shorter to put these values at the
 * end of the stub code, and patch the stub address (32-bits relative
 * to the TOC ptr, r2) into the stub.
 */

static u32 ppc64_stub_insns[] = {
	0x3d620000,			/* addis   r11,r2, <high> */
	0x396b0000,			/* addi    r11,r11, <low> */
	/* Save current r2 value in magic place on the stack. */
	0xf8410000|R2_STACK_OFFSET,	/* std     r2,R2_STACK_OFFSET(r1) */
	0xe98b0020,			/* ld      r12,32(r11) */
#ifdef PPC64_ELF_ABI_v1
	/* Set up new r2 from function descriptor */
	0xe84b0028,			/* ld      r2,40(r11) */
#endif
	0x7d8903a6,			/* mtctr   r12 */
	0x4e800420			/* bctr */
};

#ifdef CONFIG_DYNAMIC_FTRACE
int module_trampoline_target(struct module *mod, unsigned long addr,
			     unsigned long *target)
{
	struct ppc64_stub_entry *stub;
	func_desc_t funcdata;
	u32 magic;

	if (!within_module_core(addr, mod)) {
		pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	stub = (struct ppc64_stub_entry *)addr;

	if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
		pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	if (magic != STUB_MAGIC) {
		pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
		pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	*target = stub_func_addr(funcdata);

	return 0;
}
#endif

/* Count how many different 24-bit relocations (different symbol,
   different addend) */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, r_info, r_addend, _count_relocs;

	/* FIXME: Only count external ones --RR */
	_count_relocs = 0;
	r_info = 0;
	r_addend = 0;
	for (i = 0; i < num; i++)
		/* Only count 24-bit relocs, others don't need stubs */
		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
		     r_addend != rela[i].r_addend)) {
			_count_relocs++;
			r_info = ELF64_R_SYM(rela[i].r_info);
			r_addend = rela[i].r_addend;
		}

	return _count_relocs;
}

static int relacmp(const void *_x, const void *_y)
{
	const Elf64_Rela *x, *y;

	y = (Elf64_Rela *)_x;
	x = (Elf64_Rela *)_y;

	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
	 * make the comparison cheaper/faster.
	 * It won't affect the sorting or
	 * the counting algorithms' performance
	 */
	if (x->r_info < y->r_info)
		return -1;
	else if (x->r_info > y->r_info)
		return 1;
	else if (x->r_addend < y->r_addend)
		return -1;
	else if (x->r_addend > y->r_addend)
		return 1;
	else
		return 0;
}

static void relaswap(void *_x, void *_y, int size)
{
	uint64_t *x, *y, tmp;
	int i;

	y = (uint64_t *)_x;
	x = (uint64_t *)_y;

	for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
		tmp = x[i];
		x[i] = y[i];
		y[i] = tmp;
	}
}

/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
				    const Elf64_Shdr *sechdrs)
{
	/* One extra reloc so it's always 0-funcaddr terminated */
	unsigned long relocs = 1;
	unsigned i;

	/* Every relocated section... */
	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_RELA) {
			pr_debug("Found relocations in section %u\n", i);
			pr_debug("Ptr: %p.  Number: %Lu\n",
				 (void *)sechdrs[i].sh_addr,
				 sechdrs[i].sh_size / sizeof(Elf64_Rela));

			/* Sort the relocation information based on a symbol and
			 * addend key.  This is a stable O(n*log n) complexity
			 * algorithm but it will reduce the complexity of
			 * count_relocs() to linear complexity O(n)
			 */
			sort((void *)sechdrs[i].sh_addr,
			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
			     sizeof(Elf64_Rela), relacmp, relaswap);

			relocs += count_relocs((void *)sechdrs[i].sh_addr,
					       sechdrs[i].sh_size
					       / sizeof(Elf64_Rela));
		}
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	/* make the trampoline to the ftrace_caller */
	relocs++;
#endif

	pr_debug("Looks like a total of %lu stubs, max\n", relocs);
	return relocs * sizeof(struct ppc64_stub_entry);
}

/* Still needed for ELFv2, for .TOC. */
static void dedotify_versions(struct modversion_info *vers,
			      unsigned long size)
{
	struct modversion_info *end;

	for (end = (void *)vers + size; vers < end; vers++)
		if (vers->name[0] == '.') {
			memmove(vers->name, vers->name+1, strlen(vers->name));
		}
}

/*
 * Undefined symbols which refer to .funcname, hack to funcname.  Make .TOC.
 * seem to be defined (value set later).
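 *
 * For instance, an undefined ".printk" (example symbol) is renamed to
 * "printk" simply by bumping st_name past the leading dot, while an
 * undefined ".TOC." additionally becomes an SHN_ABS "TOC." whose value is
 * filled in later by apply_relocate_add().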
 */
static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
	unsigned int i;

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_UNDEF) {
			char *name = strtab + syms[i].st_name;
			if (name[0] == '.') {
				if (strcmp(name+1, "TOC.") == 0)
					syms[i].st_shndx = SHN_ABS;
				syms[i].st_name++;
			}
		}
	}
}

static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
			       const char *strtab,
			       unsigned int symindex)
{
	unsigned int i, numsyms;
	Elf64_Sym *syms;

	syms = (Elf64_Sym *)sechdrs[symindex].sh_addr;
	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_ABS
		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
			return &syms[i];
	}
	return NULL;
}

int module_frob_arch_sections(Elf64_Ehdr *hdr,
			      Elf64_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
	unsigned int i;

	/* Find .toc and .stubs sections, symtab and strtab */
	for (i = 1; i < hdr->e_shnum; i++) {
		char *p;
		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
			me->arch.stubs_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0) {
			me->arch.toc_section = i;
			if (sechdrs[i].sh_addralign < 8)
				sechdrs[i].sh_addralign = 8;
		}
		else if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0)
			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
					  sechdrs[i].sh_size);

		/* We don't handle .init for the moment: rename to _init */
		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
			p[0] = '_';

		if (sechdrs[i].sh_type == SHT_SYMTAB)
			dedotify((void *)hdr + sechdrs[i].sh_offset,
				 sechdrs[i].sh_size / sizeof(Elf64_Sym),
				 (void *)hdr
				 + sechdrs[sechdrs[i].sh_link].sh_offset);
	}

	if (!me->arch.stubs_section) {
		pr_err("%s: doesn't contain .stubs.\n", me->name);
		return -ENOEXEC;
	}

	/* If we don't have a .toc, just use .stubs.  We need to set r2
	   to some reasonable value in case the module calls out to
	   other functions via a stub, or if a function pointer escapes
	   the module by some means. */
	if (!me->arch.toc_section)
		me->arch.toc_section = me->arch.stubs_section;

	/* Override the stubs size */
	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
	return 0;
}

/*
 * r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives
 * the maximum span to instructions which use a signed 16-bit offset).  Round
 * down to a 256 byte boundary for the odd case where we are setting up r2
 * without a .toc section.
 */
static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
{
	return (sechdrs[me->arch.toc_section].sh_addr & ~0xfful) + 0x8000;
}

/* Both low and high 16 bits are added as SIGNED additions, so if the low
   16 bits have the high bit set, the high 16 bits must be adjusted.  These
   macros do that (stolen from binutils). */
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)

/* Patch stub to reference function and correct r2 value.
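 *
 * Worked example of the @ha/@l split: for reladdr 0x12348765, PPC_LO() is
 * 0x8765 (sign-extended by addi to -0x789b) and PPC_HA() is 0x1235, so the
 * addis contributes 0x12350000 and the addi subtracts 0x789b, giving back
 * 0x12348765.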
 */
static inline int create_stub(const Elf64_Shdr *sechdrs,
			      struct ppc64_stub_entry *entry,
			      unsigned long addr,
			      struct module *me)
{
	long reladdr;

	memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));

	/* Stub uses address relative to r2. */
	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("%s: Address %p of stub out of range of %p.\n",
		       me->name, (void *)reladdr,
		       (void *)my_r2(sechdrs, me));
		return 0;
	}
	pr_debug("Stub %p gets data from reladdr %li\n", entry, reladdr);

	entry->jump[0] |= PPC_HA(reladdr);
	entry->jump[1] |= PPC_LO(reladdr);
	entry->funcdata = func_desc(addr);
	entry->magic = STUB_MAGIC;

	return 1;
}

/* Create stub to jump to function described in this OPD/ptr: we need the
   stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
				   unsigned long addr,
				   struct module *me)
{
	struct ppc64_stub_entry *stubs;
	unsigned int i, num_stubs;

	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);

	/* Find this stub, or if that fails, the next avail. entry */
	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
	for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
		if (WARN_ON(i >= num_stubs))
			return 0;

		if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
			return (unsigned long)&stubs[i];
	}

	if (!create_stub(sechdrs, &stubs[i], addr, me))
		return 0;

	return (unsigned long)&stubs[i];
}

#ifdef CC_USING_MPROFILE_KERNEL
static bool is_early_mcount_callsite(u32 *instruction)
{
	/*
	 * Check if this is one of the -mprofile-kernel sequences.
	 */
	if (instruction[-1] == PPC_INST_STD_LR &&
	    instruction[-2] == PPC_INST_MFLR)
		return true;

	if (instruction[-1] == PPC_INST_MFLR)
		return true;

	return false;
}

/*
 * In case of _mcount calls, do not save the current callee's TOC (in r2) into
 * the original caller's stack frame.  If we did we would clobber the saved TOC
 * value of the original caller.
 */
static void squash_toc_save_inst(const char *name, unsigned long addr)
{
	struct ppc64_stub_entry *stub = (struct ppc64_stub_entry *)addr;

	/* Only for calls to _mcount */
	if (strcmp("_mcount", name) != 0)
		return;

	stub->jump[2] = PPC_INST_NOP;
}
#else
static void squash_toc_save_inst(const char *name, unsigned long addr) { }

/* without -mprofile-kernel, mcount calls are never early */
static bool is_early_mcount_callsite(u32 *instruction)
{
	return false;
}
#endif

/* We expect a nop next: if it is, replace it with an instruction to
   restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
	u32 *prev_insn = instruction - 1;

	if (is_early_mcount_callsite(prev_insn))
		return 1;

	/*
	 * Make sure the branch isn't a sibling call.  Sibling calls aren't
	 * "link" branches and they don't return, so they don't need the r2
	 * restore afterwards.
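	 *
	 * Once the nop is patched, the call site effectively reads
	 * "bl <stub>; ld r2,R2_STACK_OFFSET(r1)", restoring the r2 value
	 * that the stub saved on the stack before the call.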
	 */
	if (!instr_is_relative_link_branch(*prev_insn))
		return 1;

	if (*instruction != PPC_INST_NOP) {
		pr_err("%s: Expected nop after call, got %08x at %pS\n",
		       me->name, *instruction, instruction);
		return 0;
	}

	/* ld r2,R2_STACK_OFFSET(r1) */
	*instruction = PPC_INST_LD_TOC;
	return 1;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	unsigned long *location;
	unsigned long value;

	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
		 sechdrs[relsec].sh_info);

	/* First time we're called, we can fix up .TOC. */
	if (!me->arch.toc_fixed) {
		sym = find_dot_toc(sechdrs, strtab, symindex);
		/* It's theoretically possible that a module doesn't want a
		 * .TOC. so don't fail it just for that. */
		if (sym)
			sym->st_value = my_r2(sechdrs, me);
		me->arch.toc_fixed = true;
	}

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rela[i].r_info);

		pr_debug("RELOC at %p: %li-type as %s (0x%lx) + %li\n",
			 location, (long)ELF64_R_TYPE(rela[i].r_info),
			 strtab + sym->st_name, (unsigned long)sym->st_value,
			 (long)rela[i].r_addend);

		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_PPC64_ADDR32:
			/* Simply set it */
			*(u32 *)location = value;
			break;

		case R_PPC64_ADDR64:
			/* Simply set it */
			*(unsigned long *)location = value;
			break;

		case R_PPC64_TOC:
			*(unsigned long *)location = my_r2(sechdrs, me);
			break;

		case R_PPC64_TOC16:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if (value + 0x8000 > 0xffff) {
				pr_err("%s: bad TOC16 relocation (0x%lx)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_LO:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
				pr_err("%s: bad TOC16_DS relocation (0x%lx)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC64_TOC16_LO_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0) {
				pr_err("%s: bad TOC16_LO_DS relocation (0x%lx)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC64_TOC16_HA:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			value = ((value + 0x8000) >> 16);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC_REL24:
			/* FIXME: Handle weak symbols here --RR */
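			/*
			 * The branch instruction patched here encodes a
			 * 24-bit word offset (a signed, word-aligned 26-bit
			 * byte displacement), hence the +/- 32MB range check
			 * and the 0x03fffffc mask below.
			 */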
			if (sym->st_shndx == SHN_UNDEF ||
			    sym->st_shndx == SHN_LIVEPATCH) {
				/* External: go via stub */
				value = stub_for_addr(sechdrs, value, me);
				if (!value)
					return -ENOENT;
				if (!restore_r2((u32 *)location + 1, me))
					return -ENOEXEC;

				squash_toc_save_inst(strtab + sym->st_name, value);
			} else
				value += local_entry_offset(sym);

			/* Convert value to relative */
			value -= (unsigned long)location;
			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
				pr_err("%s: REL24 %li out of range!\n",
				       me->name, (long int)value);
				return -ENOEXEC;
			}

			/* Only replace bits 2 through 26 */
			*(uint32_t *)location
				= (*(uint32_t *)location & ~0x03fffffc)
				| (value & 0x03fffffc);
			break;

		case R_PPC64_REL64:
			/* 64 bits relative (used by features fixups) */
			*location = value - (unsigned long)location;
			break;

		case R_PPC64_REL32:
			/* 32 bits relative (used by relative exception tables) */
			*(u32 *)location = value - (unsigned long)location;
			break;

		case R_PPC64_TOCSAVE:
			/*
			 * Marker reloc indicates we don't have to save r2.
			 * That would only save us one instruction, so ignore
			 * it.
			 */
			break;

		case R_PPC64_ENTRY:
			/*
			 * Optimize ELFv2 large code model entry point if
			 * the TOC is within 2GB range of current location.
			 */
			value = my_r2(sechdrs, me) - (unsigned long)location;
			if (value + 0x80008000 > 0xffffffff)
				break;
			/*
			 * Check for the large code model prolog sequence:
			 *	ld r2, ...(r12)
			 *	add r2, r2, r12
			 */
			if ((((uint32_t *)location)[0] & ~0xfffc)
			    != 0xe84c0000)
				break;
			if (((uint32_t *)location)[1] != 0x7c426214)
				break;
			/*
			 * If found, replace it with:
			 *	addis r2, r12, (.TOC.-func)@ha
			 *	addi  r2, r12, (.TOC.-func)@l
			 */
			((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
			((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
			break;

		case R_PPC64_REL16_HA:
			/* Subtract location pointer */
			value -= (unsigned long)location;
			value = ((value + 0x8000) >> 16);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_REL16_LO:
			/* Subtract location pointer */
			value -= (unsigned long)location;
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		default:
			pr_err("%s: Unknown ADD relocation: %lu\n",
			       me->name,
			       (unsigned long)ELF64_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CC_USING_MPROFILE_KERNEL

#define PACATOC offsetof(struct paca_struct, kernel_toc)

/*
 * For mprofile-kernel we use a special stub for ftrace_caller() because we
 * can't rely on r2 containing this module's TOC when we enter the stub.
 *
 * That can happen if the function calling us didn't need to use the toc.  In
 * that case it won't have set up r2, and the r2 value will be either the
 * kernel's toc, or possibly another module's toc.
 *
 * To deal with that this stub uses the kernel toc, which is always accessible
 * via the paca (in r13).  The target (ftrace_caller()) is responsible for
 * saving and restoring the toc before returning.
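 *
 * The stub below computes ftrace_caller's address as kernel toc + offset:
 * it loads the kernel toc from paca->kernel_toc (via PACATOC), applies the
 * addis/addi pair, and branches through ctr.  Unlike ppc64_stub_insns it
 * does not save the caller's r2 on the stack.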
 */
static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me)
{
	struct ppc64_stub_entry *entry;
	unsigned int i, num_stubs;
	static u32 stub_insns[] = {
		0xe98d0000 | PACATOC,	/* ld      r12,PACATOC(r13)	*/
		0x3d8c0000,		/* addis   r12,r12,<high>	*/
		0x398c0000,		/* addi    r12,r12,<low>	*/
		0x7d8903a6,		/* mtctr   r12			*/
		0x4e800420,		/* bctr				*/
	};
	long reladdr;

	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*entry);

	/* Find the next available stub entry */
	entry = (void *)sechdrs[me->arch.stubs_section].sh_addr;
	for (i = 0; i < num_stubs && stub_func_addr(entry->funcdata); i++, entry++);

	if (i >= num_stubs) {
		pr_err("%s: Unable to find a free slot for ftrace stub.\n", me->name);
		return 0;
	}

	memcpy(entry->jump, stub_insns, sizeof(stub_insns));

	/* Stub uses address relative to kernel toc (from the paca) */
	reladdr = (unsigned long)ftrace_caller - kernel_toc_addr();
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("%s: Address of ftrace_caller out of range of kernel_toc.\n", me->name);
		return 0;
	}

	entry->jump[1] |= PPC_HA(reladdr);
	entry->jump[2] |= PPC_LO(reladdr);

	/* Even though we don't use funcdata in the stub, it's needed elsewhere. */
	entry->funcdata = func_desc((unsigned long)ftrace_caller);
	entry->magic = STUB_MAGIC;

	return (unsigned long)entry;
}
#else
static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me)
{
	return stub_for_addr(sechdrs, (unsigned long)ftrace_caller, me);
}
#endif

int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
	mod->arch.toc = my_r2(sechdrs, mod);
	mod->arch.tramp = create_ftrace_stub(sechdrs, mod);

	if (!mod->arch.tramp)
		return -ENOENT;

	return 0;
}
#endif