/*-
 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
 * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/exec.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/stdint.h>
#include <string.h>
#include <machine/elf.h>
#include <stand.h>
#define FREEBSD_ELF
#include <sys/link_elf.h>

#include "bootstrap.h"

#define COPYOUT(s,d,l)	archsw.arch_copyout((vm_offset_t)(s), d, l)

#if defined(__i386__) && __ELF_WORD_SIZE == 64
#undef ELF_TARG_CLASS
#undef ELF_TARG_MACH
#define ELF_TARG_CLASS	ELFCLASS64
#define ELF_TARG_MACH	EM_X86_64
#endif

typedef struct elf_file {
	Elf_Phdr	*ph;
	Elf_Ehdr	*ehdr;
	Elf_Sym		*symtab;
	Elf_Hashelt	*hashtab;
	Elf_Hashelt	nbuckets;
	Elf_Hashelt	nchains;
	Elf_Hashelt	*buckets;
	Elf_Hashelt	*chains;
	Elf_Rel		*rel;
	size_t		relsz;
	Elf_Rela	*rela;
	size_t		relasz;
	char		*strtab;
	size_t		strsz;
	int		fd;
	caddr_t		firstpage;
	size_t		firstlen;
	int		kernel;
	u_int64_t	off;
} *elf_file_t;

static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef, u_int64_t loadaddr);
static int __elfN(lookup_symbol)(struct preloaded_file *mp, elf_file_t ef, const char* name, Elf_Sym* sym);
static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
    Elf_Addr p, void *val, size_t len);
static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef,
    Elf_Addr p_start, Elf_Addr p_end);
static symaddr_fn __elfN(symaddr);
static char	*fake_modname(const char *name);

const char	*__elfN(kerneltype) = "elf kernel";
const char	*__elfN(moduletype) = "elf module";

u_int64_t	__elfN(relocation_offset) = 0;

static int
__elfN(load_elf_header)(char *filename, elf_file_t ef)
{
	ssize_t bytes_read;
	Elf_Ehdr *ehdr;
	int err;

	/*
	 * Open the image, read and validate the ELF header
	 */
	if (filename == NULL)	/* can't handle nameless */
		return (EFTYPE);
	if ((ef->fd = open(filename, O_RDONLY)) == -1)
		return (errno);
	ef->firstpage = malloc(PAGE_SIZE);
	if (ef->firstpage == NULL) {
		close(ef->fd);
		return (ENOMEM);
	}
	bytes_read = read(ef->fd, ef->firstpage, PAGE_SIZE);
	ef->firstlen = (size_t)bytes_read;
	if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) {
		err = EFTYPE;	/* could be EIO, but may be small file */
		goto error;
	}
	ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage;

	/* Is it ELF? */
	if (!IS_ELF(*ehdr)) {
		err = EFTYPE;
		goto error;
	}

	if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||	/* Layout ? */
	    ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT)		/* Version ? */ {
		err = EFTYPE;
		goto error;
	}

	/*
	 * Fix up ELF endianness.
	 *
	 * The Xhdr structure was loaded using a block read call to
	 * optimize file accesses. It might happen that the endianness
	 * of the system memory is different from the endianness of
	 * the ELF header.
	 * Swap the fields here to guarantee that Xhdr always contains
	 * valid data regardless of architecture.
	 */
	if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) {
		ehdr->e_type = be16toh(ehdr->e_type);
		ehdr->e_machine = be16toh(ehdr->e_machine);
		ehdr->e_version = be32toh(ehdr->e_version);
		if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
			ehdr->e_entry = be64toh(ehdr->e_entry);
			ehdr->e_phoff = be64toh(ehdr->e_phoff);
			ehdr->e_shoff = be64toh(ehdr->e_shoff);
		} else {
			ehdr->e_entry = be32toh(ehdr->e_entry);
			ehdr->e_phoff = be32toh(ehdr->e_phoff);
			ehdr->e_shoff = be32toh(ehdr->e_shoff);
		}
		ehdr->e_flags = be32toh(ehdr->e_flags);
		ehdr->e_ehsize = be16toh(ehdr->e_ehsize);
		ehdr->e_phentsize = be16toh(ehdr->e_phentsize);
		ehdr->e_phnum = be16toh(ehdr->e_phnum);
		ehdr->e_shentsize = be16toh(ehdr->e_shentsize);
		ehdr->e_shnum = be16toh(ehdr->e_shnum);
		ehdr->e_shstrndx = be16toh(ehdr->e_shstrndx);
	} else {
		ehdr->e_type = le16toh(ehdr->e_type);
		ehdr->e_machine = le16toh(ehdr->e_machine);
		ehdr->e_version = le32toh(ehdr->e_version);
		if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
			ehdr->e_entry = le64toh(ehdr->e_entry);
			ehdr->e_phoff = le64toh(ehdr->e_phoff);
			ehdr->e_shoff = le64toh(ehdr->e_shoff);
		} else {
			ehdr->e_entry = le32toh(ehdr->e_entry);
			ehdr->e_phoff = le32toh(ehdr->e_phoff);
			ehdr->e_shoff = le32toh(ehdr->e_shoff);
		}
		ehdr->e_flags = le32toh(ehdr->e_flags);
		ehdr->e_ehsize = le16toh(ehdr->e_ehsize);
		ehdr->e_phentsize = le16toh(ehdr->e_phentsize);
		ehdr->e_phnum = le16toh(ehdr->e_phnum);
		ehdr->e_shentsize = le16toh(ehdr->e_shentsize);
		ehdr->e_shnum = le16toh(ehdr->e_shnum);
		ehdr->e_shstrndx = le16toh(ehdr->e_shstrndx);
	}

	if (ehdr->e_version != EV_CURRENT || ehdr->e_machine != ELF_TARG_MACH) {	/* Machine ? */
		err = EFTYPE;
		goto error;
	}

	return (0);

error:
	if (ef->firstpage != NULL) {
		free(ef->firstpage);
		ef->firstpage = NULL;
	}
	if (ef->fd != -1) {
		close(ef->fd);
		ef->fd = -1;
	}
	return (err);
}

/*
 * Attempt to load the file (file) as an ELF module.  It will be stored at
 * (dest), and a pointer to a module structure describing the loaded object
 * will be saved in (result).
 */
int
__elfN(loadfile)(char *filename, u_int64_t dest, struct preloaded_file **result)
{
	return (__elfN(loadfile_raw)(filename, dest, result, 0));
}

int
__elfN(loadfile_raw)(char *filename, u_int64_t dest,
    struct preloaded_file **result, int multiboot)
{
	struct preloaded_file *fp, *kfp;
	struct elf_file ef;
	Elf_Ehdr *ehdr;
	int err;

	fp = NULL;
	bzero(&ef, sizeof(struct elf_file));
	ef.fd = -1;

	err = __elfN(load_elf_header)(filename, &ef);
	if (err != 0)
		return (err);

	ehdr = ef.ehdr;

	/*
	 * Check to see what sort of module we are.
	 */
	kfp = file_findfile(NULL, __elfN(kerneltype));
#ifdef __powerpc__
	/*
	 * Kernels can be ET_DYN, so just assume the first loaded object is the
	 * kernel. This assumption will be checked later.
	 */
	if (kfp == NULL)
		ef.kernel = 1;
#endif
	if (ef.kernel || ehdr->e_type == ET_EXEC) {
		/* Looks like a kernel */
		if (kfp != NULL) {
			printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: kernel already loaded\n");
			err = EPERM;
			goto oerr;
		}
		/*
		 * Calculate destination address based on kernel entrypoint.
		 *
		 * For ARM, the destination address is independent of any values in the
		 * elf header (an ARM kernel can be loaded at any 2MB boundary), so we
		 * leave dest set to the value calculated by archsw.arch_loadaddr() and
		 * passed in to this function.
		 */
#ifndef __arm__
		if (ehdr->e_type == ET_EXEC)
			dest = (ehdr->e_entry & ~PAGE_MASK);
#endif
		if ((ehdr->e_entry & ~PAGE_MASK) == 0) {
			printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: not a kernel (maybe static binary?)\n");
			err = EPERM;
			goto oerr;
		}
		ef.kernel = 1;

	} else if (ehdr->e_type == ET_DYN) {
		/* Looks like a kld module */
		if (multiboot != 0) {
			printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module as multiboot\n");
			err = EPERM;
			goto oerr;
		}
		if (kfp == NULL) {
			printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module before kernel\n");
			err = EPERM;
			goto oerr;
		}
		if (strcmp(__elfN(kerneltype), kfp->f_type)) {
			printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module with kernel type '%s'\n", kfp->f_type);
			err = EPERM;
			goto oerr;
		}
		/* Looks OK, go ahead */
		ef.kernel = 0;

	} else {
		err = EFTYPE;
		goto oerr;
	}

	if (archsw.arch_loadaddr != NULL)
		dest = archsw.arch_loadaddr(LOAD_ELF, ehdr, dest);
	else
		dest = roundup(dest, PAGE_SIZE);

	/*
	 * Ok, we think we should handle this.
	 */
	fp = file_alloc();
	if (fp == NULL) {
		printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: cannot allocate module info\n");
		err = EPERM;
		goto out;
	}
	if (ef.kernel == 1 && multiboot == 0)
		setenv("kernelname", filename, 1);
	fp->f_name = strdup(filename);
	if (multiboot == 0)
		fp->f_type = strdup(ef.kernel ?
		    __elfN(kerneltype) : __elfN(moduletype));
	else
		fp->f_type = strdup("elf multiboot kernel");

#ifdef ELF_VERBOSE
	if (ef.kernel)
		printf("%s entry at 0x%jx\n", filename, (uintmax_t)ehdr->e_entry);
#else
	printf("%s ", filename);
#endif

	fp->f_size = __elfN(loadimage)(fp, &ef, dest);
	if (fp->f_size == 0 || fp->f_addr == 0)
		goto ioerr;

	/* save exec header as metadata */
	file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr);

	/* Load OK, return module pointer */
	*result = (struct preloaded_file *)fp;
	err = 0;
	goto out;

ioerr:
	err = EIO;
oerr:
	file_discard(fp);
out:
	if (ef.firstpage)
		free(ef.firstpage);
	if (ef.fd != -1)
		close(ef.fd);
	return (err);
}

/*
 * With the file (fd) open on the image, and (ehdr) containing
 * the Elf header, load the image at (off)
 */
static int
__elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, u_int64_t off)
{
	int i;
	u_int j;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr, *php;
	Elf_Shdr *shdr;
	char *shstr;
	int ret;
	vm_offset_t firstaddr;
	vm_offset_t lastaddr;
	size_t chunk;
	ssize_t result;
	Elf_Addr ssym, esym;
	Elf_Dyn *dp;
	Elf_Addr adp;
	Elf_Addr ctors;
	int ndp;
	int symstrindex;
	int symtabindex;
	Elf_Size size;
	u_int fpcopy;
	Elf_Sym sym;
	Elf_Addr p_start, p_end;
#if __ELF_WORD_SIZE == 64
	uint64_t scr_ssym;
	uint64_t scr_esym;
	uint64_t scr;
#else
	uint32_t scr_ssym;
	uint32_t scr_esym;
	uint32_t scr;
#endif

	dp = NULL;
	shdr = NULL;
	ret = 0;
	firstaddr = lastaddr = 0;
	ehdr = ef->ehdr;
	if (ehdr->e_type == ET_EXEC) {
#if defined(__i386__) || defined(__amd64__)
#if __ELF_WORD_SIZE == 64
		off = - (off & 0xffffffffff000000ull);	/* x86_64 relocates after locore */
#else
		off = - (off & 0xff000000u);	/* i386 relocates after locore */
#endif
#elif defined(__powerpc__)
		/*
		 * On purely virtual-memory machines like the e500, the kernel
		 * is linked against its final VA range, which is most often
		 * not available at the loader stage, but only after the kernel
		 * initializes and completes its VM settings. In such cases we
		 * cannot use the p_vaddr field directly to load ELF segments,
		 * but put them at some 'load-time' locations.
		 */
		if (off & 0xf0000000u) {
			off = -(off & 0xf0000000u);
			/*
			 * XXX the physical load address should not be hardcoded. Note
			 * that the Book-E kernel assumes that it's loaded at a 16MB
			 * boundary for now...
			 */
			off += 0x01000000;
			ehdr->e_entry += off;
#ifdef ELF_VERBOSE
			printf("Converted entry 0x%08x\n", ehdr->e_entry);
#endif
		} else
			off = 0;
#elif defined(__arm__) && !defined(EFI)
		/*
		 * The elf headers in arm kernels specify virtual addresses in all
		 * header fields, even the ones that should be physical addresses.
		 * We assume the entry point is in the first page, and masking the page
		 * offset will leave us with the virtual address the kernel was linked
		 * at.  We subtract that from the load offset, making 'off' into the
		 * value which, when added to a virtual address in an elf header,
		 * translates it to a physical address.  We do the va->pa conversion on
		 * the entry point address in the header now, so that later we can
		 * launch the kernel by just jumping to that address.
		 *
		 * When booting from UEFI the copyin and copyout functions handle
		 * adjusting the location relative to the first virtual address.
		 * Because of this there is no need to adjust the offset or entry
		 * point address as these will both be handled by the efi code.
		 */
		off -= ehdr->e_entry & ~PAGE_MASK;
		ehdr->e_entry += off;
#ifdef ELF_VERBOSE
		printf("ehdr->e_entry 0x%08x, va<->pa off %llx\n", ehdr->e_entry, off);
#endif
#else
		off = 0;	/* other archs use direct mapped kernels */
#endif
	}
	ef->off = off;

	if (ef->kernel)
		__elfN(relocation_offset) = off;

	if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) {
		printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: program header not within first page\n");
		goto out;
	}
	phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff);

	for (i = 0; i < ehdr->e_phnum; i++) {
		/*
		 * Fix up ELF endianness.
		 *
		 * The Xhdr structure was loaded using a block read call to
		 * optimize file accesses. It might happen that the endianness
		 * of the system memory is different from the endianness of
		 * the ELF header.
		 * Swap the fields here to guarantee that Xhdr always contains
		 * valid data regardless of architecture.
		 */
		if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) {
			phdr[i].p_type = be32toh(phdr[i].p_type);
			phdr[i].p_flags = be32toh(phdr[i].p_flags);
			if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
				phdr[i].p_offset = be64toh(phdr[i].p_offset);
				phdr[i].p_vaddr = be64toh(phdr[i].p_vaddr);
				phdr[i].p_paddr = be64toh(phdr[i].p_paddr);
				phdr[i].p_filesz = be64toh(phdr[i].p_filesz);
				phdr[i].p_memsz = be64toh(phdr[i].p_memsz);
				phdr[i].p_align = be64toh(phdr[i].p_align);
			} else {
				phdr[i].p_offset = be32toh(phdr[i].p_offset);
				phdr[i].p_vaddr = be32toh(phdr[i].p_vaddr);
				phdr[i].p_paddr = be32toh(phdr[i].p_paddr);
				phdr[i].p_filesz = be32toh(phdr[i].p_filesz);
				phdr[i].p_memsz = be32toh(phdr[i].p_memsz);
				phdr[i].p_align = be32toh(phdr[i].p_align);
			}
		} else {
			phdr[i].p_type = le32toh(phdr[i].p_type);
			phdr[i].p_flags = le32toh(phdr[i].p_flags);
			if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
				phdr[i].p_offset = le64toh(phdr[i].p_offset);
				phdr[i].p_vaddr = le64toh(phdr[i].p_vaddr);
				phdr[i].p_paddr = le64toh(phdr[i].p_paddr);
				phdr[i].p_filesz = le64toh(phdr[i].p_filesz);
				phdr[i].p_memsz = le64toh(phdr[i].p_memsz);
				phdr[i].p_align = le64toh(phdr[i].p_align);
			} else {
				phdr[i].p_offset = le32toh(phdr[i].p_offset);
				phdr[i].p_vaddr = le32toh(phdr[i].p_vaddr);
				phdr[i].p_paddr = le32toh(phdr[i].p_paddr);
				phdr[i].p_filesz = le32toh(phdr[i].p_filesz);
				phdr[i].p_memsz = le32toh(phdr[i].p_memsz);
				phdr[i].p_align = le32toh(phdr[i].p_align);
			}
		}

		/* We want to load PT_LOAD segments only. */
		if (phdr[i].p_type != PT_LOAD)
			continue;

#ifdef ELF_VERBOSE
		printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
		    (long)phdr[i].p_filesz, (long)phdr[i].p_offset,
		    (long)(phdr[i].p_vaddr + off),
		    (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
#else
		if ((phdr[i].p_flags & PF_W) == 0) {
			printf("text=0x%lx ", (long)phdr[i].p_filesz);
		} else {
			printf("data=0x%lx", (long)phdr[i].p_filesz);
			if (phdr[i].p_filesz < phdr[i].p_memsz)
				printf("+0x%lx", (long)(phdr[i].p_memsz - phdr[i].p_filesz));
			printf(" ");
		}
#endif
		fpcopy = 0;
		if (ef->firstlen > phdr[i].p_offset) {
			fpcopy = ef->firstlen - phdr[i].p_offset;
			archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
			    phdr[i].p_vaddr + off, fpcopy);
		}
		if (phdr[i].p_filesz > fpcopy) {
			if (kern_pread(ef->fd, phdr[i].p_vaddr + off + fpcopy,
			    phdr[i].p_filesz - fpcopy, phdr[i].p_offset + fpcopy) != 0) {
				printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
				    "_loadimage: read failed\n");
				goto out;
			}
		}
		/* clear space from oversized segments; eg: bss */
		if (phdr[i].p_filesz < phdr[i].p_memsz) {
#ifdef ELF_VERBOSE
			printf(" (bss: 0x%lx-0x%lx)",
			    (long)(phdr[i].p_vaddr + off + phdr[i].p_filesz),
			    (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
#endif

			kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz,
			    phdr[i].p_memsz - phdr[i].p_filesz);
		}
#ifdef ELF_VERBOSE
		printf("\n");
#endif

		if (archsw.arch_loadseg != NULL)
			archsw.arch_loadseg(ehdr, phdr + i, off);

		if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off))
			firstaddr = phdr[i].p_vaddr + off;
		if (lastaddr == 0 || lastaddr < (phdr[i].p_vaddr + off + phdr[i].p_memsz))
			lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz;
	}
	lastaddr = roundup(lastaddr, sizeof(long));

	/*
	 * Get the section headers.  We need this for finding the .ctors
	 * section as well as for loading any symbols.  Both may be hard
	 * to do if reading from a .gz file as it involves seeking.  I
	 * think the rule is going to have to be that you must strip a
	 * file to remove symbols before gzipping it.
	 */
	chunk = (size_t)ehdr->e_shnum * (size_t)ehdr->e_shentsize;
	if (chunk == 0 || ehdr->e_shoff == 0)
		goto nosyms;
	shdr = alloc_pread(ef->fd, ehdr->e_shoff, chunk);
	if (shdr == NULL) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "_loadimage: failed to read section headers");
		goto nosyms;
	}

	/*
	 * Fix up ELF endianness.
	 *
	 * The Xhdr structure was loaded using a block read call to
	 * optimize file accesses. It might happen that the endianness
	 * of the system memory is different from the endianness of
	 * the ELF header.
	 * Swap the fields here to guarantee that Xhdr always contains
	 * valid data regardless of architecture.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) {
			shdr[i].sh_name = be32toh(shdr[i].sh_name);
			shdr[i].sh_type = be32toh(shdr[i].sh_type);
			shdr[i].sh_link = be32toh(shdr[i].sh_link);
			shdr[i].sh_info = be32toh(shdr[i].sh_info);
			if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
				shdr[i].sh_flags = be64toh(shdr[i].sh_flags);
				shdr[i].sh_addr = be64toh(shdr[i].sh_addr);
				shdr[i].sh_offset = be64toh(shdr[i].sh_offset);
				shdr[i].sh_size = be64toh(shdr[i].sh_size);
				shdr[i].sh_addralign = be64toh(shdr[i].sh_addralign);
				shdr[i].sh_entsize = be64toh(shdr[i].sh_entsize);
			} else {
				shdr[i].sh_flags = be32toh(shdr[i].sh_flags);
				shdr[i].sh_addr = be32toh(shdr[i].sh_addr);
				shdr[i].sh_offset = be32toh(shdr[i].sh_offset);
				shdr[i].sh_size = be32toh(shdr[i].sh_size);
				shdr[i].sh_addralign = be32toh(shdr[i].sh_addralign);
				shdr[i].sh_entsize = be32toh(shdr[i].sh_entsize);
			}
		} else {
			shdr[i].sh_name = le32toh(shdr[i].sh_name);
			shdr[i].sh_type = le32toh(shdr[i].sh_type);
			shdr[i].sh_link = le32toh(shdr[i].sh_link);
			shdr[i].sh_info = le32toh(shdr[i].sh_info);
			if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
				shdr[i].sh_flags = le64toh(shdr[i].sh_flags);
				shdr[i].sh_addr = le64toh(shdr[i].sh_addr);
				shdr[i].sh_offset = le64toh(shdr[i].sh_offset);
				shdr[i].sh_size = le64toh(shdr[i].sh_size);
				shdr[i].sh_addralign = le64toh(shdr[i].sh_addralign);
				shdr[i].sh_entsize = le64toh(shdr[i].sh_entsize);
			} else {
				shdr[i].sh_flags = le32toh(shdr[i].sh_flags);
				shdr[i].sh_addr = le32toh(shdr[i].sh_addr);
				shdr[i].sh_offset = le32toh(shdr[i].sh_offset);
				shdr[i].sh_size = le32toh(shdr[i].sh_size);
				shdr[i].sh_addralign = le32toh(shdr[i].sh_addralign);
				shdr[i].sh_entsize = le32toh(shdr[i].sh_entsize);
			}
		}
	}
	file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr);

	/*
	 * Read the section string table and look for the .ctors section.
	 * We need to tell the kernel where it is so that it can call the
	 * ctors.
	 */
	chunk = shdr[ehdr->e_shstrndx].sh_size;
	if (chunk) {
		shstr = alloc_pread(ef->fd, shdr[ehdr->e_shstrndx].sh_offset, chunk);
		if (shstr) {
			for (i = 0; i < ehdr->e_shnum; i++) {
				if (strcmp(shstr + shdr[i].sh_name, ".ctors") != 0)
					continue;
				ctors = shdr[i].sh_addr;
				file_addmetadata(fp, MODINFOMD_CTORS_ADDR, sizeof(ctors),
				    &ctors);
				size = shdr[i].sh_size;
				file_addmetadata(fp, MODINFOMD_CTORS_SIZE, sizeof(size),
				    &size);
				break;
			}
			free(shstr);
		}
	}

	/*
	 * Now load any symbols.
	 */
	symtabindex = -1;
	symstrindex = -1;
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (shdr[i].sh_type != SHT_SYMTAB)
			continue;
		for (j = 0; j < ehdr->e_phnum; j++) {
			if (phdr[j].p_type != PT_LOAD)
				continue;
			if (shdr[i].sh_offset >= phdr[j].p_offset &&
			    (shdr[i].sh_offset + shdr[i].sh_size <=
			    phdr[j].p_offset + phdr[j].p_filesz)) {
				shdr[i].sh_offset = 0;
				shdr[i].sh_size = 0;
				break;
			}
		}
		if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0)
			continue;	/* already loaded in a PT_LOAD above */
		/* Save it for loading below */
		symtabindex = i;
		symstrindex = shdr[i].sh_link;
	}
	if (symtabindex < 0 || symstrindex < 0)
		goto nosyms;

	/* Ok, committed to a load. */
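	/*
	 * The symbol table and its string table are copied above the loaded
	 * segments, each prefixed with its size so the kernel can walk them
	 * later; ssym and esym bracket the whole region and are handed to
	 * the kernel as metadata below.
	 */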
#ifndef ELF_VERBOSE
	printf("syms=[");
#endif
	ssym = lastaddr;
	for (i = symtabindex; i >= 0; i = symstrindex) {
#ifdef ELF_VERBOSE
		char *secname;

		switch(shdr[i].sh_type) {
		case SHT_SYMTAB:		/* Symbol table */
			secname = "symtab";
			break;
		case SHT_STRTAB:		/* String table */
			secname = "strtab";
			break;
		default:
			secname = "WHOA!!";
			break;
		}
#endif
		size = shdr[i].sh_size;
#if defined(__powerpc__)
#if __ELF_WORD_SIZE == 64
		scr = htobe64(size);
#else
		scr = htobe32(size);
#endif
#else
		scr = size;
#endif
		archsw.arch_copyin(&scr, lastaddr, sizeof(scr));
		lastaddr += sizeof(scr);

#ifdef ELF_VERBOSE
		printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname,
		    (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset,
		    (uintmax_t)lastaddr, (uintmax_t)(lastaddr + shdr[i].sh_size));
#else
		if (i == symstrindex)
			printf("+");
		printf("0x%lx+0x%lx", (long)sizeof(size), (long)size);
#endif

		if (lseek(ef->fd, (off_t)shdr[i].sh_offset, SEEK_SET) == -1) {
			printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not seek for symbols - skipped!");
			lastaddr = ssym;
			ssym = 0;
			goto nosyms;
		}
		result = archsw.arch_readin(ef->fd, lastaddr, shdr[i].sh_size);
		if (result < 0 || (size_t)result != shdr[i].sh_size) {
			printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not read symbols - skipped! (%ju != %ju)", (uintmax_t)result,
			    (uintmax_t)shdr[i].sh_size);
			lastaddr = ssym;
			ssym = 0;
			goto nosyms;
		}
		/* Reset offsets relative to ssym */
		lastaddr += shdr[i].sh_size;
		lastaddr = roundup(lastaddr, sizeof(size));
		if (i == symtabindex)
			symtabindex = -1;
		else if (i == symstrindex)
			symstrindex = -1;
	}
	esym = lastaddr;
#ifndef ELF_VERBOSE
	printf("]");
#endif

#if defined(__powerpc__)
	/* On PowerPC we always need to provide BE data to the kernel */
#if __ELF_WORD_SIZE == 64
	scr_ssym = htobe64((uint64_t)ssym);
	scr_esym = htobe64((uint64_t)esym);
#else
	scr_ssym = htobe32((uint32_t)ssym);
	scr_esym = htobe32((uint32_t)esym);
#endif
#else
	scr_ssym = ssym;
	scr_esym = esym;
#endif

	file_addmetadata(fp, MODINFOMD_SSYM, sizeof(scr_ssym), &scr_ssym);
	file_addmetadata(fp, MODINFOMD_ESYM, sizeof(scr_esym), &scr_esym);

nosyms:
	printf("\n");

	ret = lastaddr - firstaddr;
	fp->f_addr = firstaddr;

	php = NULL;
	for (i = 0; i < ehdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_DYNAMIC) {
			php = phdr + i;
			adp = php->p_vaddr;
			file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp), &adp);
			break;
		}
	}

	if (php == NULL)	/* this is bad, we cannot get to symbols or _DYNAMIC */
		goto out;

	ndp = php->p_filesz / sizeof(Elf_Dyn);
	if (ndp == 0)
		goto out;
	dp = malloc(php->p_filesz);
	if (dp == NULL)
		goto out;
	archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz);

	ef->strsz = 0;
	for (i = 0; i < ndp; i++) {
		if (dp[i].d_tag == 0)
			break;
		switch (dp[i].d_tag) {
		case DT_HASH:
			ef->hashtab = (Elf_Hashelt*)(uintptr_t)(dp[i].d_un.d_ptr + off);
			break;
		case DT_STRTAB:
			ef->strtab = (char *)(uintptr_t)(dp[i].d_un.d_ptr + off);
			break;
		case DT_STRSZ:
			ef->strsz = dp[i].d_un.d_val;
			break;
		case DT_SYMTAB:
			ef->symtab = (Elf_Sym*)(uintptr_t)(dp[i].d_un.d_ptr + off);
			break;
		case DT_REL:
			ef->rel = (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off);
			break;
		case DT_RELSZ:
			ef->relsz = dp[i].d_un.d_val;
			break;
		case DT_RELA:
			ef->rela = (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off);
			break;
		case DT_RELASZ:
			ef->relasz = dp[i].d_un.d_val;
			break;
		default:
			break;
		}
	}
	if (ef->hashtab == NULL || ef->symtab == NULL ||
	    ef->strtab == NULL || ef->strsz == 0)
		goto out;
	COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets));
	COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains));
	ef->buckets = ef->hashtab + 2;
	ef->chains = ef->buckets + ef->nbuckets;

	if (__elfN(lookup_symbol)(fp, ef, "__start_set_modmetadata_set", &sym) != 0)
		return 0;
	p_start = sym.st_value + ef->off;
	if (__elfN(lookup_symbol)(fp, ef, "__stop_set_modmetadata_set", &sym) != 0)
		return ENOENT;
	p_end = sym.st_value + ef->off;

	if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0)
		goto out;

	if (ef->kernel)		/* kernel must not depend on anything */
		goto out;

out:
	if (dp)
		free(dp);
	if (shdr)
		free(shdr);
	return ret;
}

static char invalid_name[] = "bad";

char *
fake_modname(const char *name)
{
	const char *sp, *ep;
	char *fp;
	size_t len;

	sp = strrchr(name, '/');
	if (sp)
		sp++;
	else
		sp = name;
	ep = strrchr(name, '.');
	if (ep) {
		if (ep == name) {
			sp = invalid_name;
			ep = invalid_name + sizeof(invalid_name) - 1;
		}
	} else
		ep = name + strlen(name);
	len = ep - sp;
	fp = malloc(len + 1);
	if (fp == NULL)
		return NULL;
	memcpy(fp, sp, len);
	fp[len] = '\0';
	return fp;
}

#if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
struct mod_metadata64 {
	int		md_version;	/* structure version MDTV_* */
	int		md_type;	/* type of entry MDT_* */
	u_int64_t	md_data;	/* specific data */
	u_int64_t	md_cval;	/* common string label */
};
#endif
#if defined(__amd64__) && __ELF_WORD_SIZE == 32
struct mod_metadata32 {
	int		md_version;	/* structure version MDTV_* */
	int		md_type;	/* type of entry MDT_* */
	u_int32_t	md_data;	/* specific data */
	u_int32_t	md_cval;	/* common string label */
};
#endif

int
__elfN(load_modmetadata)(struct preloaded_file *fp, u_int64_t dest)
{
	struct elf_file ef;
	int err, i, j;
	Elf_Shdr *sh_meta, *shdr = NULL;
	Elf_Shdr *sh_data[2];
	char *shstrtab = NULL;
	size_t size;
	Elf_Addr p_start, p_end;

	bzero(&ef, sizeof(struct elf_file));
	ef.fd = -1;

	err = __elfN(load_elf_header)(fp->f_name, &ef);
	if (err != 0)
		goto out;

	if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) {
		ef.kernel = 1;
	} else if (ef.ehdr->e_type != ET_DYN) {
		err = EFTYPE;
		goto out;
	}

	size = (size_t)ef.ehdr->e_shnum * (size_t)ef.ehdr->e_shentsize;
	shdr = alloc_pread(ef.fd, ef.ehdr->e_shoff, size);
	if (shdr == NULL) {
		err = ENOMEM;
		goto out;
	}

	/* Load shstrtab. */
	shstrtab = alloc_pread(ef.fd, shdr[ef.ehdr->e_shstrndx].sh_offset,
	    shdr[ef.ehdr->e_shstrndx].sh_size);
	if (shstrtab == NULL) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to load shstrtab\n");
		err = EFTYPE;
		goto out;
	}

	/* Find set_modmetadata_set and data sections. */
	sh_data[0] = sh_data[1] = sh_meta = NULL;
	for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) {
		if (strcmp(&shstrtab[shdr[i].sh_name],
		    "set_modmetadata_set") == 0) {
			sh_meta = &shdr[i];
		}
		if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) ||
		    (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) {
			sh_data[j++] = &shdr[i];
		}
	}
	if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to find set_modmetadata_set or data sections\n");
		err = EFTYPE;
		goto out;
	}

	/* Load set_modmetadata_set into memory */
	err = kern_pread(ef.fd, dest, sh_meta->sh_size, sh_meta->sh_offset);
	if (err != 0) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to load set_modmetadata_set: %d\n", err);
		goto out;
	}
	p_start = dest;
	p_end = dest + sh_meta->sh_size;
	dest += sh_meta->sh_size;

	/* Load data sections into memory. */
	err = kern_pread(ef.fd, dest, sh_data[0]->sh_size,
	    sh_data[0]->sh_offset);
	if (err != 0) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to load data: %d\n", err);
		goto out;
	}

	/*
	 * We have to increment the dest, so that the offset is the same into
	 * both the .rodata and .data sections.
	 */
	ef.off = -(sh_data[0]->sh_addr - dest);
	dest += (sh_data[1]->sh_addr - sh_data[0]->sh_addr);

	err = kern_pread(ef.fd, dest, sh_data[1]->sh_size,
	    sh_data[1]->sh_offset);
	if (err != 0) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to load data: %d\n", err);
		goto out;
	}

	err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end);
	if (err != 0) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to parse metadata: %d\n", err);
		goto out;
	}

out:
	if (shstrtab != NULL)
		free(shstrtab);
	if (shdr != NULL)
		free(shdr);
	if (ef.firstpage != NULL)
		free(ef.firstpage);
	if (ef.fd != -1)
		close(ef.fd);
	return (err);
}

int
__elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef,
    Elf_Addr p_start, Elf_Addr p_end)
{
	struct mod_metadata md;
#if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
	struct mod_metadata64 md64;
#elif defined(__amd64__) && __ELF_WORD_SIZE == 32
	struct mod_metadata32 md32;
#endif
	struct mod_depend *mdepend;
	struct mod_version mver;
	char *s;
	int error, modcnt, minfolen;
	Elf_Addr v, p;

	modcnt = 0;
	p = p_start;
	while (p < p_end) {
		COPYOUT(p, &v, sizeof(v));
		error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
		if (error == EOPNOTSUPP)
			v += ef->off;
		else if (error != 0)
			return (error);
#if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
		COPYOUT(v, &md64, sizeof(md64));
		error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64));
		if (error == EOPNOTSUPP) {
			md64.md_cval += ef->off;
			md64.md_data += ef->off;
		} else if (error != 0)
			return (error);
		md.md_version = md64.md_version;
		md.md_type = md64.md_type;
		md.md_cval = (const char *)(uintptr_t)md64.md_cval;
		md.md_data = (void *)(uintptr_t)md64.md_data;
#elif defined(__amd64__) && __ELF_WORD_SIZE == 32
		COPYOUT(v, &md32, sizeof(md32));
		error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32));
		if (error == EOPNOTSUPP) {
			md32.md_cval += ef->off;
			md32.md_data += ef->off;
		} else if (error != 0)
			return (error);
		md.md_version = md32.md_version;
		md.md_type = md32.md_type;
		md.md_cval = (const char *)(uintptr_t)md32.md_cval;
		md.md_data = (void *)(uintptr_t)md32.md_data;
#else
		COPYOUT(v, &md, sizeof(md));
		error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md));
		if (error == EOPNOTSUPP) {
			md.md_cval += ef->off;
			md.md_data = (void *)((uintptr_t)md.md_data + (uintptr_t)ef->off);
		} else if (error != 0)
			return (error);
#endif
		p += sizeof(Elf_Addr);
		switch(md.md_type) {
		case MDT_DEPEND:
			if (ef->kernel)	/* kernel must not depend on anything */
				break;
			s = strdupout((vm_offset_t)md.md_cval);
			minfolen = sizeof(*mdepend) + strlen(s) + 1;
			mdepend = malloc(minfolen);
			if (mdepend == NULL)
				return ENOMEM;
			COPYOUT((vm_offset_t)md.md_data, mdepend, sizeof(*mdepend));
			strcpy((char*)(mdepend + 1), s);
			free(s);
			file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen, mdepend);
			free(mdepend);
			break;
		case MDT_VERSION:
			s = strdupout((vm_offset_t)md.md_cval);
			COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver));
			file_addmodule(fp, s, mver.mv_version, NULL);
			free(s);
			modcnt++;
			break;
		}
	}
	if (modcnt == 0) {
		s = fake_modname(fp->f_name);
		file_addmodule(fp, s, 1, NULL);
		free(s);
	}
	return 0;
}

static unsigned long
elf_hash(const char *name)
{
	const unsigned char *p = (const unsigned char *) name;
	unsigned long h = 0;
	unsigned long g;

	while (*p != '\0') {
		h = (h << 4) + *p++;
		if ((g = h & 0xf0000000) != 0)
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}

static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE) "_lookup_symbol: corrupt symbol table\n";

int
__elfN(lookup_symbol)(struct preloaded_file *fp, elf_file_t ef, const char* name,
    Elf_Sym *symp)
{
	Elf_Hashelt symnum;
	Elf_Sym sym;
	char *strp;
	unsigned long hash;

	hash = elf_hash(name);
	COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum));

	while (symnum != STN_UNDEF) {
		if (symnum >= ef->nchains) {
			printf(__elfN(bad_symtable));
			return ENOENT;
		}

		COPYOUT(ef->symtab + symnum, &sym, sizeof(sym));
		if (sym.st_name == 0) {
			printf(__elfN(bad_symtable));
			return ENOENT;
		}

		strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name));
		if (strcmp(name, strp) == 0) {
			free(strp);
			if (sym.st_shndx != SHN_UNDEF ||
			    (sym.st_value != 0 &&
			    ELF_ST_TYPE(sym.st_info) == STT_FUNC)) {
				*symp = sym;
				return 0;
			}
			return ENOENT;
		}
		free(strp);
		COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum));
	}
	return ENOENT;
}

/*
 * Apply any intra-module relocations to the value. p is the load address
 * of the value and val/len is the value to be modified. This does NOT modify
 * the image in-place, because this is done by kern_linker later on.
 *
 * Returns EOPNOTSUPP if no relocation method is supplied.
 */
static int
__elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
    Elf_Addr p, void *val, size_t len)
{
	size_t n;
	Elf_Rela a;
	Elf_Rel r;
	int error;

	/*
	 * The kernel is already relocated, but we still want to apply
	 * offset adjustments.
	 */
	if (ef->kernel)
		return (EOPNOTSUPP);

	for (n = 0; n < ef->relsz / sizeof(r); n++) {
		COPYOUT(ef->rel + n, &r, sizeof(r));

		error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL,
		    ef->off, p, val, len);
		if (error != 0)
			return (error);
	}
	for (n = 0; n < ef->relasz / sizeof(a); n++) {
		COPYOUT(ef->rela + n, &a, sizeof(a));

		error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA,
		    ef->off, p, val, len);
		if (error != 0)
			return (error);
	}

	return (0);
}

static Elf_Addr
__elfN(symaddr)(struct elf_file *ef, Elf_Size symidx)
{

	/* Symbol lookup by index not required here. */
	return (0);
}