/*-
 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
 * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/exec.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <machine/elf.h>
#include <stand.h>

#include "bootstrap.h"
#include "modinfo.h"

#define COPYOUT(s,d,l)	archsw.arch_copyout((vm_offset_t)(s), d, l)

#if defined(__i386__) && __ELF_WORD_SIZE == 64
#undef ELF_TARG_CLASS
#undef ELF_TARG_MACH
#define ELF_TARG_CLASS	ELFCLASS64
#define ELF_TARG_MACH	EM_X86_64
#endif

typedef struct elf_file {
	Elf_Phdr	*ph;
	Elf_Ehdr	*ehdr;
	Elf_Sym		*symtab;
	Elf_Hashelt	*hashtab;
	Elf_Hashelt	nbuckets;
	Elf_Hashelt	nchains;
	Elf_Hashelt	*buckets;
	Elf_Hashelt	*chains;
	Elf_Rel		*rel;
	size_t		relsz;
	Elf_Rela	*rela;
	size_t		relasz;
	char		*strtab;
	size_t		strsz;
	int		fd;
	caddr_t		firstpage;
	size_t		firstlen;
	int		kernel;
	uint64_t	off;
#ifdef LOADER_VERIEXEC_VECTX
	struct vectx	*vctx;
#endif
} *elf_file_t;

#ifdef LOADER_VERIEXEC_VECTX
#define VECTX_HANDLE(ef) (ef)->vctx
#else
#define VECTX_HANDLE(ef) (ef)->fd
#endif

static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef,
    uint64_t loadaddr);
static int __elfN(lookup_symbol)(elf_file_t ef, const char* name,
    Elf_Sym *sym, unsigned char type);
static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
    Elf_Addr p, void *val, size_t len);
static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef,
    Elf_Addr p_start, Elf_Addr p_end);
static symaddr_fn __elfN(symaddr);
static char *fake_modname(const char *name);

uint64_t	__elfN(relocation_offset) = 0;

#ifdef __powerpc__
extern void elf_wrong_field_size(void);
#define CONVERT_FIELD(b, f, e)			\
	switch (sizeof((b)->f)) {		\
	case 2:					\
		(b)->f = e ## 16toh((b)->f);	\
		break;				\
	case 4:					\
		(b)->f = e ## 32toh((b)->f);	\
		break;				\
	case 8:					\
		(b)->f = e ## 64toh((b)->f);	\
		break;				\
	default:				\
		/* Force a link time error. */	\
		elf_wrong_field_size();		\
		break;				\
	}

#define CONVERT_SWITCH(h, d, f)			\
	switch ((h)->e_ident[EI_DATA]) {	\
	case ELFDATA2MSB:			\
		f(d, be);			\
		break;				\
	case ELFDATA2LSB:			\
		f(d, le);			\
		break;				\
	default:				\
		return (EINVAL);		\
	}

static int elf_header_convert(Elf_Ehdr *ehdr)
{
	/*
	 * Fixup ELF header endianness.
	 *
	 * The Xhdr structure was loaded using a block read call to optimize
	 * file accesses. It might happen that the endianness of the system
	 * memory is different from the endianness of the ELF header. Swap
	 * fields here to guarantee that Xhdr always contains valid data
	 * regardless of architecture.
	 */
#define HEADER_FIELDS(b, e)			\
	CONVERT_FIELD(b, e_type, e);		\
	CONVERT_FIELD(b, e_machine, e);		\
	CONVERT_FIELD(b, e_version, e);		\
	CONVERT_FIELD(b, e_entry, e);		\
	CONVERT_FIELD(b, e_phoff, e);		\
	CONVERT_FIELD(b, e_shoff, e);		\
	CONVERT_FIELD(b, e_flags, e);		\
	CONVERT_FIELD(b, e_ehsize, e);		\
	CONVERT_FIELD(b, e_phentsize, e);	\
	CONVERT_FIELD(b, e_phnum, e);		\
	CONVERT_FIELD(b, e_shentsize, e);	\
	CONVERT_FIELD(b, e_shnum, e);		\
	CONVERT_FIELD(b, e_shstrndx, e)

	CONVERT_SWITCH(ehdr, ehdr, HEADER_FIELDS);

#undef HEADER_FIELDS

	return (0);
}

static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
{
#define PROGRAM_HEADER_FIELDS(b, e)		\
	CONVERT_FIELD(b, p_type, e);		\
	CONVERT_FIELD(b, p_flags, e);		\
	CONVERT_FIELD(b, p_offset, e);		\
	CONVERT_FIELD(b, p_vaddr, e);		\
	CONVERT_FIELD(b, p_paddr, e);		\
	CONVERT_FIELD(b, p_filesz, e);		\
	CONVERT_FIELD(b, p_memsz, e);		\
	CONVERT_FIELD(b, p_align, e)

	CONVERT_SWITCH(ehdr, phdr, PROGRAM_HEADER_FIELDS);

#undef PROGRAM_HEADER_FIELDS

	return (0);
}

static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
{
#define SECTION_HEADER_FIELDS(b, e)		\
	CONVERT_FIELD(b, sh_name, e);		\
	CONVERT_FIELD(b, sh_type, e);		\
	CONVERT_FIELD(b, sh_link, e);		\
	CONVERT_FIELD(b, sh_info, e);		\
	CONVERT_FIELD(b, sh_flags, e);		\
	CONVERT_FIELD(b, sh_addr, e);		\
	CONVERT_FIELD(b, sh_offset, e);		\
	CONVERT_FIELD(b, sh_size, e);		\
	CONVERT_FIELD(b, sh_addralign, e);	\
	CONVERT_FIELD(b, sh_entsize, e)

	CONVERT_SWITCH(ehdr, shdr, SECTION_HEADER_FIELDS);

#undef SECTION_HEADER_FIELDS

	return (0);
}
#undef CONVERT_SWITCH
#undef CONVERT_FIELD
#else
static int elf_header_convert(Elf_Ehdr *ehdr)
{
	return (0);
}

static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
{
	return (0);
}

static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
{
	return (0);
}
#endif

#if defined(__amd64__) || (defined(__i386__) && defined(EFI))
static bool
is_kernphys_relocatable(elf_file_t ef)
{
	Elf_Sym sym;

	return (__elfN(lookup_symbol)(ef, "kernphys", &sym, STT_OBJECT) == 0);
}
#endif

#ifdef __i386__
static bool
is_tg_kernel_support(struct preloaded_file *fp, elf_file_t ef)
{
	Elf_Sym sym;
	Elf_Addr p_start, p_end, v, p;
	char vd_name[16];
	int error;

	if (__elfN(lookup_symbol)(ef, "__start_set_vt_drv_set", &sym, STT_NOTYPE) != 0)
		return (false);
	p_start = sym.st_value + ef->off;
	if (__elfN(lookup_symbol)(ef, "__stop_set_vt_drv_set", &sym, STT_NOTYPE) != 0)
		return (false);
	p_end = sym.st_value + ef->off;

	/*
	 * Walk through vt_drv_set; each vt driver structure starts with a
	 * static 16-char driver name. If we find "vbefb", return true.
	 */
	for (p = p_start; p < p_end; p += sizeof(Elf_Addr)) {
		COPYOUT(p, &v, sizeof(v));

		error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
		if (error == EOPNOTSUPP)
			v += ef->off;
		else if (error != 0)
			return (false);
		COPYOUT(v, &vd_name, sizeof(vd_name));
		if (strncmp(vd_name, "vbefb", sizeof(vd_name)) == 0)
			return (true);
	}

	return (false);
}
#endif

static int
__elfN(load_elf_header)(char *filename, elf_file_t ef)
{
	ssize_t bytes_read;
	Elf_Ehdr *ehdr;
	int err;

	/*
	 * Open the image, read and validate the ELF header
	 */
	if (filename == NULL)	/* can't handle nameless */
		return (EFTYPE);
	if ((ef->fd = open(filename, O_RDONLY)) == -1)
		return (errno);
	ef->firstpage = malloc(PAGE_SIZE);
	if (ef->firstpage == NULL) {
		close(ef->fd);
		return (ENOMEM);
	}
	preload(ef->fd);
#ifdef LOADER_VERIEXEC_VECTX
	{
		int verror;

		ef->vctx = vectx_open(ef->fd, filename, 0L, NULL, &verror, __func__);
		if (verror) {
			printf("Unverified %s: %s\n", filename, ve_error_get());
			close(ef->fd);
			free(ef->vctx);
			return (EAUTH);
		}
	}
#endif
	bytes_read = VECTX_READ(VECTX_HANDLE(ef), ef->firstpage, PAGE_SIZE);
	ef->firstlen = (size_t)bytes_read;
	if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) {
		err = EFTYPE;	/* could be EIO, but may be small file */
		goto error;
	}
	ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage;

	/* Is it ELF? */
	if (!IS_ELF(*ehdr)) {
		err = EFTYPE;
		goto error;
	}

	if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || /* Layout ? */
	    ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) /* Version ? */ {
		err = EFTYPE;
		goto error;
	}

	err = elf_header_convert(ehdr);
	if (err)
		goto error;

	if (ehdr->e_version != EV_CURRENT || ehdr->e_machine != ELF_TARG_MACH) {
		/* Machine ? */
		err = EFTYPE;
		goto error;
	}

#if defined(LOADER_VERIEXEC) && !defined(LOADER_VERIEXEC_VECTX)
	if (verify_file(ef->fd, filename, bytes_read, VE_MUST, __func__) < 0) {
		err = EAUTH;
		goto error;
	}
#endif
	return (0);

error:
	if (ef->firstpage != NULL) {
		free(ef->firstpage);
		ef->firstpage = NULL;
	}
	if (ef->fd != -1) {
#ifdef LOADER_VERIEXEC_VECTX
		free(ef->vctx);
#endif
		close(ef->fd);
		ef->fd = -1;
	}
	return (err);
}

/*
 * Attempt to load the file (file) as an ELF module. It will be stored at
 * (dest), and a pointer to a module structure describing the loaded object
 * will be saved in (result).
 */
int
__elfN(loadfile)(char *filename, uint64_t dest, struct preloaded_file **result)
{
	return (__elfN(loadfile_raw)(filename, dest, result, 0));
}

int
__elfN(loadfile_raw)(char *filename, uint64_t dest,
    struct preloaded_file **result, int multiboot)
{
	struct preloaded_file *fp, *kfp;
	struct elf_file ef;
	Elf_Ehdr *ehdr;
	int err;

	fp = NULL;
	bzero(&ef, sizeof(struct elf_file));
	ef.fd = -1;

	err = __elfN(load_elf_header)(filename, &ef);
	if (err != 0)
		return (err);

	ehdr = ef.ehdr;

	/*
	 * Check to see what sort of module we are.
	 */
	kfp = file_findfile(NULL, md_kerntype);
#ifdef __powerpc__
	/*
	 * Kernels can be ET_DYN, so just assume the first loaded object is the
	 * kernel. This assumption will be checked later.
	 */
	if (kfp == NULL)
		ef.kernel = 1;
#endif
	if (ef.kernel || ehdr->e_type == ET_EXEC) {
		/* Looks like a kernel */
		if (kfp != NULL) {
			printf("elf" __XSTRING(__ELF_WORD_SIZE)
			    "_loadfile: kernel already loaded\n");
			err = EPERM;
			goto oerr;
		}
		/*
		 * Calculate destination address based on kernel entrypoint.
		 *
		 * For ARM, the destination address is independent of any values
		 * in the elf header (an ARM kernel can be loaded at any 2MB
		 * boundary), so we leave dest set to the value calculated by
		 * archsw.arch_loadaddr() and passed in to this function.
		 */
#ifndef __arm__
		if (ehdr->e_type == ET_EXEC)
			dest = (ehdr->e_entry & ~PAGE_MASK);
#endif
		if ((ehdr->e_entry & ~PAGE_MASK) == 0) {
			printf("elf" __XSTRING(__ELF_WORD_SIZE)
			    "_loadfile: not a kernel (maybe static binary?)\n");
			err = EPERM;
			goto oerr;
		}
		ef.kernel = 1;

	} else if (ehdr->e_type == ET_DYN) {
		/* Looks like a kld module */
		if (multiboot != 0) {
			printf("elf" __XSTRING(__ELF_WORD_SIZE)
			    "_loadfile: can't load module as multiboot\n");
			err = EPERM;
			goto oerr;
		}
		if (kfp == NULL) {
			printf("elf" __XSTRING(__ELF_WORD_SIZE)
			    "_loadfile: can't load module before kernel\n");
			err = EPERM;
			goto oerr;
		}
		if (strcmp(md_kerntype, kfp->f_type)) {
			printf("elf" __XSTRING(__ELF_WORD_SIZE)
			    "_loadfile: can't load module with kernel type '%s'\n",
			    kfp->f_type);
			err = EPERM;
			goto oerr;
		}
		/* Looks OK, go ahead */
		ef.kernel = 0;

	} else {
		err = EFTYPE;
		goto oerr;
	}

	if (archsw.arch_loadaddr != NULL)
		dest = archsw.arch_loadaddr(LOAD_ELF, ehdr, dest);
	else
		dest = roundup(dest, PAGE_SIZE);

	/*
	 * Ok, we think we should handle this.
	 */
	fp = file_alloc();
	if (fp == NULL) {
		printf("elf" __XSTRING(__ELF_WORD_SIZE)
		    "_loadfile: cannot allocate module info\n");
		err = EPERM;
		goto out;
	}
	if (ef.kernel == 1 && multiboot == 0)
		setenv("kernelname", filename, 1);
	fp->f_name = strdup(filename);
	if (multiboot == 0)
		fp->f_type = strdup(ef.kernel ?
		    md_kerntype : md_modtype);
	else
		fp->f_type = strdup(md_kerntype_mb);

	if (module_verbose >= MODULE_VERBOSE_FULL) {
		if (ef.kernel)
			printf("%s entry at 0x%jx\n", filename,
			    (uintmax_t)ehdr->e_entry);
	} else if (module_verbose > MODULE_VERBOSE_SILENT)
		printf("%s ", filename);

	fp->f_size = __elfN(loadimage)(fp, &ef, dest);
	if (fp->f_size == 0 || fp->f_addr == 0)
		goto ioerr;

	/* save exec header as metadata */
	file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr);

	/* Load OK, return module pointer */
	*result = (struct preloaded_file *)fp;
	err = 0;
#if defined(__amd64__) || (defined(__i386__) && defined(EFI))
	fp->f_kernphys_relocatable = multiboot || is_kernphys_relocatable(&ef);
#endif
#if defined(__i386__) && !defined(EFI)
	fp->f_tg_kernel_support = is_tg_kernel_support(fp, &ef);
#endif
	goto out;

ioerr:
	err = EIO;
oerr:
	file_discard(fp);
out:
	if (ef.firstpage)
		free(ef.firstpage);
	if (ef.fd != -1) {
#ifdef LOADER_VERIEXEC_VECTX
		if (!err && ef.vctx) {
			int verror;

			verror = vectx_close(ef.vctx, VE_MUST, __func__);
			if (verror) {
				err = EAUTH;
				file_discard(fp);
			}
		}
#endif
		close(ef.fd);
	}
	return (err);
}

/*
 * With the file (fd) open on the image, and (ehdr) containing
 * the Elf header, load the image at (off)
 */
static int
__elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, uint64_t off)
{
	int i;
	u_int j;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr, *php;
	Elf_Shdr *shdr;
	char *shstr;
	int ret;
	vm_offset_t firstaddr;
	vm_offset_t lastaddr;
	size_t chunk;
	ssize_t result;
	Elf_Addr ssym, esym;
	Elf_Dyn *dp;
	Elf_Addr adp;
	Elf_Addr ctors;
	int ndp;
	int symstrindex;
	int symtabindex;
	Elf_Size size;
	u_int fpcopy;
	Elf_Sym sym;
	Elf_Addr p_start, p_end;

	dp = NULL;
	shdr = NULL;
	ret = 0;
	firstaddr = lastaddr = 0;
	ehdr = ef->ehdr;
#ifdef __powerpc__
	if (ef->kernel) {
#else
	if (ehdr->e_type == ET_EXEC) {
#endif
#if defined(__i386__) || defined(__amd64__)
#if __ELF_WORD_SIZE == 64
		/* x86_64 relocates after locore */
		off = - (off & 0xffffffffff000000ull);
#else
		/* i386 relocates after locore */
		off = - (off & 0xff000000u);
#endif
#elif defined(__powerpc__)
		/*
		 * On purely virtual memory machines like e500, the kernel is
		 * linked against its final VA range, which is most often not
		 * available at the loader stage, but only after the kernel
		 * initializes and completes its VM settings. In such cases we
		 * cannot use the p_vaddr field directly to load ELF segments,
		 * but put them at some 'load-time' locations.
		 */
		if (off & 0xf0000000u) {
			off = -(off & 0xf0000000u);
			/*
			 * XXX the physical load address should not be
			 * hardcoded. Note that the Book-E kernel assumes that
			 * it's loaded at a 16MB boundary for now...
			 */
			off += 0x01000000;
		}
		ehdr->e_entry += off;
		if (module_verbose >= MODULE_VERBOSE_FULL)
			printf("Converted entry 0x%jx\n",
			    (uintmax_t)ehdr->e_entry);

#elif defined(__arm__) && !defined(EFI)
		/*
		 * The elf headers in arm kernels specify virtual addresses in
		 * all header fields, even the ones that should be physical
		 * addresses.
		 * We assume the entry point is in the first page, and masking
		 * the page offset will leave us with the virtual address the
		 * kernel was linked at. We subtract that from the load offset,
		 * making 'off' into the value which, when added to a virtual
		 * address in an elf header, translates it to a physical
		 * address. We do the va->pa conversion on the entry point
		 * address in the header now, so that later we can launch the
		 * kernel by just jumping to that address.
		 *
		 * When booting from UEFI the copyin and copyout functions
		 * handle adjusting the location relative to the first virtual
		 * address. Because of this there is no need to adjust the
		 * offset or entry point address as these will both be handled
		 * by the efi code.
		 */
		off -= ehdr->e_entry & ~PAGE_MASK;
		ehdr->e_entry += off;
		if (module_verbose >= MODULE_VERBOSE_FULL)
			printf("ehdr->e_entry 0x%jx, va<->pa off %llx\n",
			    (uintmax_t)ehdr->e_entry, off);
#else
		off = 0;	/* other archs use direct mapped kernels */
#endif
	}
	ef->off = off;

	if (ef->kernel)
		__elfN(relocation_offset) = off;

	if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) {
		printf("elf" __XSTRING(__ELF_WORD_SIZE)
		    "_loadimage: program header not within first page\n");
		goto out;
	}
	phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff);

	for (i = 0; i < ehdr->e_phnum; i++) {
		if (elf_program_header_convert(ehdr, phdr))
			continue;

		/* We want to load PT_LOAD segments only.. */
		if (phdr[i].p_type != PT_LOAD)
			continue;

		if (module_verbose >= MODULE_VERBOSE_FULL) {
			printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
			    (long)phdr[i].p_filesz, (long)phdr[i].p_offset,
			    (long)(phdr[i].p_vaddr + off),
			    (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
		} else if (module_verbose > MODULE_VERBOSE_SILENT) {
			if ((phdr[i].p_flags & PF_W) == 0) {
				printf("text=0x%lx ", (long)phdr[i].p_filesz);
			} else {
				printf("data=0x%lx", (long)phdr[i].p_filesz);
				if (phdr[i].p_filesz < phdr[i].p_memsz)
					printf("+0x%lx", (long)(phdr[i].p_memsz -
					    phdr[i].p_filesz));
				printf(" ");
			}
		}
		fpcopy = 0;
		if (ef->firstlen > phdr[i].p_offset) {
			fpcopy = ef->firstlen - phdr[i].p_offset;
			archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
			    phdr[i].p_vaddr + off, fpcopy);
		}
		if (phdr[i].p_filesz > fpcopy) {
			if (kern_pread(VECTX_HANDLE(ef),
			    phdr[i].p_vaddr + off + fpcopy,
			    phdr[i].p_filesz - fpcopy,
			    phdr[i].p_offset + fpcopy) != 0) {
				printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
				    "_loadimage: read failed\n");
				goto out;
			}
		}
		/* clear space from oversized segments; e.g. bss */
		if (phdr[i].p_filesz < phdr[i].p_memsz) {
			if (module_verbose >= MODULE_VERBOSE_FULL) {
				printf(" (bss: 0x%lx-0x%lx)",
				    (long)(phdr[i].p_vaddr + off + phdr[i].p_filesz),
				    (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
			}
			kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz,
			    phdr[i].p_memsz - phdr[i].p_filesz);
		}
		if (module_verbose >= MODULE_VERBOSE_FULL)
			printf("\n");

		if (archsw.arch_loadseg != NULL)
			archsw.arch_loadseg(ehdr, phdr + i, off);

		if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off))
			firstaddr = phdr[i].p_vaddr + off;
		if (lastaddr == 0 || lastaddr <
		    (phdr[i].p_vaddr + off + phdr[i].p_memsz))
			lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz;
	}
	lastaddr = roundup(lastaddr, sizeof(long));

	/*
	 * Get the section headers. We need this for finding the .ctors
	 * section as well as for loading any symbols. Both may be hard
	 * to do if reading from a .gz file as it involves seeking. I
	 * think the rule is going to have to be that you must strip a
	 * file to remove symbols before gzipping it.
	 */
	chunk = (size_t)ehdr->e_shnum * (size_t)ehdr->e_shentsize;
	if (chunk == 0 || ehdr->e_shoff == 0)
		goto nosyms;
	shdr = alloc_pread(VECTX_HANDLE(ef), ehdr->e_shoff, chunk);
	if (shdr == NULL) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "_loadimage: failed to read section headers");
		goto nosyms;
	}

	for (i = 0; i < ehdr->e_shnum; i++)
		elf_section_header_convert(ehdr, &shdr[i]);

	file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr);

	/*
	 * Read the section string table and look for the .ctors section.
	 * We need to tell the kernel where it is so that it can call the
	 * ctors.
	 */
	chunk = shdr[ehdr->e_shstrndx].sh_size;
	if (chunk) {
		shstr = alloc_pread(VECTX_HANDLE(ef),
		    shdr[ehdr->e_shstrndx].sh_offset, chunk);
		if (shstr) {
			for (i = 0; i < ehdr->e_shnum; i++) {
				if (strcmp(shstr + shdr[i].sh_name,
				    ".ctors") != 0)
					continue;
				ctors = shdr[i].sh_addr;
				file_addmetadata(fp, MODINFOMD_CTORS_ADDR,
				    sizeof(ctors), &ctors);
				size = shdr[i].sh_size;
				file_addmetadata(fp, MODINFOMD_CTORS_SIZE,
				    sizeof(size), &size);
				break;
			}
			free(shstr);
		}
	}

	/*
	 * Now load any symbols.
	 */
	symtabindex = -1;
	symstrindex = -1;
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (shdr[i].sh_type != SHT_SYMTAB)
			continue;
		for (j = 0; j < ehdr->e_phnum; j++) {
			if (phdr[j].p_type != PT_LOAD)
				continue;
			if (shdr[i].sh_offset >= phdr[j].p_offset &&
			    (shdr[i].sh_offset + shdr[i].sh_size <=
			    phdr[j].p_offset + phdr[j].p_filesz)) {
				shdr[i].sh_offset = 0;
				shdr[i].sh_size = 0;
				break;
			}
		}
		if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0)
			continue;	/* already loaded in a PT_LOAD above */
		/* Save it for loading below */
		symtabindex = i;
		symstrindex = shdr[i].sh_link;
	}
	if (symtabindex < 0 || symstrindex < 0)
		goto nosyms;

	/* Ok, committed to a load. */
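	/*
	 * Copy the symbol table and its string table into the loaded image,
	 * each section preceded by its size, and remember the bounds so they
	 * can be handed to the kernel as MODINFOMD_SSYM/MODINFOMD_ESYM below.
	 */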
	if (module_verbose >= MODULE_VERBOSE_FULL)
		printf("syms=[");
	ssym = lastaddr;
	for (i = symtabindex; i >= 0; i = symstrindex) {
		char *secname;

		switch (shdr[i].sh_type) {
		case SHT_SYMTAB:	/* Symbol table */
			secname = "symtab";
			break;
		case SHT_STRTAB:	/* String table */
			secname = "strtab";
			break;
		default:
			secname = "WHOA!!";
			break;
		}
		size = shdr[i].sh_size;

		archsw.arch_copyin(&size, lastaddr, sizeof(size));
		lastaddr += sizeof(size);

		if (module_verbose >= MODULE_VERBOSE_FULL) {
			printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname,
			    (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset,
			    (uintmax_t)lastaddr,
			    (uintmax_t)(lastaddr + shdr[i].sh_size));
		} else if (module_verbose > MODULE_VERBOSE_SILENT) {
			if (i == symstrindex)
				printf("+");
			printf("0x%lx+0x%lx", (long)sizeof(size), (long)size);
		}
		if (VECTX_LSEEK(VECTX_HANDLE(ef), (off_t)shdr[i].sh_offset, SEEK_SET) == -1) {
			printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
			    "_loadimage: could not seek for symbols - skipped!");
			lastaddr = ssym;
			ssym = 0;
			goto nosyms;
		}
		result = archsw.arch_readin(VECTX_HANDLE(ef), lastaddr, shdr[i].sh_size);
		if (result < 0 || (size_t)result != shdr[i].sh_size) {
			printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
			    "_loadimage: could not read symbols - skipped! "
			    "(%ju != %ju)", (uintmax_t)result,
			    (uintmax_t)shdr[i].sh_size);
			lastaddr = ssym;
			ssym = 0;
			goto nosyms;
		}
		/* Reset offsets relative to ssym */
		lastaddr += shdr[i].sh_size;
		lastaddr = roundup(lastaddr, sizeof(size));
		if (i == symtabindex)
			symtabindex = -1;
		else if (i == symstrindex)
			symstrindex = -1;
	}
	esym = lastaddr;
	if (module_verbose >= MODULE_VERBOSE_FULL)
		printf("]");

	file_addmetadata(fp, MODINFOMD_SSYM, sizeof(ssym), &ssym);
	file_addmetadata(fp, MODINFOMD_ESYM, sizeof(esym), &esym);

nosyms:
	if (module_verbose > MODULE_VERBOSE_SILENT)
		printf("\n");

	ret = lastaddr - firstaddr;
	fp->f_addr = firstaddr;

	php = NULL;
	for (i = 0; i < ehdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_DYNAMIC) {
			php = phdr + i;
			adp = php->p_vaddr;
			file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp),
			    &adp);
			break;
		}
	}

	if (php == NULL)	/* this is bad, we cannot get to symbols or _DYNAMIC */
		goto out;

	ndp = php->p_filesz / sizeof(Elf_Dyn);
	if (ndp == 0)
		goto out;
	dp = malloc(php->p_filesz);
	if (dp == NULL)
		goto out;
	archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz);

	ef->strsz = 0;
	for (i = 0; i < ndp; i++) {
		if (dp[i].d_tag == 0)
			break;
		switch (dp[i].d_tag) {
		case DT_HASH:
			ef->hashtab =
			    (Elf_Hashelt*)(uintptr_t)(dp[i].d_un.d_ptr + off);
			break;
		case DT_STRTAB:
			ef->strtab =
			    (char *)(uintptr_t)(dp[i].d_un.d_ptr + off);
			break;
		case DT_STRSZ:
			ef->strsz = dp[i].d_un.d_val;
			break;
		case DT_SYMTAB:
			ef->symtab =
			    (Elf_Sym *)(uintptr_t)(dp[i].d_un.d_ptr + off);
			break;
		case DT_REL:
			ef->rel =
			    (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off);
			break;
		case DT_RELSZ:
			ef->relsz = dp[i].d_un.d_val;
			break;
		case DT_RELA:
			ef->rela =
			    (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off);
			break;
		case DT_RELASZ:
			ef->relasz = dp[i].d_un.d_val;
			break;
		default:
			break;
		}
	}
	if (ef->hashtab == NULL || ef->symtab == NULL ||
	    ef->strtab == NULL || ef->strsz == 0)
		goto out;
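	/*
	 * The DT_HASH table begins with the bucket and chain counts,
	 * followed by the bucket array and then the chain array.
	 */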
	COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets));
	COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains));
	ef->buckets = ef->hashtab + 2;
	ef->chains = ef->buckets + ef->nbuckets;

	if (__elfN(lookup_symbol)(ef, "__start_set_modmetadata_set", &sym,
	    STT_NOTYPE) != 0)
		return 0;
	p_start = sym.st_value + ef->off;
	if (__elfN(lookup_symbol)(ef, "__stop_set_modmetadata_set", &sym,
	    STT_NOTYPE) != 0)
		return 0;
	p_end = sym.st_value + ef->off;

	if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0)
		goto out;

	if (ef->kernel)		/* kernel must not depend on anything */
		goto out;

out:
	if (dp)
		free(dp);
	if (shdr)
		free(shdr);
	return ret;
}

static char invalid_name[] = "bad";

char *
fake_modname(const char *name)
{
	const char *sp, *ep;
	char *fp;
	size_t len;

	sp = strrchr(name, '/');
	if (sp)
		sp++;
	else
		sp = name;

	ep = strrchr(sp, '.');
	if (ep == NULL) {
		ep = sp + strlen(sp);
	}
	if (ep == sp) {
		sp = invalid_name;
		ep = invalid_name + sizeof(invalid_name) - 1;
	}

	len = ep - sp;
	fp = malloc(len + 1);
	if (fp == NULL)
		return NULL;
	memcpy(fp, sp, len);
	fp[len] = '\0';
	return fp;
}

#if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
struct mod_metadata64 {
	int		md_version;	/* structure version MDTV_* */
	int		md_type;	/* type of entry MDT_* */
	uint64_t	md_data;	/* specific data */
	uint64_t	md_cval;	/* common string label */
};
#endif
#if defined(__amd64__) && __ELF_WORD_SIZE == 32
struct mod_metadata32 {
	int		md_version;	/* structure version MDTV_* */
	int		md_type;	/* type of entry MDT_* */
	uint32_t	md_data;	/* specific data */
	uint32_t	md_cval;	/* common string label */
};
#endif

int
__elfN(load_modmetadata)(struct preloaded_file *fp, uint64_t dest)
{
	struct elf_file ef;
	int err, i, j;
	Elf_Shdr *sh_meta, *shdr = NULL;
	Elf_Shdr *sh_data[2];
	char *shstrtab = NULL;
	size_t size;
	Elf_Addr p_start, p_end;

	bzero(&ef, sizeof(struct elf_file));
	ef.fd = -1;

	err = __elfN(load_elf_header)(fp->f_name, &ef);
	if (err != 0)
		goto out;

	if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) {
		ef.kernel = 1;
	} else if (ef.ehdr->e_type != ET_DYN) {
		err = EFTYPE;
		goto out;
	}

	size = (size_t)ef.ehdr->e_shnum * (size_t)ef.ehdr->e_shentsize;
	shdr = alloc_pread(VECTX_HANDLE(&ef), ef.ehdr->e_shoff, size);
	if (shdr == NULL) {
		err = ENOMEM;
		goto out;
	}

	/* Load shstrtab. */
	shstrtab = alloc_pread(VECTX_HANDLE(&ef), shdr[ef.ehdr->e_shstrndx].sh_offset,
	    shdr[ef.ehdr->e_shstrndx].sh_size);
	if (shstrtab == NULL) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to load shstrtab\n");
		err = EFTYPE;
		goto out;
	}

	/* Find set_modmetadata_set and data sections. */
	sh_data[0] = sh_data[1] = sh_meta = NULL;
	for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) {
		if (strcmp(&shstrtab[shdr[i].sh_name],
		    "set_modmetadata_set") == 0) {
			sh_meta = &shdr[i];
		}
		if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) ||
		    (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) {
			sh_data[j++] = &shdr[i];
		}
	}
	if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to find set_modmetadata_set or data sections\n");
		err = EFTYPE;
		goto out;
	}

	/* Load set_modmetadata_set into memory */
	err = kern_pread(VECTX_HANDLE(&ef), dest, sh_meta->sh_size, sh_meta->sh_offset);
	if (err != 0) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to load set_modmetadata_set: %d\n", err);
		goto out;
	}
	p_start = dest;
	p_end = dest + sh_meta->sh_size;
	dest += sh_meta->sh_size;

	/* Load data sections into memory. */
	err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[0]->sh_size,
	    sh_data[0]->sh_offset);
	if (err != 0) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to load data: %d\n", err);
		goto out;
	}

	/*
	 * We have to increment the dest, so that the offset is the same into
	 * both the .rodata and .data sections.
	 */
	ef.off = -(sh_data[0]->sh_addr - dest);
	dest += (sh_data[1]->sh_addr - sh_data[0]->sh_addr);

	err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[1]->sh_size,
	    sh_data[1]->sh_offset);
	if (err != 0) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to load data: %d\n", err);
		goto out;
	}

	err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end);
	if (err != 0) {
		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
		    "load_modmetadata: unable to parse metadata: %d\n", err);
		goto out;
	}

out:
	if (shstrtab != NULL)
		free(shstrtab);
	if (shdr != NULL)
		free(shdr);
	if (ef.firstpage != NULL)
		free(ef.firstpage);
	if (ef.fd != -1) {
#ifdef LOADER_VERIEXEC_VECTX
		if (!err && ef.vctx) {
			int verror;

			verror = vectx_close(ef.vctx, VE_MUST, __func__);
			if (verror) {
				err = EAUTH;
				file_discard(fp);
			}
		}
#endif
		close(ef.fd);
	}
	return (err);
}

int
__elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef,
    Elf_Addr p_start, Elf_Addr p_end)
{
	struct mod_metadata md;
#if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
	struct mod_metadata64 md64;
#elif defined(__amd64__) && __ELF_WORD_SIZE == 32
	struct mod_metadata32 md32;
#endif
	struct mod_depend *mdepend;
	struct mod_version mver;
	char *s;
	int error, modcnt, minfolen;
	Elf_Addr v, p;

	modcnt = 0;
	p = p_start;
	while (p < p_end) {
		COPYOUT(p, &v, sizeof(v));
		error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
		if (error == EOPNOTSUPP)
			v += ef->off;
		else if (error != 0)
			return (error);
#if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
		COPYOUT(v, &md64, sizeof(md64));
		error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64));
		if (error == EOPNOTSUPP) {
			md64.md_cval += ef->off;
			md64.md_data += ef->off;
		} else if (error != 0)
			return (error);
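		/* Translate the 64-bit metadata record into the native layout. */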
		md.md_version = md64.md_version;
		md.md_type = md64.md_type;
		md.md_cval = (const char *)(uintptr_t)md64.md_cval;
		md.md_data = (void *)(uintptr_t)md64.md_data;
#elif defined(__amd64__) && __ELF_WORD_SIZE == 32
		COPYOUT(v, &md32, sizeof(md32));
		error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32));
		if (error == EOPNOTSUPP) {
			md32.md_cval += ef->off;
			md32.md_data += ef->off;
		} else if (error != 0)
			return (error);
		md.md_version = md32.md_version;
		md.md_type = md32.md_type;
		md.md_cval = (const char *)(uintptr_t)md32.md_cval;
		md.md_data = (void *)(uintptr_t)md32.md_data;
#else
		COPYOUT(v, &md, sizeof(md));
		error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md));
		if (error == EOPNOTSUPP) {
			md.md_cval += ef->off;
			md.md_data = (void *)((uintptr_t)md.md_data +
			    (uintptr_t)ef->off);
		} else if (error != 0)
			return (error);
#endif
		p += sizeof(Elf_Addr);
		switch (md.md_type) {
		case MDT_DEPEND:
			if (ef->kernel)	/* kernel must not depend on anything */
				break;
			s = strdupout((vm_offset_t)md.md_cval);
			minfolen = sizeof(*mdepend) + strlen(s) + 1;
			mdepend = malloc(minfolen);
			if (mdepend == NULL)
				return ENOMEM;
			COPYOUT((vm_offset_t)md.md_data, mdepend,
			    sizeof(*mdepend));
			strcpy((char*)(mdepend + 1), s);
			free(s);
			file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen,
			    mdepend);
			free(mdepend);
			break;
		case MDT_VERSION:
			s = strdupout((vm_offset_t)md.md_cval);
			COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver));
			file_addmodule(fp, s, mver.mv_version, NULL);
			free(s);
			modcnt++;
			break;
		}
	}
	if (modcnt == 0) {
		s = fake_modname(fp->f_name);
		file_addmodule(fp, s, 1, NULL);
		free(s);
	}
	return 0;
}

static unsigned long
elf_hash(const char *name)
{
	const unsigned char *p = (const unsigned char *) name;
	unsigned long h = 0;
	unsigned long g;

	while (*p != '\0') {
		h = (h << 4) + *p++;
		if ((g = h & 0xf0000000) != 0)
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}

static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE)
    "_lookup_symbol: corrupt symbol table\n";
int
__elfN(lookup_symbol)(elf_file_t ef, const char* name, Elf_Sym *symp,
    unsigned char type)
{
	Elf_Hashelt symnum;
	Elf_Sym sym;
	char *strp;
	unsigned long hash;

	if (ef->nbuckets == 0) {
		printf(__elfN(bad_symtable));
		return ENOENT;
	}

	hash = elf_hash(name);
	COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum));

	while (symnum != STN_UNDEF) {
		if (symnum >= ef->nchains) {
			printf(__elfN(bad_symtable));
			return ENOENT;
		}

		COPYOUT(ef->symtab + symnum, &sym, sizeof(sym));
		if (sym.st_name == 0) {
			printf(__elfN(bad_symtable));
			return ENOENT;
		}

		strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name));
		if (strcmp(name, strp) == 0) {
			free(strp);
			if (sym.st_shndx != SHN_UNDEF && sym.st_value != 0 &&
			    ELF_ST_TYPE(sym.st_info) == type) {
				*symp = sym;
				return 0;
			}
			return ENOENT;
		}
		free(strp);
		COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum));
	}
	return ENOENT;
}

/*
 * Apply any intra-module relocations to the value.
 * p is the load address of the value and val/len is the value to be
 * modified. This does NOT modify the image in-place, because this is
 * done by kern_linker later on.
 *
 * Returns EOPNOTSUPP if no relocation method is supplied.
 */
static int
__elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
    Elf_Addr p, void *val, size_t len)
{
	size_t n;
	Elf_Rela a;
	Elf_Rel r;
	int error;

	/*
	 * The kernel is already relocated, but we still want to apply
	 * offset adjustments.
	 */
	if (ef->kernel)
		return (EOPNOTSUPP);

	for (n = 0; n < ef->relsz / sizeof(r); n++) {
		COPYOUT(ef->rel + n, &r, sizeof(r));

		error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL,
		    ef->off, p, val, len);
		if (error != 0)
			return (error);
	}
	for (n = 0; n < ef->relasz / sizeof(a); n++) {
		COPYOUT(ef->rela + n, &a, sizeof(a));

		error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA,
		    ef->off, p, val, len);
		if (error != 0)
			return (error);
	}

	return (0);
}

static Elf_Addr
__elfN(symaddr)(struct elf_file *ef, Elf_Size symidx)
{

	/* Symbol lookup by index not required here. */
	return (0);
}