1 /*-
2 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3 * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/param.h>
29 #include <sys/endian.h>
30 #include <sys/exec.h>
31 #include <sys/linker.h>
32 #include <sys/module.h>
33 #include <sys/stdint.h>
34 #include <string.h>
35 #include <machine/elf.h>
36 #include <stand.h>
37 #include <sys/link_elf.h>
38
39 #include "bootstrap.h"
40
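/*
 * COPYOUT reads bytes back out of the staged load area through the
 * architecture's copyout hook, since that area need not be directly
 * addressable by the loader itself.
 */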
41 #define COPYOUT(s,d,l) archsw.arch_copyout((vm_offset_t)(s), d, l)
42
43 #if defined(__i386__) && __ELF_WORD_SIZE == 64
44 #undef ELF_TARG_CLASS
45 #undef ELF_TARG_MACH
46 #define ELF_TARG_CLASS ELFCLASS64
47 #define ELF_TARG_MACH EM_X86_64
48 #endif
49
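/*
 * State shared by the loading passes below: the open descriptor, the
 * first page of the file, and pointers into the image's dynamic tables
 * once they have been located.
 */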
50 typedef struct elf_file {
51 Elf_Phdr *ph;
52 Elf_Ehdr *ehdr;
53 Elf_Sym *symtab;
54 Elf_Hashelt *hashtab;
55 Elf_Hashelt nbuckets;
56 Elf_Hashelt nchains;
57 Elf_Hashelt *buckets;
58 Elf_Hashelt *chains;
59 Elf_Rel *rel;
60 size_t relsz;
61 Elf_Rela *rela;
62 size_t relasz;
63 char *strtab;
64 size_t strsz;
65 int fd;
66 caddr_t firstpage;
67 size_t firstlen;
68 int kernel;
69 uint64_t off;
70 #ifdef LOADER_VERIEXEC_VECTX
71 struct vectx *vctx;
72 #endif
73 } *elf_file_t;
74
75 #ifdef LOADER_VERIEXEC_VECTX
76 #define VECTX_HANDLE(ef) (ef)->vctx
77 #else
78 #define VECTX_HANDLE(ef) (ef)->fd
79 #endif
80
81 static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef,
82 uint64_t loadaddr);
83 static int __elfN(lookup_symbol)(elf_file_t ef, const char* name,
84 Elf_Sym *sym, unsigned char type);
85 static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
86 Elf_Addr p, void *val, size_t len);
87 static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef,
88 Elf_Addr p_start, Elf_Addr p_end);
89 static symaddr_fn __elfN(symaddr);
90 static char *fake_modname(const char *name);
91
92 const char *__elfN(kerneltype) = "elf kernel";
93 const char *__elfN(moduletype) = "elf module";
94
95 uint64_t __elfN(relocation_offset) = 0;
96
97 #ifdef __powerpc__
98 extern void elf_wrong_field_size(void);
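/*
 * CONVERT_FIELD picks the byte-swap width from the field's size at
 * compile time; an unexpected size calls the undefined
 * elf_wrong_field_size(), turning the mistake into a link-time error.
 */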
99 #define CONVERT_FIELD(b, f, e) \
100 switch (sizeof((b)->f)) { \
101 case 2: \
102 (b)->f = e ## 16toh((b)->f); \
103 break; \
104 case 4: \
105 (b)->f = e ## 32toh((b)->f); \
106 break; \
107 case 8: \
108 (b)->f = e ## 64toh((b)->f); \
109 break; \
110 default: \
111 /* Force a link time error. */ \
112 elf_wrong_field_size(); \
113 break; \
114 }
115
116 #define CONVERT_SWITCH(h, d, f) \
117 switch ((h)->e_ident[EI_DATA]) { \
118 case ELFDATA2MSB: \
119 f(d, be); \
120 break; \
121 case ELFDATA2LSB: \
122 f(d, le); \
123 break; \
124 default: \
125 return (EINVAL); \
126 }
127
128
129 static int elf_header_convert(Elf_Ehdr *ehdr)
130 {
131 /*
132 * Fixup ELF header endianness.
133 *
134 * The Xhdr structure was loaded using a block read call to optimize file
135 * accesses. The endianness of the system memory may differ from the
136 * endianness of the ELF header. Swap the fields here to guarantee that
137 * the Xhdr always contains valid data regardless of the architecture.
139 */
140 #define HEADER_FIELDS(b, e) \
141 CONVERT_FIELD(b, e_type, e); \
142 CONVERT_FIELD(b, e_machine, e); \
143 CONVERT_FIELD(b, e_version, e); \
144 CONVERT_FIELD(b, e_entry, e); \
145 CONVERT_FIELD(b, e_phoff, e); \
146 CONVERT_FIELD(b, e_shoff, e); \
147 CONVERT_FIELD(b, e_flags, e); \
148 CONVERT_FIELD(b, e_ehsize, e); \
149 CONVERT_FIELD(b, e_phentsize, e); \
150 CONVERT_FIELD(b, e_phnum, e); \
151 CONVERT_FIELD(b, e_shentsize, e); \
152 CONVERT_FIELD(b, e_shnum, e); \
153 CONVERT_FIELD(b, e_shstrndx, e)
154
155 CONVERT_SWITCH(ehdr, ehdr, HEADER_FIELDS);
156
157 #undef HEADER_FIELDS
158
159 return (0);
160 }
161
162 static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
163 {
164 #define PROGRAM_HEADER_FIELDS(b, e) \
165 CONVERT_FIELD(b, p_type, e); \
166 CONVERT_FIELD(b, p_flags, e); \
167 CONVERT_FIELD(b, p_offset, e); \
168 CONVERT_FIELD(b, p_vaddr, e); \
169 CONVERT_FIELD(b, p_paddr, e); \
170 CONVERT_FIELD(b, p_filesz, e); \
171 CONVERT_FIELD(b, p_memsz, e); \
172 CONVERT_FIELD(b, p_align, e)
173
174 CONVERT_SWITCH(ehdr, phdr, PROGRAM_HEADER_FIELDS);
175
176 #undef PROGRAM_HEADER_FIELDS
177
178 return (0);
179 }
180
181 static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
182 {
183 #define SECTION_HEADER_FIELDS(b, e) \
184 CONVERT_FIELD(b, sh_name, e); \
185 CONVERT_FIELD(b, sh_type, e); \
186 CONVERT_FIELD(b, sh_link, e); \
187 CONVERT_FIELD(b, sh_info, e); \
188 CONVERT_FIELD(b, sh_flags, e); \
189 CONVERT_FIELD(b, sh_addr, e); \
190 CONVERT_FIELD(b, sh_offset, e); \
191 CONVERT_FIELD(b, sh_size, e); \
192 CONVERT_FIELD(b, sh_addralign, e); \
193 CONVERT_FIELD(b, sh_entsize, e)
194
195 CONVERT_SWITCH(ehdr, shdr, SECTION_HEADER_FIELDS);
196
197 #undef SECTION_HEADER_FIELDS
198
199 return (0);
200 }
201 #undef CONVERT_SWITCH
202 #undef CONVERT_FIELD
203 #else
204 static int elf_header_convert(Elf_Ehdr *ehdr)
205 {
206 return (0);
207 }
208
209 static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
210 {
211 return (0);
212 }
213
214 static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
215 {
216 return (0);
217 }
218 #endif
219
220 #if defined(__amd64__) || defined(__i386__)
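/*
 * A kernel exporting the "kernphys" symbol is taken to support being
 * loaded at (and relocated from) an arbitrary physical address.
 */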
221 static bool
222 is_kernphys_relocatable(elf_file_t ef)
223 {
224 Elf_Sym sym;
225
226 return (__elfN(lookup_symbol)(ef, "kernphys", &sym, STT_OBJECT) == 0);
227 }
228 #endif
229
230 #ifdef __i386__
231 static bool
232 is_tg_kernel_support(struct preloaded_file *fp, elf_file_t ef)
233 {
234 Elf_Sym sym;
235 Elf_Addr p_start, p_end, v, p;
236 char vd_name[16];
237 int error;
238
239 if (__elfN(lookup_symbol)(ef, "__start_set_vt_drv_set", &sym, STT_NOTYPE) != 0)
240 return (false);
241 p_start = sym.st_value + ef->off;
242 if (__elfN(lookup_symbol)(ef, "__stop_set_vt_drv_set", &sym, STT_NOTYPE) != 0)
243 return (false);
244 p_end = sym.st_value + ef->off;
245
246 /*
247 * Walk through vt_drv_set; each vt driver structure starts with a
248 * static 16-character driver name. If we find "vbefb", return true.
249 */
250 for (p = p_start; p < p_end; p += sizeof(Elf_Addr)) {
251 COPYOUT(p, &v, sizeof(v));
252
253 error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
254 if (error == EOPNOTSUPP)
255 v += ef->off;
256 else if (error != 0)
257 return (false);
258 COPYOUT(v, &vd_name, sizeof(vd_name));
259 if (strncmp(vd_name, "vbefb", sizeof(vd_name)) == 0)
260 return (true);
261 }
262
263 return (false);
264 }
265 #endif
266
267 static int
268 __elfN(load_elf_header)(char *filename, elf_file_t ef)
269 {
270 ssize_t bytes_read;
271 Elf_Ehdr *ehdr;
272 int err;
273
274 /*
275 * Open the image, read and validate the ELF header
276 */
277 if (filename == NULL) /* can't handle nameless */
278 return (EFTYPE);
279 if ((ef->fd = open(filename, O_RDONLY)) == -1)
280 return (errno);
281 ef->firstpage = malloc(PAGE_SIZE);
282 if (ef->firstpage == NULL) {
283 close(ef->fd);
284 return (ENOMEM);
285 }
286 preload(ef->fd);
287 #ifdef LOADER_VERIEXEC_VECTX
288 {
289 int verror;
290
291 ef->vctx = vectx_open(ef->fd, filename, 0L, NULL, &verror, __func__);
292 if (verror) {
293 printf("Unverified %s: %s\n", filename, ve_error_get());
294 close(ef->fd);
295 free(ef->vctx);
296 return (EAUTH);
297 }
298 }
299 #endif
300 bytes_read = VECTX_READ(VECTX_HANDLE(ef), ef->firstpage, PAGE_SIZE);
301 ef->firstlen = (size_t)bytes_read;
302 if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) {
303 err = EFTYPE; /* could be EIO, but may be small file */
304 goto error;
305 }
306 ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage;
307
308 /* Is it ELF? */
309 if (!IS_ELF(*ehdr)) {
310 err = EFTYPE;
311 goto error;
312 }
313
314 if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || /* Layout ? */
315 ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
316 ehdr->e_ident[EI_VERSION] != EV_CURRENT) /* Version ? */ {
317 err = EFTYPE;
318 goto error;
319 }
320
321 err = elf_header_convert(ehdr);
322 if (err)
323 goto error;
324
325 if (ehdr->e_version != EV_CURRENT || ehdr->e_machine != ELF_TARG_MACH) {
326 /* Machine ? */
327 err = EFTYPE;
328 goto error;
329 }
330
331 #if defined(LOADER_VERIEXEC) && !defined(LOADER_VERIEXEC_VECTX)
332 if (verify_file(ef->fd, filename, bytes_read, VE_MUST, __func__) < 0) {
333 err = EAUTH;
334 goto error;
335 }
336 #endif
337 return (0);
338
339 error:
340 if (ef->firstpage != NULL) {
341 free(ef->firstpage);
342 ef->firstpage = NULL;
343 }
344 if (ef->fd != -1) {
345 #ifdef LOADER_VERIEXEC_VECTX
346 free(ef->vctx);
347 #endif
348 close(ef->fd);
349 ef->fd = -1;
350 }
351 return (err);
352 }
353
354 /*
355 * Attempt to load the file (filename) as an ELF module. It will be stored at
356 * (dest), and a pointer to a module structure describing the loaded object
357 * will be saved in (result).
358 */
359 int
360 __elfN(loadfile)(char *filename, uint64_t dest, struct preloaded_file **result)
361 {
362 return (__elfN(loadfile_raw)(filename, dest, result, 0));
363 }
364
365 int
366 __elfN(loadfile_raw)(char *filename, uint64_t dest,
367 struct preloaded_file **result, int multiboot)
368 {
369 struct preloaded_file *fp, *kfp;
370 struct elf_file ef;
371 Elf_Ehdr *ehdr;
372 int err;
373
374 fp = NULL;
375 bzero(&ef, sizeof(struct elf_file));
376 ef.fd = -1;
377
378 err = __elfN(load_elf_header)(filename, &ef);
379 if (err != 0)
380 return (err);
381
382 ehdr = ef.ehdr;
383
384 /*
385 * Check to see what sort of module we are.
386 */
387 kfp = file_findfile(NULL, __elfN(kerneltype));
388 #ifdef __powerpc__
389 /*
390 * Kernels can be ET_DYN, so just assume the first loaded object is the
391 * kernel. This assumption will be checked later.
392 */
393 if (kfp == NULL)
394 ef.kernel = 1;
395 #endif
396 if (ef.kernel || ehdr->e_type == ET_EXEC) {
397 /* Looks like a kernel */
398 if (kfp != NULL) {
399 printf("elf" __XSTRING(__ELF_WORD_SIZE)
400 "_loadfile: kernel already loaded\n");
401 err = EPERM;
402 goto oerr;
403 }
404 /*
405 * Calculate destination address based on kernel entrypoint.
406 *
407 * For ARM, the destination address is independent of any values
408 * in the elf header (an ARM kernel can be loaded at any 2MB
409 * boundary), so we leave dest set to the value calculated by
410 * archsw.arch_loadaddr() and passed in to this function.
411 */
412 #ifndef __arm__
413 if (ehdr->e_type == ET_EXEC)
414 dest = (ehdr->e_entry & ~PAGE_MASK);
415 #endif
416 if ((ehdr->e_entry & ~PAGE_MASK) == 0) {
417 printf("elf" __XSTRING(__ELF_WORD_SIZE)
418 "_loadfile: not a kernel (maybe static binary?)\n");
419 err = EPERM;
420 goto oerr;
421 }
422 ef.kernel = 1;
423
424 } else if (ehdr->e_type == ET_DYN) {
425 /* Looks like a kld module */
426 if (multiboot != 0) {
427 printf("elf" __XSTRING(__ELF_WORD_SIZE)
428 "_loadfile: can't load module as multiboot\n");
429 err = EPERM;
430 goto oerr;
431 }
432 if (kfp == NULL) {
433 printf("elf" __XSTRING(__ELF_WORD_SIZE)
434 "_loadfile: can't load module before kernel\n");
435 err = EPERM;
436 goto oerr;
437 }
438 if (strcmp(__elfN(kerneltype), kfp->f_type)) {
439 printf("elf" __XSTRING(__ELF_WORD_SIZE)
440 "_loadfile: can't load module with kernel type '%s'\n",
441 kfp->f_type);
442 err = EPERM;
443 goto oerr;
444 }
445 /* Looks OK, go ahead */
446 ef.kernel = 0;
447
448 } else {
449 err = EFTYPE;
450 goto oerr;
451 }
452
453 if (archsw.arch_loadaddr != NULL)
454 dest = archsw.arch_loadaddr(LOAD_ELF, ehdr, dest);
455 else
456 dest = roundup(dest, PAGE_SIZE);
457
458 /*
459 * Ok, we think we should handle this.
460 */
461 fp = file_alloc();
462 if (fp == NULL) {
463 printf("elf" __XSTRING(__ELF_WORD_SIZE)
464 "_loadfile: cannot allocate module info\n");
465 err = EPERM;
466 goto out;
467 }
468 if (ef.kernel == 1 && multiboot == 0)
469 setenv("kernelname", filename, 1);
470 fp->f_name = strdup(filename);
471 if (multiboot == 0)
472 fp->f_type = strdup(ef.kernel ?
473 __elfN(kerneltype) : __elfN(moduletype));
474 else
475 fp->f_type = strdup("elf multiboot kernel");
476
477 if (module_verbose >= MODULE_VERBOSE_FULL) {
478 if (ef.kernel)
479 printf("%s entry at 0x%jx\n", filename,
480 (uintmax_t)ehdr->e_entry);
481 } else if (module_verbose > MODULE_VERBOSE_SILENT)
482 printf("%s ", filename);
483
484 fp->f_size = __elfN(loadimage)(fp, &ef, dest);
485 if (fp->f_size == 0 || fp->f_addr == 0)
486 goto ioerr;
487
488 /* save exec header as metadata */
489 file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr);
490
491 /* Load OK, return module pointer */
492 *result = (struct preloaded_file *)fp;
493 err = 0;
494 #if defined(__amd64__) || defined(__i386__)
495 fp->f_kernphys_relocatable = multiboot || is_kernphys_relocatable(&ef);
496 #endif
497 #ifdef __i386__
498 fp->f_tg_kernel_support = is_tg_kernel_support(fp, &ef);
499 #endif
500 goto out;
501
502 ioerr:
503 err = EIO;
504 oerr:
505 file_discard(fp);
506 out:
507 if (ef.firstpage)
508 free(ef.firstpage);
509 if (ef.fd != -1) {
510 #ifdef LOADER_VERIEXEC_VECTX
511 if (!err && ef.vctx) {
512 int verror;
513
514 verror = vectx_close(ef.vctx, VE_MUST, __func__);
515 if (verror) {
516 err = EAUTH;
517 file_discard(fp);
518 }
519 }
520 #endif
521 close(ef.fd);
522 }
523 return (err);
524 }
525
526 /*
527 * With the file open on the image and (ef) holding the parsed ELF
528 * header, load the image at (off).
529 */
530 static int
531 __elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, uint64_t off)
532 {
533 int i;
534 u_int j;
535 Elf_Ehdr *ehdr;
536 Elf_Phdr *phdr, *php;
537 Elf_Shdr *shdr;
538 char *shstr;
539 int ret;
540 vm_offset_t firstaddr;
541 vm_offset_t lastaddr;
542 size_t chunk;
543 ssize_t result;
544 Elf_Addr ssym, esym;
545 Elf_Dyn *dp;
546 Elf_Addr adp;
547 Elf_Addr ctors;
548 int ndp;
549 int symstrindex;
550 int symtabindex;
551 Elf_Size size;
552 u_int fpcopy;
553 Elf_Sym sym;
554 Elf_Addr p_start, p_end;
555
556 dp = NULL;
557 shdr = NULL;
558 ret = 0;
559 firstaddr = lastaddr = 0;
560 ehdr = ef->ehdr;
561 #ifdef __powerpc__
562 if (ef->kernel) {
563 #else
564 if (ehdr->e_type == ET_EXEC) {
565 #endif
566 #if defined(__i386__) || defined(__amd64__)
567 #if __ELF_WORD_SIZE == 64
568 /* x86_64 relocates after locore */
569 off = - (off & 0xffffffffff000000ull);
570 #else
571 /* i386 relocates after locore */
572 off = - (off & 0xff000000u);
573 #endif
574 #elif defined(__powerpc__)
575 /*
576 * On purely virtual memory machines like the e500, the kernel is
577 * linked against its final VA range, which is usually not available
578 * at the loader stage, but only after the kernel initializes and
579 * completes its VM setup. In such cases we cannot use the p_vaddr
580 * field directly to place ELF segments, but must put them at some
581 * 'load-time' locations.
582 */
583 if (off & 0xf0000000u) {
584 off = -(off & 0xf0000000u);
585 /*
586 * XXX the physical load address should not be
587 * hardcoded. Note that the Book-E kernel assumes that
588 * it's loaded at a 16MB boundary for now...
589 */
590 off += 0x01000000;
591 }
592 ehdr->e_entry += off;
593 if (module_verbose >= MODULE_VERBOSE_FULL)
594 printf("Converted entry 0x%jx\n",
595 (uintmax_t)ehdr->e_entry);
596
597 #elif defined(__arm__) && !defined(EFI)
598 /*
599 * The elf headers in arm kernels specify virtual addresses in
600 * all header fields, even the ones that should be physical
601 * addresses. We assume the entry point is in the first page,
602 * and masking the page offset will leave us with the virtual
603 * address the kernel was linked at. We subtract that from the
604 * load offset, making 'off' into the value which, when added
605 * to a virtual address in an elf header, translates it to a
606 * physical address. We do the va->pa conversion on the entry
607 * point address in the header now, so that later we can launch
608 * the kernel by just jumping to that address.
609 *
610 * When booting from UEFI the copyin and copyout functions
611 * handle adjusting the location relative to the first virtual
612 * address. Because of this there is no need to adjust the
613 * offset or entry point address as these will both be handled
614 * by the efi code.
615 */
616 off -= ehdr->e_entry & ~PAGE_MASK;
617 ehdr->e_entry += off;
618 if (module_verbose >= MODULE_VERBOSE_FULL)
619 printf("ehdr->e_entry 0x%jx, va<->pa off %llx\n",
620 (uintmax_t)ehdr->e_entry, off);
621 #else
622 off = 0; /* other archs use direct mapped kernels */
623 #endif
624 }
625 ef->off = off;
626
627 if (ef->kernel)
628 __elfN(relocation_offset) = off;
629
630 if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) {
631 printf("elf" __XSTRING(__ELF_WORD_SIZE)
632 "_loadimage: program header not within first page\n");
633 goto out;
634 }
635 phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff);
636
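/*
 * Walk the program headers and stage every PT_LOAD segment: copy the
 * part already present in the first page, read the remainder from the
 * file, and zero any memsz > filesz tail (the BSS).
 */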
637 for (i = 0; i < ehdr->e_phnum; i++) {
638 if (elf_program_header_convert(ehdr, phdr))
639 continue;
640
641 /* We want to load PT_LOAD segments only. */
642 if (phdr[i].p_type != PT_LOAD)
643 continue;
644
645 if (module_verbose >= MODULE_VERBOSE_FULL) {
646 printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
647 (long)phdr[i].p_filesz, (long)phdr[i].p_offset,
648 (long)(phdr[i].p_vaddr + off),
649 (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
650 } else if (module_verbose > MODULE_VERBOSE_SILENT) {
651 if ((phdr[i].p_flags & PF_W) == 0) {
652 printf("text=0x%lx ", (long)phdr[i].p_filesz);
653 } else {
654 printf("data=0x%lx", (long)phdr[i].p_filesz);
655 if (phdr[i].p_filesz < phdr[i].p_memsz)
656 printf("+0x%lx", (long)(phdr[i].p_memsz -
657 phdr[i].p_filesz));
658 printf(" ");
659 }
660 }
661 fpcopy = 0;
662 if (ef->firstlen > phdr[i].p_offset) {
663 fpcopy = ef->firstlen - phdr[i].p_offset;
664 archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
665 phdr[i].p_vaddr + off, fpcopy);
666 }
667 if (phdr[i].p_filesz > fpcopy) {
668 if (kern_pread(VECTX_HANDLE(ef),
669 phdr[i].p_vaddr + off + fpcopy,
670 phdr[i].p_filesz - fpcopy,
671 phdr[i].p_offset + fpcopy) != 0) {
672 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
673 "_loadimage: read failed\n");
674 goto out;
675 }
676 }
677 /* clear space from oversized segments; eg: bss */
678 if (phdr[i].p_filesz < phdr[i].p_memsz) {
679 if (module_verbose >= MODULE_VERBOSE_FULL) {
680 printf(" (bss: 0x%lx-0x%lx)",
681 (long)(phdr[i].p_vaddr + off + phdr[i].p_filesz),
682 (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz -1));
683 }
684 kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz,
685 phdr[i].p_memsz - phdr[i].p_filesz);
686 }
687 if (module_verbose >= MODULE_VERBOSE_FULL)
688 printf("\n");
689
690 if (archsw.arch_loadseg != NULL)
691 archsw.arch_loadseg(ehdr, phdr + i, off);
692
693 if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off))
694 firstaddr = phdr[i].p_vaddr + off;
695 if (lastaddr == 0 || lastaddr <
696 (phdr[i].p_vaddr + off + phdr[i].p_memsz))
697 lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz;
698 }
699 lastaddr = roundup(lastaddr, sizeof(long));
700
701 /*
702 * Get the section headers. We need this for finding the .ctors
703 * section as well as for loading any symbols. Both may be hard
704 * to do if reading from a .gz file as it involves seeking. I
705 * think the rule is going to have to be that you must strip a
706 * file to remove symbols before gzipping it.
707 */
708 chunk = (size_t)ehdr->e_shnum * (size_t)ehdr->e_shentsize;
709 if (chunk == 0 || ehdr->e_shoff == 0)
710 goto nosyms;
711 shdr = alloc_pread(VECTX_HANDLE(ef), ehdr->e_shoff, chunk);
712 if (shdr == NULL) {
713 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
714 "_loadimage: failed to read section headers");
715 goto nosyms;
716 }
717
718 for (i = 0; i < ehdr->e_shnum; i++)
719 elf_section_header_convert(ehdr, &shdr[i]);
720
721 file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr);
722
723 /*
724 * Read the section string table and look for the .ctors section.
725 * We need to tell the kernel where it is so that it can call the
726 * ctors.
727 */
728 chunk = shdr[ehdr->e_shstrndx].sh_size;
729 if (chunk) {
730 shstr = alloc_pread(VECTX_HANDLE(ef),
731 shdr[ehdr->e_shstrndx].sh_offset, chunk);
732 if (shstr) {
733 for (i = 0; i < ehdr->e_shnum; i++) {
734 if (strcmp(shstr + shdr[i].sh_name,
735 ".ctors") != 0)
736 continue;
737 ctors = shdr[i].sh_addr;
738 file_addmetadata(fp, MODINFOMD_CTORS_ADDR,
739 sizeof(ctors), &ctors);
740 size = shdr[i].sh_size;
741 file_addmetadata(fp, MODINFOMD_CTORS_SIZE,
742 sizeof(size), &size);
743 break;
744 }
745 free(shstr);
746 }
747 }
748
749 /*
750 * Now load any symbols.
751 */
752 symtabindex = -1;
753 symstrindex = -1;
754 for (i = 0; i < ehdr->e_shnum; i++) {
755 if (shdr[i].sh_type != SHT_SYMTAB)
756 continue;
757 for (j = 0; j < ehdr->e_phnum; j++) {
758 if (phdr[j].p_type != PT_LOAD)
759 continue;
760 if (shdr[i].sh_offset >= phdr[j].p_offset &&
761 (shdr[i].sh_offset + shdr[i].sh_size <=
762 phdr[j].p_offset + phdr[j].p_filesz)) {
763 shdr[i].sh_offset = 0;
764 shdr[i].sh_size = 0;
765 break;
766 }
767 }
768 if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0)
769 continue; /* already loaded in a PT_LOAD above */
770 /* Save it for loading below */
771 symtabindex = i;
772 symstrindex = shdr[i].sh_link;
773 }
774 if (symtabindex < 0 || symstrindex < 0)
775 goto nosyms;
776
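/*
 * The symbols are staged right after the loaded segments as
 * [size][symtab data][size][strtab data]; the MODINFOMD_SSYM/ESYM
 * metadata added below records the bounds of that region for the kernel.
 */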
777 /* Ok, committed to a load. */
778 if (module_verbose >= MODULE_VERBOSE_FULL)
779 printf("syms=[");
780 ssym = lastaddr;
781 for (i = symtabindex; i >= 0; i = symstrindex) {
782 char *secname;
783
784 switch(shdr[i].sh_type) {
785 case SHT_SYMTAB: /* Symbol table */
786 secname = "symtab";
787 break;
788 case SHT_STRTAB: /* String table */
789 secname = "strtab";
790 break;
791 default:
792 secname = "WHOA!!";
793 break;
794 }
795 size = shdr[i].sh_size;
796
797 archsw.arch_copyin(&size, lastaddr, sizeof(size));
798 lastaddr += sizeof(size);
799
800 if (module_verbose >= MODULE_VERBOSE_FULL) {
801 printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname,
802 (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset,
803 (uintmax_t)lastaddr,
804 (uintmax_t)(lastaddr + shdr[i].sh_size));
805 } else if (module_verbose > MODULE_VERBOSE_SILENT) {
806 if (i == symstrindex)
807 printf("+");
808 printf("0x%lx+0x%lx", (long)sizeof(size), (long)size);
809 }
810 if (VECTX_LSEEK(VECTX_HANDLE(ef), (off_t)shdr[i].sh_offset, SEEK_SET) == -1) {
811 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
812 "_loadimage: could not seek for symbols - skipped!");
813 lastaddr = ssym;
814 ssym = 0;
815 goto nosyms;
816 }
817 result = archsw.arch_readin(VECTX_HANDLE(ef), lastaddr, shdr[i].sh_size);
818 if (result < 0 || (size_t)result != shdr[i].sh_size) {
819 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
820 "_loadimage: could not read symbols - skipped! "
821 "(%ju != %ju)", (uintmax_t)result,
822 (uintmax_t)shdr[i].sh_size);
823 lastaddr = ssym;
824 ssym = 0;
825 goto nosyms;
826 }
827 /* Reset offsets relative to ssym */
828 lastaddr += shdr[i].sh_size;
829 lastaddr = roundup(lastaddr, sizeof(size));
830 if (i == symtabindex)
831 symtabindex = -1;
832 else if (i == symstrindex)
833 symstrindex = -1;
834 }
835 esym = lastaddr;
836 if (module_verbose >= MODULE_VERBOSE_FULL)
837 printf("]");
838
839 file_addmetadata(fp, MODINFOMD_SSYM, sizeof(ssym), &ssym);
840 file_addmetadata(fp, MODINFOMD_ESYM, sizeof(esym), &esym);
841
842 nosyms:
843 if (module_verbose > MODULE_VERBOSE_SILENT)
844 printf("\n");
845
846 ret = lastaddr - firstaddr;
847 fp->f_addr = firstaddr;
848
849 php = NULL;
850 for (i = 0; i < ehdr->e_phnum; i++) {
851 if (phdr[i].p_type == PT_DYNAMIC) {
852 php = phdr + i;
853 adp = php->p_vaddr;
854 file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp),
855 &adp);
856 break;
857 }
858 }
859
860 if (php == NULL) /* this is bad, we cannot get to symbols or _DYNAMIC */
861 goto out;
862
863 ndp = php->p_filesz / sizeof(Elf_Dyn);
864 if (ndp == 0)
865 goto out;
866 dp = malloc(php->p_filesz);
867 if (dp == NULL)
868 goto out;
869 archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz);
870
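/*
 * Scan the copied _DYNAMIC entries for the hash, symbol and string
 * tables; lookup_symbol() below works directly on those in-image tables.
 */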
871 ef->strsz = 0;
872 for (i = 0; i < ndp; i++) {
873 if (dp[i].d_tag == 0)
874 break;
875 switch (dp[i].d_tag) {
876 case DT_HASH:
877 ef->hashtab =
878 (Elf_Hashelt*)(uintptr_t)(dp[i].d_un.d_ptr + off);
879 break;
880 case DT_STRTAB:
881 ef->strtab =
882 (char *)(uintptr_t)(dp[i].d_un.d_ptr + off);
883 break;
884 case DT_STRSZ:
885 ef->strsz = dp[i].d_un.d_val;
886 break;
887 case DT_SYMTAB:
888 ef->symtab =
889 (Elf_Sym *)(uintptr_t)(dp[i].d_un.d_ptr + off);
890 break;
891 case DT_REL:
892 ef->rel =
893 (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off);
894 break;
895 case DT_RELSZ:
896 ef->relsz = dp[i].d_un.d_val;
897 break;
898 case DT_RELA:
899 ef->rela =
900 (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off);
901 break;
902 case DT_RELASZ:
903 ef->relasz = dp[i].d_un.d_val;
904 break;
905 default:
906 break;
907 }
908 }
909 if (ef->hashtab == NULL || ef->symtab == NULL ||
910 ef->strtab == NULL || ef->strsz == 0)
911 goto out;
912 COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets));
913 COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains));
914 ef->buckets = ef->hashtab + 2;
915 ef->chains = ef->buckets + ef->nbuckets;
916
917 if (__elfN(lookup_symbol)(ef, "__start_set_modmetadata_set", &sym,
918 STT_NOTYPE) != 0)
919 return 0;
920 p_start = sym.st_value + ef->off;
921 if (__elfN(lookup_symbol)(ef, "__stop_set_modmetadata_set", &sym,
922 STT_NOTYPE) != 0)
923 return 0;
924 p_end = sym.st_value + ef->off;
925
926 if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0)
927 goto out;
928
929 if (ef->kernel) /* kernel must not depend on anything */
930 goto out;
931
932 out:
933 if (dp)
934 free(dp);
935 if (shdr)
936 free(shdr);
937 return ret;
938 }
939
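/*
 * Derive a module name from a path by dropping the directory part and
 * the last extension, e.g. "/boot/kernel/if_em.ko" -> "if_em".
 */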
940 static char invalid_name[] = "bad";
941
942 char *
943 fake_modname(const char *name)
944 {
945 const char *sp, *ep;
946 char *fp;
947 size_t len;
948
949 sp = strrchr(name, '/');
950 if (sp)
951 sp++;
952 else
953 sp = name;
954
955 ep = strrchr(sp, '.');
956 if (ep == NULL) {
957 ep = sp + strlen(sp);
958 }
959 if (ep == sp) {
960 sp = invalid_name;
961 ep = invalid_name + sizeof(invalid_name) - 1;
962 }
963
964 len = ep - sp;
965 fp = malloc(len + 1);
966 if (fp == NULL)
967 return NULL;
968 memcpy(fp, sp, len);
969 fp[len] = '\0';
970 return fp;
971 }
972
973 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
974 struct mod_metadata64 {
975 int md_version; /* structure version MDTV_* */
976 int md_type; /* type of entry MDT_* */
977 uint64_t md_data; /* specific data */
978 uint64_t md_cval; /* common string label */
979 };
980 #endif
981 #if defined(__amd64__) && __ELF_WORD_SIZE == 32
982 struct mod_metadata32 {
983 int md_version; /* structure version MDTV_* */
984 int md_type; /* type of entry MDT_* */
985 uint32_t md_data; /* specific data */
986 uint32_t md_cval; /* common string label */
987 };
988 #endif
989
990 int
991 __elfN(load_modmetadata)(struct preloaded_file *fp, uint64_t dest)
992 {
993 struct elf_file ef;
994 int err, i, j;
995 Elf_Shdr *sh_meta, *shdr = NULL;
996 Elf_Shdr *sh_data[2];
997 char *shstrtab = NULL;
998 size_t size;
999 Elf_Addr p_start, p_end;
1000
1001 bzero(&ef, sizeof(struct elf_file));
1002 ef.fd = -1;
1003
1004 err = __elfN(load_elf_header)(fp->f_name, &ef);
1005 if (err != 0)
1006 goto out;
1007
1008 if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) {
1009 ef.kernel = 1;
1010 } else if (ef.ehdr->e_type != ET_DYN) {
1011 err = EFTYPE;
1012 goto out;
1013 }
1014
1015 size = (size_t)ef.ehdr->e_shnum * (size_t)ef.ehdr->e_shentsize;
1016 shdr = alloc_pread(VECTX_HANDLE(&ef), ef.ehdr->e_shoff, size);
1017 if (shdr == NULL) {
1018 err = ENOMEM;
1019 goto out;
1020 }
1021
1022 /* Load shstrtab. */
1023 shstrtab = alloc_pread(VECTX_HANDLE(&ef), shdr[ef.ehdr->e_shstrndx].sh_offset,
1024 shdr[ef.ehdr->e_shstrndx].sh_size);
1025 if (shstrtab == NULL) {
1026 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1027 "load_modmetadata: unable to load shstrtab\n");
1028 err = EFTYPE;
1029 goto out;
1030 }
1031
1032 /* Find set_modmetadata_set and data sections. */
1033 sh_data[0] = sh_data[1] = sh_meta = NULL;
1034 for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) {
1035 if (strcmp(&shstrtab[shdr[i].sh_name],
1036 "set_modmetadata_set") == 0) {
1037 sh_meta = &shdr[i];
1038 }
1039 if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) ||
1040 (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) {
1041 sh_data[j++] = &shdr[i];
1042 }
1043 }
1044 if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) {
1045 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1046 "load_modmetadata: unable to find set_modmetadata_set or data sections\n");
1047 err = EFTYPE;
1048 goto out;
1049 }
1050
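/*
 * Stage the linker set and then the .data/.rodata sections at "dest":
 * the mod_metadata records in the set point into those sections, so they
 * must be resident before parse_modmetadata() dereferences them.
 */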
1051 /* Load set_modmetadata_set into memory */
1052 err = kern_pread(VECTX_HANDLE(&ef), dest, sh_meta->sh_size, sh_meta->sh_offset);
1053 if (err != 0) {
1054 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1055 "load_modmetadata: unable to load set_modmetadata_set: %d\n", err);
1056 goto out;
1057 }
1058 p_start = dest;
1059 p_end = dest + sh_meta->sh_size;
1060 dest += sh_meta->sh_size;
1061
1062 /* Load data sections into memory. */
1063 err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[0]->sh_size,
1064 sh_data[0]->sh_offset);
1065 if (err != 0) {
1066 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1067 "load_modmetadata: unable to load data: %d\n", err);
1068 goto out;
1069 }
1070
1071 /*
1072 * Advance dest so the staged copies keep the same relative spacing as
1073 * the original sections; one ef.off value then maps both .data and .rodata.
1074 */
1075 ef.off = -(sh_data[0]->sh_addr - dest);
1076 dest += (sh_data[1]->sh_addr - sh_data[0]->sh_addr);
1077
1078 err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[1]->sh_size,
1079 sh_data[1]->sh_offset);
1080 if (err != 0) {
1081 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1082 "load_modmetadata: unable to load data: %d\n", err);
1083 goto out;
1084 }
1085
1086 err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end);
1087 if (err != 0) {
1088 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1089 "load_modmetadata: unable to parse metadata: %d\n", err);
1090 goto out;
1091 }
1092
1093 out:
1094 if (shstrtab != NULL)
1095 free(shstrtab);
1096 if (shdr != NULL)
1097 free(shdr);
1098 if (ef.firstpage != NULL)
1099 free(ef.firstpage);
1100 if (ef.fd != -1) {
1101 #ifdef LOADER_VERIEXEC_VECTX
1102 if (!err && ef.vctx) {
1103 int verror;
1104
1105 verror = vectx_close(ef.vctx, VE_MUST, __func__);
1106 if (verror) {
1107 err = EAUTH;
1108 file_discard(fp);
1109 }
1110 }
1111 #endif
1112 close(ef.fd);
1113 }
1114 return (err);
1115 }
1116
1117 int
1118 __elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef,
1119 Elf_Addr p_start, Elf_Addr p_end)
1120 {
1121 struct mod_metadata md;
1122 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1123 struct mod_metadata64 md64;
1124 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1125 struct mod_metadata32 md32;
1126 #endif
1127 struct mod_depend *mdepend;
1128 struct mod_version mver;
1129 char *s;
1130 int error, modcnt, minfolen;
1131 Elf_Addr v, p;
1132
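/*
 * p walks an array of pointers to struct mod_metadata (the
 * set_modmetadata_set linker set); each pointer is relocated, the record
 * is copied out, and MDT_DEPEND/MDT_VERSION entries become file metadata.
 */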
1133 modcnt = 0;
1134 p = p_start;
1135 while (p < p_end) {
1136 COPYOUT(p, &v, sizeof(v));
1137 error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
1138 if (error == EOPNOTSUPP)
1139 v += ef->off;
1140 else if (error != 0)
1141 return (error);
1142 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1143 COPYOUT(v, &md64, sizeof(md64));
1144 error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64));
1145 if (error == EOPNOTSUPP) {
1146 md64.md_cval += ef->off;
1147 md64.md_data += ef->off;
1148 } else if (error != 0)
1149 return (error);
1150 md.md_version = md64.md_version;
1151 md.md_type = md64.md_type;
1152 md.md_cval = (const char *)(uintptr_t)md64.md_cval;
1153 md.md_data = (void *)(uintptr_t)md64.md_data;
1154 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1155 COPYOUT(v, &md32, sizeof(md32));
1156 error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32));
1157 if (error == EOPNOTSUPP) {
1158 md32.md_cval += ef->off;
1159 md32.md_data += ef->off;
1160 } else if (error != 0)
1161 return (error);
1162 md.md_version = md32.md_version;
1163 md.md_type = md32.md_type;
1164 md.md_cval = (const char *)(uintptr_t)md32.md_cval;
1165 md.md_data = (void *)(uintptr_t)md32.md_data;
1166 #else
1167 COPYOUT(v, &md, sizeof(md));
1168 error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md));
1169 if (error == EOPNOTSUPP) {
1170 md.md_cval += ef->off;
1171 md.md_data = (void *)((uintptr_t)md.md_data +
1172 (uintptr_t)ef->off);
1173 } else if (error != 0)
1174 return (error);
1175 #endif
1176 p += sizeof(Elf_Addr);
1177 switch(md.md_type) {
1178 case MDT_DEPEND:
1179 if (ef->kernel) /* kernel must not depend on anything */
1180 break;
1181 s = strdupout((vm_offset_t)md.md_cval);
1182 minfolen = sizeof(*mdepend) + strlen(s) + 1;
1183 mdepend = malloc(minfolen);
1184 if (mdepend == NULL)
1185 return ENOMEM;
1186 COPYOUT((vm_offset_t)md.md_data, mdepend,
1187 sizeof(*mdepend));
1188 strcpy((char*)(mdepend + 1), s);
1189 free(s);
1190 file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen,
1191 mdepend);
1192 free(mdepend);
1193 break;
1194 case MDT_VERSION:
1195 s = strdupout((vm_offset_t)md.md_cval);
1196 COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver));
1197 file_addmodule(fp, s, mver.mv_version, NULL);
1198 free(s);
1199 modcnt++;
1200 break;
1201 }
1202 }
1203 if (modcnt == 0) {
1204 s = fake_modname(fp->f_name);
1205 file_addmodule(fp, s, 1, NULL);
1206 free(s);
1207 }
1208 return 0;
1209 }
1210
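/*
 * Standard System V ABI ELF hash function, matching the DT_HASH table
 * produced by the static linker.
 */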
1211 static unsigned long
1212 elf_hash(const char *name)
1213 {
1214 const unsigned char *p = (const unsigned char *) name;
1215 unsigned long h = 0;
1216 unsigned long g;
1217
1218 while (*p != '\0') {
1219 h = (h << 4) + *p++;
1220 if ((g = h & 0xf0000000) != 0)
1221 h ^= g >> 24;
1222 h &= ~g;
1223 }
1224 return h;
1225 }
1226
1227 static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE)
1228 "_lookup_symbol: corrupt symbol table\n";
1229 int
1230 __elfN(lookup_symbol)(elf_file_t ef, const char* name, Elf_Sym *symp,
1231 unsigned char type)
1232 {
1233 Elf_Hashelt symnum;
1234 Elf_Sym sym;
1235 char *strp;
1236 unsigned long hash;
1237
1238 if (ef->nbuckets == 0) {
1239 printf(__elfN(bad_symtable));
1240 return ENOENT;
1241 }
1242
1243 hash = elf_hash(name);
1244 COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum));
1245
1246 while (symnum != STN_UNDEF) {
1247 if (symnum >= ef->nchains) {
1248 printf(__elfN(bad_symtable));
1249 return ENOENT;
1250 }
1251
1252 COPYOUT(ef->symtab + symnum, &sym, sizeof(sym));
1253 if (sym.st_name == 0) {
1254 printf(__elfN(bad_symtable));
1255 return ENOENT;
1256 }
1257
1258 strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name));
1259 if (strcmp(name, strp) == 0) {
1260 free(strp);
1261 if (sym.st_shndx != SHN_UNDEF && sym.st_value != 0 &&
1262 ELF_ST_TYPE(sym.st_info) == type) {
1263 *symp = sym;
1264 return 0;
1265 }
1266 return ENOENT;
1267 }
1268 free(strp);
1269 COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum));
1270 }
1271 return ENOENT;
1272 }
1273
1274 /*
1275 * Apply any intra-module relocations to the value. p is the load address
1276 * of the value and val/len is the value to be modified. This does NOT modify
1277 * the image in-place, because this is done by kern_linker later on.
1278 *
1279 * Returns EOPNOTSUPP if no relocation method is supplied.
1280 */
1281 static int
1282 __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
1283 Elf_Addr p, void *val, size_t len)
1284 {
1285 size_t n;
1286 Elf_Rela a;
1287 Elf_Rel r;
1288 int error;
1289
1290 /*
1291 * The kernel is already relocated, but we still want to apply
1292 * offset adjustments.
1293 */
1294 if (ef->kernel)
1295 return (EOPNOTSUPP);
1296
1297 for (n = 0; n < ef->relsz / sizeof(r); n++) {
1298 COPYOUT(ef->rel + n, &r, sizeof(r));
1299
1300 error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL,
1301 ef->off, p, val, len);
1302 if (error != 0)
1303 return (error);
1304 }
1305 for (n = 0; n < ef->relasz / sizeof(a); n++) {
1306 COPYOUT(ef->rela + n, &a, sizeof(a));
1307
1308 error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA,
1309 ef->off, p, val, len);
1310 if (error != 0)
1311 return (error);
1312 }
1313
1314 return (0);
1315 }
1316
1317 static Elf_Addr
1318 __elfN(symaddr)(struct elf_file *ef, Elf_Size symidx)
1319 {
1320
1321 /* Symbol lookup by index not required here. */
1322 return (0);
1323 }
1324