1 /*-
2 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3 * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/param.h>
29 #include <sys/endian.h>
30 #include <sys/exec.h>
31 #include <sys/linker.h>
32 #include <sys/module.h>
33 #include <sys/stdint.h>
34 #include <string.h>
35 #include <machine/elf.h>
36 #include <stand.h>
37 #include <sys/link_elf.h>
38
39 #include "bootstrap.h"
40 #include "modinfo.h"
41
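/*
 * COPYOUT reads (l) bytes from the loaded image at load address (s) into
 * loader memory at (d) via the arch-specific copyout routine.
 */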
42 #define COPYOUT(s,d,l) archsw.arch_copyout((vm_offset_t)(s), d, l)
43
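/*
 * When built with __ELF_WORD_SIZE == 64 on i386, the 32-bit loader handles
 * amd64 kernels, so the target class/machine checks must accept
 * ELFCLASS64/EM_X86_64 objects.
 */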
44 #if defined(__i386__) && __ELF_WORD_SIZE == 64
45 #undef ELF_TARG_CLASS
46 #undef ELF_TARG_MACH
47 #define ELF_TARG_CLASS ELFCLASS64
48 #define ELF_TARG_MACH EM_X86_64
49 #endif
50
51 typedef struct elf_file {
52 Elf_Phdr *ph;
53 Elf_Ehdr *ehdr;
54 Elf_Sym *symtab;
55 Elf_Hashelt *hashtab;
56 Elf_Hashelt nbuckets;
57 Elf_Hashelt nchains;
58 Elf_Hashelt *buckets;
59 Elf_Hashelt *chains;
60 Elf_Rel *rel;
61 size_t relsz;
62 Elf_Rela *rela;
63 size_t relasz;
64 char *strtab;
65 size_t strsz;
66 int fd;
67 caddr_t firstpage;
68 size_t firstlen;
69 int kernel;
70 uint64_t off;
71 #ifdef LOADER_VERIEXEC_VECTX
72 struct vectx *vctx;
73 #endif
74 } *elf_file_t;
75
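/*
 * VECTX_HANDLE() yields the handle used by the VECTX_READ/VECTX_LSEEK
 * wrappers: the veriexec verification context when vectx support is
 * compiled in, otherwise the plain file descriptor.
 */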
76 #ifdef LOADER_VERIEXEC_VECTX
77 #define VECTX_HANDLE(ef) (ef)->vctx
78 #else
79 #define VECTX_HANDLE(ef) (ef)->fd
80 #endif
81
82 static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef,
83 uint64_t loadaddr);
84 static int __elfN(lookup_symbol)(elf_file_t ef, const char* name,
85 Elf_Sym *sym, unsigned char type);
86 static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
87 Elf_Addr p, void *val, size_t len);
88 static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef,
89 Elf_Addr p_start, Elf_Addr p_end);
90 static symaddr_fn __elfN(symaddr);
91 static char *fake_modname(const char *name);
92
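/* Load offset that was applied to the kernel image, if any. */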
93 uint64_t __elfN(relocation_offset) = 0;
94
95 #ifdef __powerpc__
96 extern void elf_wrong_field_size(void);
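/*
 * CONVERT_FIELD selects the byte-swap width from the field's size at
 * compile time; CONVERT_SWITCH selects the byte order from
 * e_ident[EI_DATA], so one field list serves both big- and little-endian
 * images.
 */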
97 #define CONVERT_FIELD(b, f, e) \
98 switch (sizeof((b)->f)) { \
99 case 2: \
100 (b)->f = e ## 16toh((b)->f); \
101 break; \
102 case 4: \
103 (b)->f = e ## 32toh((b)->f); \
104 break; \
105 case 8: \
106 (b)->f = e ## 64toh((b)->f); \
107 break; \
108 default: \
109 /* Force a link time error. */ \
110 elf_wrong_field_size(); \
111 break; \
112 }
113
114 #define CONVERT_SWITCH(h, d, f) \
115 switch ((h)->e_ident[EI_DATA]) { \
116 case ELFDATA2MSB: \
117 f(d, be); \
118 break; \
119 case ELFDATA2LSB: \
120 f(d, le); \
121 break; \
122 default: \
123 return (EINVAL); \
124 }
125
126
127 static int elf_header_convert(Elf_Ehdr *ehdr)
128 {
129 /*
130 * Fixup ELF header endianness.
131 *
132 * The Xhdr structure was loaded using a block read call to optimize file
133 * accesses. It may happen that the endianness of the system memory
134 * differs from the endianness of the ELF header. Swap fields here to
135 * guarantee that Xhdr always contains valid data regardless of
136 * architecture.
137 */
138 #define HEADER_FIELDS(b, e) \
139 CONVERT_FIELD(b, e_type, e); \
140 CONVERT_FIELD(b, e_machine, e); \
141 CONVERT_FIELD(b, e_version, e); \
142 CONVERT_FIELD(b, e_entry, e); \
143 CONVERT_FIELD(b, e_phoff, e); \
144 CONVERT_FIELD(b, e_shoff, e); \
145 CONVERT_FIELD(b, e_flags, e); \
146 CONVERT_FIELD(b, e_ehsize, e); \
147 CONVERT_FIELD(b, e_phentsize, e); \
148 CONVERT_FIELD(b, e_phnum, e); \
149 CONVERT_FIELD(b, e_shentsize, e); \
150 CONVERT_FIELD(b, e_shnum, e); \
151 CONVERT_FIELD(b, e_shstrndx, e)
152
153 CONVERT_SWITCH(ehdr, ehdr, HEADER_FIELDS);
154
155 #undef HEADER_FIELDS
156
157 return (0);
158 }
159
160 static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
161 {
162 #define PROGRAM_HEADER_FIELDS(b, e) \
163 CONVERT_FIELD(b, p_type, e); \
164 CONVERT_FIELD(b, p_flags, e); \
165 CONVERT_FIELD(b, p_offset, e); \
166 CONVERT_FIELD(b, p_vaddr, e); \
167 CONVERT_FIELD(b, p_paddr, e); \
168 CONVERT_FIELD(b, p_filesz, e); \
169 CONVERT_FIELD(b, p_memsz, e); \
170 CONVERT_FIELD(b, p_align, e)
171
172 CONVERT_SWITCH(ehdr, phdr, PROGRAM_HEADER_FIELDS);
173
174 #undef PROGRAM_HEADER_FIELDS
175
176 return (0);
177 }
178
179 static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
180 {
181 #define SECTION_HEADER_FIELDS(b, e) \
182 CONVERT_FIELD(b, sh_name, e); \
183 CONVERT_FIELD(b, sh_type, e); \
184 CONVERT_FIELD(b, sh_link, e); \
185 CONVERT_FIELD(b, sh_info, e); \
186 CONVERT_FIELD(b, sh_flags, e); \
187 CONVERT_FIELD(b, sh_addr, e); \
188 CONVERT_FIELD(b, sh_offset, e); \
189 CONVERT_FIELD(b, sh_size, e); \
190 CONVERT_FIELD(b, sh_addralign, e); \
191 CONVERT_FIELD(b, sh_entsize, e)
192
193 CONVERT_SWITCH(ehdr, shdr, SECTION_HEADER_FIELDS);
194
195 #undef SECTION_HEADER_FIELDS
196
197 return (0);
198 }
199 #undef CONVERT_SWITCH
200 #undef CONVERT_FIELD
201 #else
202 static int elf_header_convert(Elf_Ehdr *ehdr)
203 {
204 return (0);
205 }
206
207 static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
208 {
209 return (0);
210 }
211
212 static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
213 {
214 return (0);
215 }
216 #endif
217
218 #if defined(__amd64__) || (defined(__i386__) && defined(EFI))
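/*
 * A kernel that defines the "kernphys" symbol is assumed to support being
 * run from a non-default physical load address.
 */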
219 static bool
220 is_kernphys_relocatable(elf_file_t ef)
221 {
222 Elf_Sym sym;
223
224 return (__elfN(lookup_symbol)(ef, "kernphys", &sym, STT_OBJECT) == 0);
225 }
226 #endif
227
228 #ifdef __i386__
229 static bool
230 is_tg_kernel_support(struct preloaded_file *fp, elf_file_t ef)
231 {
232 Elf_Sym sym;
233 Elf_Addr p_start, p_end, v, p;
234 char vd_name[16];
235 int error;
236
237 if (__elfN(lookup_symbol)(ef, "__start_set_vt_drv_set", &sym, STT_NOTYPE) != 0)
238 return (false);
239 p_start = sym.st_value + ef->off;
240 if (__elfN(lookup_symbol)(ef, "__stop_set_vt_drv_set", &sym, STT_NOTYPE) != 0)
241 return (false);
242 p_end = sym.st_value + ef->off;
243
244 /*
245 * Walk through vt_drv_set; each vt driver structure starts with a
246 * static 16-character driver name. If we find "vbefb", return true.
247 */
248 for (p = p_start; p < p_end; p += sizeof(Elf_Addr)) {
249 COPYOUT(p, &v, sizeof(v));
250
251 error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
252 if (error == EOPNOTSUPP)
253 v += ef->off;
254 else if (error != 0)
255 return (false);
256 COPYOUT(v, &vd_name, sizeof(vd_name));
257 if (strncmp(vd_name, "vbefb", sizeof(vd_name)) == 0)
258 return (true);
259 }
260
261 return (false);
262 }
263 #endif
264
265 static int
266 __elfN(load_elf_header)(char *filename, elf_file_t ef)
267 {
268 ssize_t bytes_read;
269 Elf_Ehdr *ehdr;
270 int err;
271
272 /*
273 * Open the image, read and validate the ELF header
274 */
275 if (filename == NULL) /* can't handle nameless */
276 return (EFTYPE);
277 if ((ef->fd = open(filename, O_RDONLY)) == -1)
278 return (errno);
279 ef->firstpage = malloc(PAGE_SIZE);
280 if (ef->firstpage == NULL) {
281 close(ef->fd);
282 return (ENOMEM);
283 }
284 preload(ef->fd);
285 #ifdef LOADER_VERIEXEC_VECTX
286 {
287 int verror;
288
289 ef->vctx = vectx_open(ef->fd, filename, 0L, NULL, &verror, __func__);
290 if (verror) {
291 printf("Unverified %s: %s\n", filename, ve_error_get());
292 close(ef->fd);
293 free(ef->vctx);
294 return (EAUTH);
295 }
296 }
297 #endif
298 bytes_read = VECTX_READ(VECTX_HANDLE(ef), ef->firstpage, PAGE_SIZE);
299 ef->firstlen = (size_t)bytes_read;
300 if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) {
301 err = EFTYPE; /* could be EIO, but may be small file */
302 goto error;
303 }
304 ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage;
305
306 /* Is it ELF? */
307 if (!IS_ELF(*ehdr)) {
308 err = EFTYPE;
309 goto error;
310 }
311
312 if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || /* Layout ? */
313 ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
314 ehdr->e_ident[EI_VERSION] != EV_CURRENT) /* Version ? */ {
315 err = EFTYPE;
316 goto error;
317 }
318
319 err = elf_header_convert(ehdr);
320 if (err)
321 goto error;
322
323 if (ehdr->e_version != EV_CURRENT || ehdr->e_machine != ELF_TARG_MACH) {
324 /* Machine ? */
325 err = EFTYPE;
326 goto error;
327 }
328
329 #if defined(LOADER_VERIEXEC) && !defined(LOADER_VERIEXEC_VECTX)
330 if (verify_file(ef->fd, filename, bytes_read, VE_MUST, __func__) < 0) {
331 err = EAUTH;
332 goto error;
333 }
334 #endif
335 return (0);
336
337 error:
338 if (ef->firstpage != NULL) {
339 free(ef->firstpage);
340 ef->firstpage = NULL;
341 }
342 if (ef->fd != -1) {
343 #ifdef LOADER_VERIEXEC_VECTX
344 free(ef->vctx);
345 #endif
346 close(ef->fd);
347 ef->fd = -1;
348 }
349 return (err);
350 }
351
352 /*
353 * Attempt to load the file (filename) as an ELF module. It will be stored at
354 * (dest), and a pointer to a module structure describing the loaded object
355 * will be saved in (result).
356 */
357 int
358 __elfN(loadfile)(char *filename, uint64_t dest, struct preloaded_file **result)
359 {
360 return (__elfN(loadfile_raw)(filename, dest, result, 0));
361 }
362
363 int
364 __elfN(loadfile_raw)(char *filename, uint64_t dest,
365 struct preloaded_file **result, int multiboot)
366 {
367 struct preloaded_file *fp, *kfp;
368 struct elf_file ef;
369 Elf_Ehdr *ehdr;
370 int err;
371
372 fp = NULL;
373 bzero(&ef, sizeof(struct elf_file));
374 ef.fd = -1;
375
376 err = __elfN(load_elf_header)(filename, &ef);
377 if (err != 0)
378 return (err);
379
380 ehdr = ef.ehdr;
381
382 /*
383 * Check to see what sort of module we are.
384 */
385 kfp = file_findfile(NULL, md_kerntype);
386 #ifdef __powerpc__
387 /*
388 * Kernels can be ET_DYN, so just assume the first loaded object is the
389 * kernel. This assumption will be checked later.
390 */
391 if (kfp == NULL)
392 ef.kernel = 1;
393 #endif
394 if (ef.kernel || ehdr->e_type == ET_EXEC) {
395 /* Looks like a kernel */
396 if (kfp != NULL) {
397 printf("elf" __XSTRING(__ELF_WORD_SIZE)
398 "_loadfile: kernel already loaded\n");
399 err = EPERM;
400 goto oerr;
401 }
402 /*
403 * Calculate destination address based on kernel entrypoint.
404 *
405 * For ARM, the destination address is independent of any values
406 * in the elf header (an ARM kernel can be loaded at any 2MB
407 * boundary), so we leave dest set to the value calculated by
408 * archsw.arch_loadaddr() and passed in to this function.
409 */
410 #ifndef __arm__
411 if (ehdr->e_type == ET_EXEC)
412 dest = (ehdr->e_entry & ~PAGE_MASK);
413 #endif
414 if ((ehdr->e_entry & ~PAGE_MASK) == 0) {
415 printf("elf" __XSTRING(__ELF_WORD_SIZE)
416 "_loadfile: not a kernel (maybe static binary?)\n");
417 err = EPERM;
418 goto oerr;
419 }
420 ef.kernel = 1;
421
422 } else if (ehdr->e_type == ET_DYN) {
423 /* Looks like a kld module */
424 if (multiboot != 0) {
425 printf("elf" __XSTRING(__ELF_WORD_SIZE)
426 "_loadfile: can't load module as multiboot\n");
427 err = EPERM;
428 goto oerr;
429 }
430 if (kfp == NULL) {
431 printf("elf" __XSTRING(__ELF_WORD_SIZE)
432 "_loadfile: can't load module before kernel\n");
433 err = EPERM;
434 goto oerr;
435 }
436 if (strcmp(md_kerntype, kfp->f_type)) {
437 printf("elf" __XSTRING(__ELF_WORD_SIZE)
438 "_loadfile: can't load module with kernel type '%s'\n",
439 kfp->f_type);
440 err = EPERM;
441 goto oerr;
442 }
443 /* Looks OK, go ahead */
444 ef.kernel = 0;
445
446 } else {
447 err = EFTYPE;
448 goto oerr;
449 }
450
451 if (archsw.arch_loadaddr != NULL)
452 dest = archsw.arch_loadaddr(LOAD_ELF, ehdr, dest);
453 else
454 dest = roundup(dest, PAGE_SIZE);
455
456 /*
457 * Ok, we think we should handle this.
458 */
459 fp = file_alloc();
460 if (fp == NULL) {
461 printf("elf" __XSTRING(__ELF_WORD_SIZE)
462 "_loadfile: cannot allocate module info\n");
463 err = EPERM;
464 goto out;
465 }
466 if (ef.kernel == 1 && multiboot == 0)
467 setenv("kernelname", filename, 1);
468 fp->f_name = strdup(filename);
469 if (multiboot == 0)
470 fp->f_type = strdup(ef.kernel ?
471 md_kerntype : md_modtype);
472 else
473 fp->f_type = strdup(md_kerntype_mb);
474
475 if (module_verbose >= MODULE_VERBOSE_FULL) {
476 if (ef.kernel)
477 printf("%s entry at 0x%jx\n", filename,
478 (uintmax_t)ehdr->e_entry);
479 } else if (module_verbose > MODULE_VERBOSE_SILENT)
480 printf("%s ", filename);
481
482 fp->f_size = __elfN(loadimage)(fp, &ef, dest);
483 if (fp->f_size == 0 || fp->f_addr == 0)
484 goto ioerr;
485
486 /* save exec header as metadata */
487 file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr);
488
489 /* Load OK, return module pointer */
490 *result = (struct preloaded_file *)fp;
491 err = 0;
492 #if defined(__amd64__) || (defined(__i386__) && defined(EFI))
493 fp->f_kernphys_relocatable = multiboot || is_kernphys_relocatable(&ef);
494 #endif
495 #if defined(__i386__) && !defined(EFI)
496 fp->f_tg_kernel_support = is_tg_kernel_support(fp, &ef);
497 #endif
498 goto out;
499
500 ioerr:
501 err = EIO;
502 oerr:
503 file_discard(fp);
504 out:
505 if (ef.firstpage)
506 free(ef.firstpage);
507 if (ef.fd != -1) {
508 #ifdef LOADER_VERIEXEC_VECTX
509 if (!err && ef.vctx) {
510 int verror;
511
512 verror = vectx_close(ef.vctx, VE_MUST, __func__);
513 if (verror) {
514 err = EAUTH;
515 file_discard(fp);
516 }
517 }
518 #endif
519 close(ef.fd);
520 }
521 return (err);
522 }
523
524 /*
525 * With the image file open and described by (ef), and its ELF header
526 * already read, load the image at (off).
527 */
528 static int
529 __elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, uint64_t off)
530 {
531 int i;
532 u_int j;
533 Elf_Ehdr *ehdr;
534 Elf_Phdr *phdr, *php;
535 Elf_Shdr *shdr;
536 char *shstr;
537 int ret;
538 vm_offset_t firstaddr;
539 vm_offset_t lastaddr;
540 size_t chunk;
541 ssize_t result;
542 Elf_Addr ssym, esym;
543 Elf_Dyn *dp;
544 Elf_Addr adp;
545 Elf_Addr ctors;
546 int ndp;
547 int symstrindex;
548 int symtabindex;
549 Elf_Size size;
550 u_int fpcopy;
551 Elf_Sym sym;
552 Elf_Addr p_start, p_end;
553
554 dp = NULL;
555 shdr = NULL;
556 ret = 0;
557 firstaddr = lastaddr = 0;
558 ehdr = ef->ehdr;
559 #ifdef __powerpc__
560 if (ef->kernel) {
561 #else
562 if (ehdr->e_type == ET_EXEC) {
563 #endif
564 #if defined(__i386__) || defined(__amd64__)
565 #if __ELF_WORD_SIZE == 64
566 /* x86_64 relocates after locore */
567 off = - (off & 0xffffffffff000000ull);
568 #else
569 /* i386 relocates after locore */
570 off = - (off & 0xff000000u);
571 #endif
572 #elif defined(__powerpc__)
573 /*
574 * On purely virtual memory machines like the e500, the kernel
575 * is linked against its final VA range, which is most often
576 * not available at the loader stage, but only after the kernel
577 * initializes and completes its VM setup. In such cases we
578 * cannot use the p_vaddr field directly to load ELF segments, but
579 * put them at some 'load-time' locations instead.
580 */
581 if (off & 0xf0000000u) {
582 off = -(off & 0xf0000000u);
583 /*
584 * XXX the physical load address should not be
585 * hardcoded. Note that the Book-E kernel assumes that
586 * it's loaded at a 16MB boundary for now...
587 */
588 off += 0x01000000;
589 }
590 ehdr->e_entry += off;
591 if (module_verbose >= MODULE_VERBOSE_FULL)
592 printf("Converted entry 0x%jx\n",
593 (uintmax_t)ehdr->e_entry);
594
595 #elif defined(__arm__) && !defined(EFI)
596 /*
597 * The elf headers in arm kernels specify virtual addresses in
598 * all header fields, even the ones that should be physical
599 * addresses. We assume the entry point is in the first page,
600 * and masking the page offset will leave us with the virtual
601 * address the kernel was linked at. We subtract that from the
602 * load offset, making 'off' into the value which, when added
603 * to a virtual address in an elf header, translates it to a
604 * physical address. We do the va->pa conversion on the entry
605 * point address in the header now, so that later we can launch
606 * the kernel by just jumping to that address.
607 *
608 * When booting from UEFI the copyin and copyout functions
609 * handle adjusting the location relative to the first virtual
610 * address. Because of this there is no need to adjust the
611 * offset or entry point address as these will both be handled
612 * by the efi code.
613 */
614 off -= ehdr->e_entry & ~PAGE_MASK;
615 ehdr->e_entry += off;
616 if (module_verbose >= MODULE_VERBOSE_FULL)
617 printf("ehdr->e_entry 0x%jx, va<->pa off %llx\n",
618 (uintmax_t)ehdr->e_entry, off);
619 #else
620 off = 0; /* other archs use direct mapped kernels */
621 #endif
622 }
623 ef->off = off;
624
625 if (ef->kernel)
626 __elfN(relocation_offset) = off;
627
628 if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) {
629 printf("elf" __XSTRING(__ELF_WORD_SIZE)
630 "_loadimage: program header not within first page\n");
631 goto out;
632 }
633 phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff);
634
635 for (i = 0; i < ehdr->e_phnum; i++) {
636 if (elf_program_header_convert(ehdr, phdr))
637 continue;
638
639 /* We want to load PT_LOAD segments only.. */
640 if (phdr[i].p_type != PT_LOAD)
641 continue;
642
643 if (module_verbose >= MODULE_VERBOSE_FULL) {
644 printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
645 (long)phdr[i].p_filesz, (long)phdr[i].p_offset,
646 (long)(phdr[i].p_vaddr + off),
647 (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
648 } else if (module_verbose > MODULE_VERBOSE_SILENT) {
649 if ((phdr[i].p_flags & PF_W) == 0) {
650 printf("text=0x%lx ", (long)phdr[i].p_filesz);
651 } else {
652 printf("data=0x%lx", (long)phdr[i].p_filesz);
653 if (phdr[i].p_filesz < phdr[i].p_memsz)
654 printf("+0x%lx", (long)(phdr[i].p_memsz -
655 phdr[i].p_filesz));
656 printf(" ");
657 }
658 }
659 fpcopy = 0;
660 if (ef->firstlen > phdr[i].p_offset) {
661 fpcopy = ef->firstlen - phdr[i].p_offset;
662 archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
663 phdr[i].p_vaddr + off, fpcopy);
664 }
665 if (phdr[i].p_filesz > fpcopy) {
666 if (kern_pread(VECTX_HANDLE(ef),
667 phdr[i].p_vaddr + off + fpcopy,
668 phdr[i].p_filesz - fpcopy,
669 phdr[i].p_offset + fpcopy) != 0) {
670 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
671 "_loadimage: read failed\n");
672 goto out;
673 }
674 }
675 /* clear space from oversized segments; eg: bss */
676 if (phdr[i].p_filesz < phdr[i].p_memsz) {
677 if (module_verbose >= MODULE_VERBOSE_FULL) {
678 printf(" (bss: 0x%lx-0x%lx)",
679 (long)(phdr[i].p_vaddr + off + phdr[i].p_filesz),
680 (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz -1));
681 }
682 kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz,
683 phdr[i].p_memsz - phdr[i].p_filesz);
684 }
685 if (module_verbose >= MODULE_VERBOSE_FULL)
686 printf("\n");
687
688 if (archsw.arch_loadseg != NULL)
689 archsw.arch_loadseg(ehdr, phdr + i, off);
690
691 if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off))
692 firstaddr = phdr[i].p_vaddr + off;
693 if (lastaddr == 0 || lastaddr <
694 (phdr[i].p_vaddr + off + phdr[i].p_memsz))
695 lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz;
696 }
697 lastaddr = roundup(lastaddr, sizeof(long));
698
699 /*
700 * Get the section headers. We need this for finding the .ctors
701 * section as well as for loading any symbols. Both may be hard
702 * to do if reading from a .gz file as it involves seeking. I
703 * think the rule is going to have to be that you must strip a
704 * file to remove symbols before gzipping it.
705 */
706 chunk = (size_t)ehdr->e_shnum * (size_t)ehdr->e_shentsize;
707 if (chunk == 0 || ehdr->e_shoff == 0)
708 goto nosyms;
709 shdr = alloc_pread(VECTX_HANDLE(ef), ehdr->e_shoff, chunk);
710 if (shdr == NULL) {
711 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
712 "_loadimage: failed to read section headers");
713 goto nosyms;
714 }
715
716 for (i = 0; i < ehdr->e_shnum; i++)
717 elf_section_header_convert(ehdr, &shdr[i]);
718
719 file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr);
720
721 /*
722 * Read the section string table and look for the .ctors section.
723 * We need to tell the kernel where it is so that it can call the
724 * ctors.
725 */
726 chunk = shdr[ehdr->e_shstrndx].sh_size;
727 if (chunk) {
728 shstr = alloc_pread(VECTX_HANDLE(ef),
729 shdr[ehdr->e_shstrndx].sh_offset, chunk);
730 if (shstr) {
731 for (i = 0; i < ehdr->e_shnum; i++) {
732 if (strcmp(shstr + shdr[i].sh_name,
733 ".ctors") != 0)
734 continue;
735 ctors = shdr[i].sh_addr;
736 file_addmetadata(fp, MODINFOMD_CTORS_ADDR,
737 sizeof(ctors), &ctors);
738 size = shdr[i].sh_size;
739 file_addmetadata(fp, MODINFOMD_CTORS_SIZE,
740 sizeof(size), &size);
741 break;
742 }
743 free(shstr);
744 }
745 }
746
747 /*
748 * Now load any symbols.
749 */
750 symtabindex = -1;
751 symstrindex = -1;
752 for (i = 0; i < ehdr->e_shnum; i++) {
753 if (shdr[i].sh_type != SHT_SYMTAB)
754 continue;
755 for (j = 0; j < ehdr->e_phnum; j++) {
756 if (phdr[j].p_type != PT_LOAD)
757 continue;
758 if (shdr[i].sh_offset >= phdr[j].p_offset &&
759 (shdr[i].sh_offset + shdr[i].sh_size <=
760 phdr[j].p_offset + phdr[j].p_filesz)) {
761 shdr[i].sh_offset = 0;
762 shdr[i].sh_size = 0;
763 break;
764 }
765 }
766 if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0)
767 continue; /* already loaded in a PT_LOAD above */
768 /* Save it for loading below */
769 symtabindex = i;
770 symstrindex = shdr[i].sh_link;
771 }
772 if (symtabindex < 0 || symstrindex < 0)
773 goto nosyms;
774
775 /* Ok, committed to a load. */
776 if (module_verbose >= MODULE_VERBOSE_FULL)
777 printf("syms=[");
778 ssym = lastaddr;
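/*
 * Copy the symbol table and then its string table, each preceded by its
 * size; the index juggling below visits symtabindex and then symstrindex
 * exactly once each.
 */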
779 for (i = symtabindex; i >= 0; i = symstrindex) {
780 char *secname;
781
782 switch(shdr[i].sh_type) {
783 case SHT_SYMTAB: /* Symbol table */
784 secname = "symtab";
785 break;
786 case SHT_STRTAB: /* String table */
787 secname = "strtab";
788 break;
789 default:
790 secname = "WHOA!!";
791 break;
792 }
793 size = shdr[i].sh_size;
794
795 archsw.arch_copyin(&size, lastaddr, sizeof(size));
796 lastaddr += sizeof(size);
797
798 if (module_verbose >= MODULE_VERBOSE_FULL) {
799 printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname,
800 (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset,
801 (uintmax_t)lastaddr,
802 (uintmax_t)(lastaddr + shdr[i].sh_size));
803 } else if (module_verbose > MODULE_VERBOSE_SILENT) {
804 if (i == symstrindex)
805 printf("+");
806 printf("0x%lx+0x%lx", (long)sizeof(size), (long)size);
807 }
808 if (VECTX_LSEEK(VECTX_HANDLE(ef), (off_t)shdr[i].sh_offset, SEEK_SET) == -1) {
809 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
810 "_loadimage: could not seek for symbols - skipped!");
811 lastaddr = ssym;
812 ssym = 0;
813 goto nosyms;
814 }
815 result = archsw.arch_readin(VECTX_HANDLE(ef), lastaddr, shdr[i].sh_size);
816 if (result < 0 || (size_t)result != shdr[i].sh_size) {
817 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
818 "_loadimage: could not read symbols - skipped! "
819 "(%ju != %ju)", (uintmax_t)result,
820 (uintmax_t)shdr[i].sh_size);
821 lastaddr = ssym;
822 ssym = 0;
823 goto nosyms;
824 }
825 /* Reset offsets relative to ssym */
826 lastaddr += shdr[i].sh_size;
827 lastaddr = roundup(lastaddr, sizeof(size));
828 if (i == symtabindex)
829 symtabindex = -1;
830 else if (i == symstrindex)
831 symstrindex = -1;
832 }
833 esym = lastaddr;
834 if (module_verbose >= MODULE_VERBOSE_FULL)
835 printf("]");
836
837 file_addmetadata(fp, MODINFOMD_SSYM, sizeof(ssym), &ssym);
838 file_addmetadata(fp, MODINFOMD_ESYM, sizeof(esym), &esym);
839
840 nosyms:
841 if (module_verbose > MODULE_VERBOSE_SILENT)
842 printf("\n");
843
844 ret = lastaddr - firstaddr;
845 fp->f_addr = firstaddr;
846
847 php = NULL;
848 for (i = 0; i < ehdr->e_phnum; i++) {
849 if (phdr[i].p_type == PT_DYNAMIC) {
850 php = phdr + i;
851 adp = php->p_vaddr;
852 file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp),
853 &adp);
854 break;
855 }
856 }
857
858 if (php == NULL) /* this is bad, we cannot get to symbols or _DYNAMIC */
859 goto out;
860
861 ndp = php->p_filesz / sizeof(Elf_Dyn);
862 if (ndp == 0)
863 goto out;
864 dp = malloc(php->p_filesz);
865 if (dp == NULL)
866 goto out;
867 archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz);
868
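/*
 * Scan _DYNAMIC for the hash, string, symbol and relocation tables. The
 * d_ptr values are link-time addresses, so bias them by the load offset.
 */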
869 ef->strsz = 0;
870 for (i = 0; i < ndp; i++) {
871 if (dp[i].d_tag == 0)
872 break;
873 switch (dp[i].d_tag) {
874 case DT_HASH:
875 ef->hashtab =
876 (Elf_Hashelt*)(uintptr_t)(dp[i].d_un.d_ptr + off);
877 break;
878 case DT_STRTAB:
879 ef->strtab =
880 (char *)(uintptr_t)(dp[i].d_un.d_ptr + off);
881 break;
882 case DT_STRSZ:
883 ef->strsz = dp[i].d_un.d_val;
884 break;
885 case DT_SYMTAB:
886 ef->symtab =
887 (Elf_Sym *)(uintptr_t)(dp[i].d_un.d_ptr + off);
888 break;
889 case DT_REL:
890 ef->rel =
891 (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off);
892 break;
893 case DT_RELSZ:
894 ef->relsz = dp[i].d_un.d_val;
895 break;
896 case DT_RELA:
897 ef->rela =
898 (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off);
899 break;
900 case DT_RELASZ:
901 ef->relasz = dp[i].d_un.d_val;
902 break;
903 default:
904 break;
905 }
906 }
907 if (ef->hashtab == NULL || ef->symtab == NULL ||
908 ef->strtab == NULL || ef->strsz == 0)
909 goto out;
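/*
 * The DT_HASH table starts with the nbucket and nchain counts, followed
 * by the bucket array and then the chain array.
 */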
910 COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets));
911 COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains));
912 ef->buckets = ef->hashtab + 2;
913 ef->chains = ef->buckets + ef->nbuckets;
914
915 if (__elfN(lookup_symbol)(ef, "__start_set_modmetadata_set", &sym,
916 STT_NOTYPE) != 0)
917 return 0;
918 p_start = sym.st_value + ef->off;
919 if (__elfN(lookup_symbol)(ef, "__stop_set_modmetadata_set", &sym,
920 STT_NOTYPE) != 0)
921 return 0;
922 p_end = sym.st_value + ef->off;
923
924 if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0)
925 goto out;
926
927 if (ef->kernel) /* kernel must not depend on anything */
928 goto out;
929
930 out:
931 if (dp)
932 free(dp);
933 if (shdr)
934 free(shdr);
935 return ret;
936 }
937
938 static char invalid_name[] = "bad";
939
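/*
 * Derive a module name from a file name: strip any directory prefix and
 * trailing extension, falling back to "bad" for degenerate names.
 */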
940 char *
941 fake_modname(const char *name)
942 {
943 const char *sp, *ep;
944 char *fp;
945 size_t len;
946
947 sp = strrchr(name, '/');
948 if (sp)
949 sp++;
950 else
951 sp = name;
952
953 ep = strrchr(sp, '.');
954 if (ep == NULL) {
955 ep = sp + strlen(sp);
956 }
957 if (ep == sp) {
958 sp = invalid_name;
959 ep = invalid_name + sizeof(invalid_name) - 1;
960 }
961
962 len = ep - sp;
963 fp = malloc(len + 1);
964 if (fp == NULL)
965 return NULL;
966 memcpy(fp, sp, len);
967 fp[len] = '\0';
968 return fp;
969 }
970
971 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
972 struct mod_metadata64 {
973 int md_version; /* structure version MDTV_* */
974 int md_type; /* type of entry MDT_* */
975 uint64_t md_data; /* specific data */
976 uint64_t md_cval; /* common string label */
977 };
978 #endif
979 #if defined(__amd64__) && __ELF_WORD_SIZE == 32
980 struct mod_metadata32 {
981 int md_version; /* structure version MDTV_* */
982 int md_type; /* type of entry MDT_* */
983 uint32_t md_data; /* specific data */
984 uint32_t md_cval; /* common string label */
985 };
986 #endif
987
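/*
 * Load just enough of the file named by fp->f_name to parse its module
 * metadata: the set_modmetadata_set section plus the .data and .rodata
 * sections it points into, staged at (dest).
 */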
988 int
989 __elfN(load_modmetadata)(struct preloaded_file *fp, uint64_t dest)
990 {
991 struct elf_file ef;
992 int err, i, j;
993 Elf_Shdr *sh_meta, *shdr = NULL;
994 Elf_Shdr *sh_data[2];
995 char *shstrtab = NULL;
996 size_t size;
997 Elf_Addr p_start, p_end;
998
999 bzero(&ef, sizeof(struct elf_file));
1000 ef.fd = -1;
1001
1002 err = __elfN(load_elf_header)(fp->f_name, &ef);
1003 if (err != 0)
1004 goto out;
1005
1006 if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) {
1007 ef.kernel = 1;
1008 } else if (ef.ehdr->e_type != ET_DYN) {
1009 err = EFTYPE;
1010 goto out;
1011 }
1012
1013 size = (size_t)ef.ehdr->e_shnum * (size_t)ef.ehdr->e_shentsize;
1014 shdr = alloc_pread(VECTX_HANDLE(&ef), ef.ehdr->e_shoff, size);
1015 if (shdr == NULL) {
1016 err = ENOMEM;
1017 goto out;
1018 }
1019
1020 /* Load shstrtab. */
1021 shstrtab = alloc_pread(VECTX_HANDLE(&ef), shdr[ef.ehdr->e_shstrndx].sh_offset,
1022 shdr[ef.ehdr->e_shstrndx].sh_size);
1023 if (shstrtab == NULL) {
1024 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1025 "load_modmetadata: unable to load shstrtab\n");
1026 err = EFTYPE;
1027 goto out;
1028 }
1029
1030 /* Find set_modmetadata_set and data sections. */
1031 sh_data[0] = sh_data[1] = sh_meta = NULL;
1032 for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) {
1033 if (strcmp(&shstrtab[shdr[i].sh_name],
1034 "set_modmetadata_set") == 0) {
1035 sh_meta = &shdr[i];
1036 }
1037 if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) ||
1038 (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) {
1039 sh_data[j++] = &shdr[i];
1040 }
1041 }
1042 if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) {
1043 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1044 "load_modmetadata: unable to find set_modmetadata_set or data sections\n");
1045 err = EFTYPE;
1046 goto out;
1047 }
1048
1049 /* Load set_modmetadata_set into memory */
1050 err = kern_pread(VECTX_HANDLE(&ef), dest, sh_meta->sh_size, sh_meta->sh_offset);
1051 if (err != 0) {
1052 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1053 "load_modmetadata: unable to load set_modmetadata_set: %d\n", err);
1054 goto out;
1055 }
1056 p_start = dest;
1057 p_end = dest + sh_meta->sh_size;
1058 dest += sh_meta->sh_size;
1059
1060 /* Load data sections into memory. */
1061 err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[0]->sh_size,
1062 sh_data[0]->sh_offset);
1063 if (err != 0) {
1064 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1065 "load_modmetadata: unable to load data: %d\n", err);
1066 goto out;
1067 }
1068
1069 /*
1070 * We have to increment dest so that the offset is the same into
1071 * both the .rodata and .data sections.
1072 */
1073 ef.off = -(sh_data[0]->sh_addr - dest);
1074 dest += (sh_data[1]->sh_addr - sh_data[0]->sh_addr);
1075
1076 err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[1]->sh_size,
1077 sh_data[1]->sh_offset);
1078 if (err != 0) {
1079 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1080 "load_modmetadata: unable to load data: %d\n", err);
1081 goto out;
1082 }
1083
1084 err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end);
1085 if (err != 0) {
1086 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1087 "load_modmetadata: unable to parse metadata: %d\n", err);
1088 goto out;
1089 }
1090
1091 out:
1092 if (shstrtab != NULL)
1093 free(shstrtab);
1094 if (shdr != NULL)
1095 free(shdr);
1096 if (ef.firstpage != NULL)
1097 free(ef.firstpage);
1098 if (ef.fd != -1) {
1099 #ifdef LOADER_VERIEXEC_VECTX
1100 if (!err && ef.vctx) {
1101 int verror;
1102
1103 verror = vectx_close(ef.vctx, VE_MUST, __func__);
1104 if (verror) {
1105 err = EAUTH;
1106 file_discard(fp);
1107 }
1108 }
1109 #endif
1110 close(ef.fd);
1111 }
1112 return (err);
1113 }
1114
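/*
 * Walk the linker set between (p_start) and (p_end). Each entry is a
 * pointer to a struct mod_metadata in the loaded image; MDT_DEPEND and
 * MDT_VERSION entries are recorded as file metadata and modules.
 */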
1115 int
1116 __elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef,
1117 Elf_Addr p_start, Elf_Addr p_end)
1118 {
1119 struct mod_metadata md;
1120 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1121 struct mod_metadata64 md64;
1122 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1123 struct mod_metadata32 md32;
1124 #endif
1125 struct mod_depend *mdepend;
1126 struct mod_version mver;
1127 char *s;
1128 int error, modcnt, minfolen;
1129 Elf_Addr v, p;
1130
1131 modcnt = 0;
1132 p = p_start;
1133 while (p < p_end) {
1134 COPYOUT(p, &v, sizeof(v));
1135 error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
1136 if (error == EOPNOTSUPP)
1137 v += ef->off;
1138 else if (error != 0)
1139 return (error);
1140 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1141 COPYOUT(v, &md64, sizeof(md64));
1142 error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64));
1143 if (error == EOPNOTSUPP) {
1144 md64.md_cval += ef->off;
1145 md64.md_data += ef->off;
1146 } else if (error != 0)
1147 return (error);
1148 md.md_version = md64.md_version;
1149 md.md_type = md64.md_type;
1150 md.md_cval = (const char *)(uintptr_t)md64.md_cval;
1151 md.md_data = (void *)(uintptr_t)md64.md_data;
1152 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1153 COPYOUT(v, &md32, sizeof(md32));
1154 error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32));
1155 if (error == EOPNOTSUPP) {
1156 md32.md_cval += ef->off;
1157 md32.md_data += ef->off;
1158 } else if (error != 0)
1159 return (error);
1160 md.md_version = md32.md_version;
1161 md.md_type = md32.md_type;
1162 md.md_cval = (const char *)(uintptr_t)md32.md_cval;
1163 md.md_data = (void *)(uintptr_t)md32.md_data;
1164 #else
1165 COPYOUT(v, &md, sizeof(md));
1166 error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md));
1167 if (error == EOPNOTSUPP) {
1168 md.md_cval += ef->off;
1169 md.md_data = (void *)((uintptr_t)md.md_data +
1170 (uintptr_t)ef->off);
1171 } else if (error != 0)
1172 return (error);
1173 #endif
1174 p += sizeof(Elf_Addr);
1175 switch(md.md_type) {
1176 case MDT_DEPEND:
1177 if (ef->kernel) /* kernel must not depend on anything */
1178 break;
1179 s = strdupout((vm_offset_t)md.md_cval);
1180 minfolen = sizeof(*mdepend) + strlen(s) + 1;
1181 mdepend = malloc(minfolen);
1182 if (mdepend == NULL)
1183 return ENOMEM;
1184 COPYOUT((vm_offset_t)md.md_data, mdepend,
1185 sizeof(*mdepend));
1186 strcpy((char*)(mdepend + 1), s);
1187 free(s);
1188 file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen,
1189 mdepend);
1190 free(mdepend);
1191 break;
1192 case MDT_VERSION:
1193 s = strdupout((vm_offset_t)md.md_cval);
1194 COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver));
1195 file_addmodule(fp, s, mver.mv_version, NULL);
1196 free(s);
1197 modcnt++;
1198 break;
1199 }
1200 }
1201 if (modcnt == 0) {
1202 s = fake_modname(fp->f_name);
1203 file_addmodule(fp, s, 1, NULL);
1204 free(s);
1205 }
1206 return 0;
1207 }
1208
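/* The standard SysV ELF hash function, matching the DT_HASH table. */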
1209 static unsigned long
1210 elf_hash(const char *name)
1211 {
1212 const unsigned char *p = (const unsigned char *) name;
1213 unsigned long h = 0;
1214 unsigned long g;
1215
1216 while (*p != '\0') {
1217 h = (h << 4) + *p++;
1218 if ((g = h & 0xf0000000) != 0)
1219 h ^= g >> 24;
1220 h &= ~g;
1221 }
1222 return h;
1223 }
1224
1225 static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE)
1226 "_lookup_symbol: corrupt symbol table\n";
1227 int
1228 __elfN(lookup_symbol)(elf_file_t ef, const char* name, Elf_Sym *symp,
1229 unsigned char type)
1230 {
1231 Elf_Hashelt symnum;
1232 Elf_Sym sym;
1233 char *strp;
1234 unsigned long hash;
1235
1236 if (ef->nbuckets == 0) {
1237 printf(__elfN(bad_symtable));
1238 return ENOENT;
1239 }
1240
1241 hash = elf_hash(name);
1242 COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum));
1243
1244 while (symnum != STN_UNDEF) {
1245 if (symnum >= ef->nchains) {
1246 printf(__elfN(bad_symtable));
1247 return ENOENT;
1248 }
1249
1250 COPYOUT(ef->symtab + symnum, &sym, sizeof(sym));
1251 if (sym.st_name == 0) {
1252 printf(__elfN(bad_symtable));
1253 return ENOENT;
1254 }
1255
1256 strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name));
1257 if (strcmp(name, strp) == 0) {
1258 free(strp);
1259 if (sym.st_shndx != SHN_UNDEF && sym.st_value != 0 &&
1260 ELF_ST_TYPE(sym.st_info) == type) {
1261 *symp = sym;
1262 return 0;
1263 }
1264 return ENOENT;
1265 }
1266 free(strp);
1267 COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum));
1268 }
1269 return ENOENT;
1270 }
1271
1272 /*
1273 * Apply any intra-module relocations to the value. p is the load address
1274 * of the value and val/len is the value to be modified. This does NOT modify
1275 * the image in-place, because this is done by kern_linker later on.
1276 *
1277 * Returns EOPNOTSUPP if no relocation method is supplied.
1278 */
1279 static int
1280 __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
1281 Elf_Addr p, void *val, size_t len)
1282 {
1283 size_t n;
1284 Elf_Rela a;
1285 Elf_Rel r;
1286 int error;
1287
1288 /*
1289 * The kernel is already relocated, but we still want to apply
1290 * offset adjustments.
1291 */
1292 if (ef->kernel)
1293 return (EOPNOTSUPP);
1294
1295 for (n = 0; n < ef->relsz / sizeof(r); n++) {
1296 COPYOUT(ef->rel + n, &r, sizeof(r));
1297
1298 error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL,
1299 ef->off, p, val, len);
1300 if (error != 0)
1301 return (error);
1302 }
1303 for (n = 0; n < ef->relasz / sizeof(a); n++) {
1304 COPYOUT(ef->rela + n, &a, sizeof(a));
1305
1306 error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA,
1307 ef->off, p, val, len);
1308 if (error != 0)
1309 return (error);
1310 }
1311
1312 return (0);
1313 }
1314
1315 static Elf_Addr
1316 __elfN(symaddr)(struct elf_file *ef, Elf_Size symidx)
1317 {
1318
1319 /* Symbol lookup by index not required here. */
1320 return (0);
1321 }
1322