xref: /freebsd/stand/common/load_elf.c (revision e0656a491411fe65ed8b9135add026358b24951f)
1 /*-
2  * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3  * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/endian.h>
33 #include <sys/exec.h>
34 #include <sys/linker.h>
35 #include <sys/module.h>
36 #include <sys/stdint.h>
37 #include <string.h>
38 #include <machine/elf.h>
39 #include <stand.h>
40 #define FREEBSD_ELF
41 #include <sys/link_elf.h>
42 
43 #include "bootstrap.h"
44 
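/*
 * COPYOUT() copies data back out of the loaded image: (s) is an address in
 * the target's load area (a vm_offset_t) and (d)/(l) describe a loader-local
 * destination buffer, with the copy done by archsw.arch_copyout().
 */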
45 #define COPYOUT(s,d,l)	archsw.arch_copyout((vm_offset_t)(s), d, l)
46 
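/*
 * When the 32-bit i386 loader is built to load 64-bit kernels
 * (__ELF_WORD_SIZE == 64), override the expected ELF class and machine so
 * that the header checks below accept ELFCLASS64 / EM_X86_64 images.
 */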
47 #if defined(__i386__) && __ELF_WORD_SIZE == 64
48 #undef ELF_TARG_CLASS
49 #undef ELF_TARG_MACH
50 #define ELF_TARG_CLASS  ELFCLASS64
51 #define ELF_TARG_MACH   EM_X86_64
52 #endif
53 
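/*
 * Per-file state kept while an ELF image is loaded: the open descriptor and
 * the first page of the file (which holds the ELF and program headers), the
 * load offset, and the dynamic-section tables (hash, symbols, strings,
 * relocations) needed later to resolve module metadata symbols.
 */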
54 typedef struct elf_file {
55     Elf_Phdr 	*ph;
56     Elf_Ehdr	*ehdr;
57     Elf_Sym	*symtab;
58     Elf_Hashelt	*hashtab;
59     Elf_Hashelt	nbuckets;
60     Elf_Hashelt	nchains;
61     Elf_Hashelt	*buckets;
62     Elf_Hashelt	*chains;
63     Elf_Rel	*rel;
64     size_t	relsz;
65     Elf_Rela	*rela;
66     size_t	relasz;
67     char	*strtab;
68     size_t	strsz;
69     int		fd;
70     caddr_t	firstpage;
71     size_t	firstlen;
72     int		kernel;
73     u_int64_t	off;
74 } *elf_file_t;
75 
76 static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef, u_int64_t loadaddr);
77 static int __elfN(lookup_symbol)(struct preloaded_file *mp, elf_file_t ef, const char* name, Elf_Sym* sym);
78 static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
79     Elf_Addr p, void *val, size_t len);
80 static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef,
81     Elf_Addr p_start, Elf_Addr p_end);
82 static symaddr_fn __elfN(symaddr);
83 static char	*fake_modname(const char *name);
84 
85 const char	*__elfN(kerneltype) = "elf kernel";
86 const char	*__elfN(moduletype) = "elf module";
87 
88 u_int64_t	__elfN(relocation_offset) = 0;
89 
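/*
 * Open (filename), read the first page of the image and validate the ELF
 * header, byte-swapping it into host endianness.  On success the file is
 * left open in ef->fd with the header at ef->ehdr; on failure the state is
 * torn down and an error is returned.
 */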
90 static int
91 __elfN(load_elf_header)(char *filename, elf_file_t ef)
92 {
93 	ssize_t			 bytes_read;
94 	Elf_Ehdr		*ehdr;
95 	int 			 err;
96 
97 	/*
98 	 * Open the image, read and validate the ELF header.
99 	 */
100 	if (filename == NULL)	/* can't handle nameless */
101 		return (EFTYPE);
102 	if ((ef->fd = open(filename, O_RDONLY)) == -1)
103 		return (errno);
104 	ef->firstpage = malloc(PAGE_SIZE);
105 	if (ef->firstpage == NULL) {
106 		close(ef->fd);
107 		return (ENOMEM);
108 	}
109 	bytes_read = read(ef->fd, ef->firstpage, PAGE_SIZE);
110 	ef->firstlen = (size_t)bytes_read;
111 	if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) {
112 		err = EFTYPE; /* could be EIO, but may be small file */
113 		goto error;
114 	}
115 	ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage;
116 
117 	/* Is it ELF? */
118 	if (!IS_ELF(*ehdr)) {
119 		err = EFTYPE;
120 		goto error;
121 	}
122 
123 	if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || /* Layout ? */
124 	    ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
125 	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) /* Version ? */ {
126 		err = EFTYPE;
127 		goto error;
128 	}
129 
130 	/*
131 	 * Fixup ELF endianness.
132 	 *
133 	 * The Xhdr structure was loaded with a single block read to
134 	 * optimize file accesses.  The endianness of the system
135 	 * memory may differ from the endianness of the ELF file.
136 	 * Swap the fields here so that the Xhdr always contains
137 	 * valid data regardless of the architecture the loader is
138 	 * running on.
139 	 */
140 	if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) {
141 		ehdr->e_type = be16toh(ehdr->e_type);
142 		ehdr->e_machine = be16toh(ehdr->e_machine);
143 		ehdr->e_version = be32toh(ehdr->e_version);
144 		if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
145 			ehdr->e_entry = be64toh(ehdr->e_entry);
146 			ehdr->e_phoff = be64toh(ehdr->e_phoff);
147 			ehdr->e_shoff = be64toh(ehdr->e_shoff);
148 		} else {
149 			ehdr->e_entry = be32toh(ehdr->e_entry);
150 			ehdr->e_phoff = be32toh(ehdr->e_phoff);
151 			ehdr->e_shoff = be32toh(ehdr->e_shoff);
152 		}
153 		ehdr->e_flags = be32toh(ehdr->e_flags);
154 		ehdr->e_ehsize = be16toh(ehdr->e_ehsize);
155 		ehdr->e_phentsize = be16toh(ehdr->e_phentsize);
156 		ehdr->e_phnum = be16toh(ehdr->e_phnum);
157 		ehdr->e_shentsize = be16toh(ehdr->e_shentsize);
158 		ehdr->e_shnum = be16toh(ehdr->e_shnum);
159 		ehdr->e_shstrndx = be16toh(ehdr->e_shstrndx);
160 
161 	} else {
162 		ehdr->e_type = le16toh(ehdr->e_type);
163 		ehdr->e_machine = le16toh(ehdr->e_machine);
164 		ehdr->e_version = le32toh(ehdr->e_version);
165 		if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
166 			ehdr->e_entry = le64toh(ehdr->e_entry);
167 			ehdr->e_phoff = le64toh(ehdr->e_phoff);
168 			ehdr->e_shoff = le64toh(ehdr->e_shoff);
169 		} else {
170 			ehdr->e_entry = le32toh(ehdr->e_entry);
171 			ehdr->e_phoff = le32toh(ehdr->e_phoff);
172 			ehdr->e_shoff = le32toh(ehdr->e_shoff);
173 		}
174 		ehdr->e_flags = le32toh(ehdr->e_flags);
175 		ehdr->e_ehsize = le16toh(ehdr->e_ehsize);
176 		ehdr->e_phentsize = le16toh(ehdr->e_phentsize);
177 		ehdr->e_phnum = le16toh(ehdr->e_phnum);
178 		ehdr->e_shentsize = le16toh(ehdr->e_shentsize);
179 		ehdr->e_shnum = le16toh(ehdr->e_shnum);
180 		ehdr->e_shstrndx = le16toh(ehdr->e_shstrndx);
181 	}
182 
183 	if (ehdr->e_version != EV_CURRENT || ehdr->e_machine != ELF_TARG_MACH) { /* Machine ? */
184 		err = EFTYPE;
185 		goto error;
186 	}
187 
188 	return (0);
189 
190 error:
191 	if (ef->firstpage != NULL) {
192 		free(ef->firstpage);
193 		ef->firstpage = NULL;
194 	}
195 	if (ef->fd != -1) {
196 		close(ef->fd);
197 		ef->fd = -1;
198 	}
199 	return (err);
200 }
201 
202 /*
203  * Attempt to load the file (filename) as an ELF module.  It will be stored
204  * at (dest), and a pointer to a module structure describing the loaded
205  * object will be saved in (result).
206  */
207 int
208 __elfN(loadfile)(char *filename, u_int64_t dest, struct preloaded_file **result)
209 {
210 	return (__elfN(loadfile_raw)(filename, dest, result, 0));
211 }
212 
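/*
 * As above, but (multiboot) marks the image as a multiboot kernel: the file
 * type is forced to "elf multiboot kernel", the kernelname variable is not
 * set, and kld modules are refused.
 */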
213 int
214 __elfN(loadfile_raw)(char *filename, u_int64_t dest,
215     struct preloaded_file **result, int multiboot)
216 {
217     struct preloaded_file	*fp, *kfp;
218     struct elf_file		ef;
219     Elf_Ehdr 			*ehdr;
220     int				err;
221 
222     fp = NULL;
223     bzero(&ef, sizeof(struct elf_file));
224     ef.fd = -1;
225 
226     err = __elfN(load_elf_header)(filename, &ef);
227     if (err != 0)
228     	return (err);
229 
230     ehdr = ef.ehdr;
231 
232     /*
233      * Check to see what sort of module we are.
234      */
235     kfp = file_findfile(NULL, __elfN(kerneltype));
236 #ifdef __powerpc__
237     /*
238      * Kernels can be ET_DYN, so just assume the first loaded object is the
239      * kernel. This assumption will be checked later.
240      */
241     if (kfp == NULL)
242         ef.kernel = 1;
243 #endif
244     if (ef.kernel || ehdr->e_type == ET_EXEC) {
245 	/* Looks like a kernel */
246 	if (kfp != NULL) {
247 	    printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: kernel already loaded\n");
248 	    err = EPERM;
249 	    goto oerr;
250 	}
251 	/*
252 	 * Calculate destination address based on kernel entrypoint.
253 	 *
254 	 * For ARM, the destination address is independent of any values in the
255 	 * elf header (an ARM kernel can be loaded at any 2MB boundary), so we
256 	 * leave dest set to the value calculated by archsw.arch_loadaddr() and
257 	 * passed in to this function.
258 	 */
259 #ifndef __arm__
260         if (ehdr->e_type == ET_EXEC)
261 	    dest = (ehdr->e_entry & ~PAGE_MASK);
262 #endif
263 	if ((ehdr->e_entry & ~PAGE_MASK) == 0) {
264 	    printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: not a kernel (maybe static binary?)\n");
265 	    err = EPERM;
266 	    goto oerr;
267 	}
268 	ef.kernel = 1;
269 
270     } else if (ehdr->e_type == ET_DYN) {
271 	/* Looks like a kld module */
272 	if (multiboot != 0) {
273 		printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module as multiboot\n");
274 		err = EPERM;
275 		goto oerr;
276 	}
277 	if (kfp == NULL) {
278 	    printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module before kernel\n");
279 	    err = EPERM;
280 	    goto oerr;
281 	}
282 	if (strcmp(__elfN(kerneltype), kfp->f_type)) {
283 	    printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module with kernel type '%s'\n", kfp->f_type);
284 	    err = EPERM;
285 	    goto oerr;
286 	}
287 	/* Looks OK, go ahead */
288 	ef.kernel = 0;
289 
290     } else {
291 	err = EFTYPE;
292 	goto oerr;
293     }
294 
295     if (archsw.arch_loadaddr != NULL)
296 	dest = archsw.arch_loadaddr(LOAD_ELF, ehdr, dest);
297     else
298 	dest = roundup(dest, PAGE_SIZE);
299 
300     /*
301      * Ok, we think we should handle this.
302      */
303     fp = file_alloc();
304     if (fp == NULL) {
305 	    printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: cannot allocate module info\n");
306 	    err = EPERM;
307 	    goto out;
308     }
309     if (ef.kernel == 1 && multiboot == 0)
310 	setenv("kernelname", filename, 1);
311     fp->f_name = strdup(filename);
312     if (multiboot == 0)
313     	fp->f_type = strdup(ef.kernel ?
314     	    __elfN(kerneltype) : __elfN(moduletype));
315     else
316     	fp->f_type = strdup("elf multiboot kernel");
317 
318 #ifdef ELF_VERBOSE
319     if (ef.kernel)
320 	printf("%s entry at 0x%jx\n", filename, (uintmax_t)ehdr->e_entry);
321 #else
322     printf("%s ", filename);
323 #endif
324 
325     fp->f_size = __elfN(loadimage)(fp, &ef, dest);
326     if (fp->f_size == 0 || fp->f_addr == 0)
327 	goto ioerr;
328 
329     /* save exec header as metadata */
330     file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr);
331 
332     /* Load OK, return module pointer */
333     *result = (struct preloaded_file *)fp;
334     err = 0;
335     goto out;
336 
337  ioerr:
338     err = EIO;
339  oerr:
340     file_discard(fp);
341  out:
342     if (ef.firstpage)
343 	free(ef.firstpage);
344     if (ef.fd != -1)
345     	close(ef.fd);
346     return(err);
347 }
348 
349 /*
350  * With the image file open and the ELF header loaded (both held in (ef)),
351  * load the image at offset (off).
352  */
353 static int
354 __elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, u_int64_t off)
355 {
356     int 	i;
357     u_int	j;
358     Elf_Ehdr	*ehdr;
359     Elf_Phdr	*phdr, *php;
360     Elf_Shdr	*shdr;
361     char	*shstr;
362     int		ret;
363     vm_offset_t firstaddr;
364     vm_offset_t lastaddr;
365     size_t	chunk;
366     ssize_t	result;
367     Elf_Addr	ssym, esym;
368     Elf_Dyn	*dp;
369     Elf_Addr	adp;
370     Elf_Addr	ctors;
371     int		ndp;
372     int		symstrindex;
373     int		symtabindex;
374     Elf_Size	size;
375     u_int	fpcopy;
376     Elf_Sym	sym;
377     Elf_Addr	p_start, p_end;
378 #if __ELF_WORD_SIZE == 64
379     uint64_t scr_ssym;
380     uint64_t scr_esym;
381     uint64_t scr;
382 #else
383     uint32_t scr_ssym;
384     uint32_t scr_esym;
385     uint32_t scr;
386 #endif
387 
388     dp = NULL;
389     shdr = NULL;
390     ret = 0;
391     firstaddr = lastaddr = 0;
392     ehdr = ef->ehdr;
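    /*
     * For an ET_EXEC kernel the program headers carry the virtual addresses
     * the kernel was linked at.  The block below turns 'off' into the value
     * that, added to a p_vaddr from those headers, yields the address the
     * segment is actually copied to; how that value is computed is
     * architecture-specific.
     */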
393     if (ehdr->e_type == ET_EXEC) {
394 #if defined(__i386__) || defined(__amd64__)
395 #if __ELF_WORD_SIZE == 64
396 	off = - (off & 0xffffffffff000000ull); /* x86_64 relocates after locore */
397 #else
398 	off = - (off & 0xff000000u);	/* i386 relocates after locore */
399 #endif
400 #elif defined(__powerpc__)
401 	/*
402 	 * On purely virtual memory machines like e500, the kernel is
403 	 * linked against its final VA range, which is most often not
404 	 * available at the loader stage, but only after the kernel
405 	 * initializes and completes its VM settings.  In such cases we
406 	 * cannot use the p_vaddr field directly to load ELF segments,
407 	 * but instead put them at 'load-time' locations.
408 	 */
409 	if (off & 0xf0000000u) {
410 	    off = -(off & 0xf0000000u);
411 	    /*
412 	     * XXX the physical load address should not be hardcoded. Note
413 	     * that the Book-E kernel assumes that it's loaded at a 16MB
414 	     * boundary for now...
415 	     */
416 	    off += 0x01000000;
417 	    ehdr->e_entry += off;
418 #ifdef ELF_VERBOSE
419 	    printf("Converted entry 0x%08x\n", ehdr->e_entry);
420 #endif
421 	} else
422 	    off = 0;
423 #elif defined(__arm__) && !defined(EFI)
424 	/*
425 	 * The elf headers in arm kernels specify virtual addresses in all
426 	 * header fields, even the ones that should be physical addresses.
427 	 * We assume the entry point is in the first page, and masking the page
428 	 * offset will leave us with the virtual address the kernel was linked
429 	 * at.  We subtract that from the load offset, making 'off' into the
430 	 * value which, when added to a virtual address in an elf header,
431 	 * translates it to a physical address.  We do the va->pa conversion on
432 	 * the entry point address in the header now, so that later we can
433 	 * launch the kernel by just jumping to that address.
434 	 *
435 	 * When booting from UEFI the copyin and copyout functions handle
436 	 * adjusting the location relative to the first virtual address.
437 	 * Because of this there is no need to adjust the offset or entry
438 	 * point address as these will both be handled by the efi code.
439 	 */
440 	off -= ehdr->e_entry & ~PAGE_MASK;
441 	ehdr->e_entry += off;
442 #ifdef ELF_VERBOSE
443 	printf("ehdr->e_entry 0x%08x, va<->pa off %llx\n", ehdr->e_entry, off);
444 #endif
445 #else
446 	off = 0;		/* other archs use direct mapped kernels */
447 #endif
448     }
449     ef->off = off;
450 
451     if (ef->kernel)
452 	__elfN(relocation_offset) = off;
453 
454     if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) {
455 	printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: program header not within first page\n");
456 	goto out;
457     }
458     phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff);
459 
460     for (i = 0; i < ehdr->e_phnum; i++) {
461 	/*
462 	 * Fixup ELF endianness.
463 	 *
464 	 * The Xhdr structure was loaded with a single block read to
465 	 * optimize file accesses.  The endianness of the system
466 	 * memory may differ from the endianness of the ELF file.
467 	 * Swap the fields here so that the Xhdr always contains
468 	 * valid data regardless of the architecture the loader is
469 	 * running on.
470 	 */
471 	if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) {
472 		phdr[i].p_type = be32toh(phdr[i].p_type);
473 		phdr[i].p_flags = be32toh(phdr[i].p_flags);
474 		if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
475 			phdr[i].p_offset = be64toh(phdr[i].p_offset);
476 			phdr[i].p_vaddr = be64toh(phdr[i].p_vaddr);
477 			phdr[i].p_paddr = be64toh(phdr[i].p_paddr);
478 			phdr[i].p_filesz = be64toh(phdr[i].p_filesz);
479 			phdr[i].p_memsz = be64toh(phdr[i].p_memsz);
480 			phdr[i].p_align = be64toh(phdr[i].p_align);
481 		} else {
482 			phdr[i].p_offset = be32toh(phdr[i].p_offset);
483 			phdr[i].p_vaddr = be32toh(phdr[i].p_vaddr);
484 			phdr[i].p_paddr = be32toh(phdr[i].p_paddr);
485 			phdr[i].p_filesz = be32toh(phdr[i].p_filesz);
486 			phdr[i].p_memsz = be32toh(phdr[i].p_memsz);
487 			phdr[i].p_align = be32toh(phdr[i].p_align);
488 		}
489 	} else {
490 		phdr[i].p_type = le32toh(phdr[i].p_type);
491 		phdr[i].p_flags = le32toh(phdr[i].p_flags);
492 		if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
493 			phdr[i].p_offset = le64toh(phdr[i].p_offset);
494 			phdr[i].p_vaddr = le64toh(phdr[i].p_vaddr);
495 			phdr[i].p_paddr = le64toh(phdr[i].p_paddr);
496 			phdr[i].p_filesz = le64toh(phdr[i].p_filesz);
497 			phdr[i].p_memsz = le64toh(phdr[i].p_memsz);
498 			phdr[i].p_align = le64toh(phdr[i].p_align);
499 		} else {
500 			phdr[i].p_offset = le32toh(phdr[i].p_offset);
501 			phdr[i].p_vaddr = le32toh(phdr[i].p_vaddr);
502 			phdr[i].p_paddr = le32toh(phdr[i].p_paddr);
503 			phdr[i].p_filesz = le32toh(phdr[i].p_filesz);
504 			phdr[i].p_memsz = le32toh(phdr[i].p_memsz);
505 			phdr[i].p_align = le32toh(phdr[i].p_align);
506 		}
507 	}
508 
509 	/* We want to load PT_LOAD segments only.. */
510 	if (phdr[i].p_type != PT_LOAD)
511 	    continue;
512 
513 #ifdef ELF_VERBOSE
514 	printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
515 	    (long)phdr[i].p_filesz, (long)phdr[i].p_offset,
516 	    (long)(phdr[i].p_vaddr + off),
517 	    (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
518 #else
519 	if ((phdr[i].p_flags & PF_W) == 0) {
520 	    printf("text=0x%lx ", (long)phdr[i].p_filesz);
521 	} else {
522 	    printf("data=0x%lx", (long)phdr[i].p_filesz);
523 	    if (phdr[i].p_filesz < phdr[i].p_memsz)
524 		printf("+0x%lx", (long)(phdr[i].p_memsz -phdr[i].p_filesz));
525 	    printf(" ");
526 	}
527 #endif
528 	fpcopy = 0;
529 	if (ef->firstlen > phdr[i].p_offset) {
530 	    fpcopy = ef->firstlen - phdr[i].p_offset;
531 	    archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
532 			       phdr[i].p_vaddr + off, fpcopy);
533 	}
534 	if (phdr[i].p_filesz > fpcopy) {
535 	    if (kern_pread(ef->fd, phdr[i].p_vaddr + off + fpcopy,
536 		phdr[i].p_filesz - fpcopy, phdr[i].p_offset + fpcopy) != 0) {
537 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
538 		    "_loadimage: read failed\n");
539 		goto out;
540 	    }
541 	}
542 	/* Clear space from oversized segments; e.g., bss. */
543 	if (phdr[i].p_filesz < phdr[i].p_memsz) {
544 #ifdef ELF_VERBOSE
545 	    printf(" (bss: 0x%lx-0x%lx)",
546 		(long)(phdr[i].p_vaddr + off + phdr[i].p_filesz),
547 		(long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
548 #endif
549 
550 	    kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz,
551 		phdr[i].p_memsz - phdr[i].p_filesz);
552 	}
553 #ifdef ELF_VERBOSE
554 	printf("\n");
555 #endif
556 
557 	if (archsw.arch_loadseg != NULL)
558 	    archsw.arch_loadseg(ehdr, phdr + i, off);
559 
560 	if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off))
561 	    firstaddr = phdr[i].p_vaddr + off;
562 	if (lastaddr == 0 || lastaddr < (phdr[i].p_vaddr + off + phdr[i].p_memsz))
563 	    lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz;
564     }
565     lastaddr = roundup(lastaddr, sizeof(long));
566 
567     /*
568      * Get the section headers.  We need this for finding the .ctors
569      * section as well as for loading any symbols.  Both may be hard
570      * to do if reading from a .gz file as it involves seeking.  I
571      * think the rule is going to have to be that you must strip a
572      * file to remove symbols before gzipping it.
573      */
574     chunk = (size_t)ehdr->e_shnum * (size_t)ehdr->e_shentsize;
575     if (chunk == 0 || ehdr->e_shoff == 0)
576 	goto nosyms;
577     shdr = alloc_pread(ef->fd, ehdr->e_shoff, chunk);
578     if (shdr == NULL) {
579 	printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
580 	    "_loadimage: failed to read section headers");
581 	goto nosyms;
582     }
583 
584     /*
585      * Fixup ELF endianness.
586      *
587      * The Xhdr structure was loaded with a single block read to
588      * optimize file accesses.  The endianness of the system
589      * memory may differ from the endianness of the ELF file.
590      * Swap the fields here so that the Xhdr always contains
591      * valid data regardless of the architecture the loader is
592      * running on.
593      */
594     for (i = 0; i < ehdr->e_shnum; i++) {
595 	if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) {
596 		shdr[i].sh_name = be32toh(shdr[i].sh_name);
597 		shdr[i].sh_type = be32toh(shdr[i].sh_type);
598 		shdr[i].sh_link = be32toh(shdr[i].sh_link);
599 		shdr[i].sh_info = be32toh(shdr[i].sh_info);
600 		if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
601 			shdr[i].sh_flags = be64toh(shdr[i].sh_flags);
602 			shdr[i].sh_addr = be64toh(shdr[i].sh_addr);
603 			shdr[i].sh_offset = be64toh(shdr[i].sh_offset);
604 			shdr[i].sh_size = be64toh(shdr[i].sh_size);
605 			shdr[i].sh_addralign = be64toh(shdr[i].sh_addralign);
606 			shdr[i].sh_entsize = be64toh(shdr[i].sh_entsize);
607 		} else {
608 			shdr[i].sh_flags = be32toh(shdr[i].sh_flags);
609 			shdr[i].sh_addr = be32toh(shdr[i].sh_addr);
610 			shdr[i].sh_offset = be32toh(shdr[i].sh_offset);
611 			shdr[i].sh_size = be32toh(shdr[i].sh_size);
612 			shdr[i].sh_addralign = be32toh(shdr[i].sh_addralign);
613 			shdr[i].sh_entsize = be32toh(shdr[i].sh_entsize);
614 		}
615 	} else {
616 		shdr[i].sh_name = le32toh(shdr[i].sh_name);
617 		shdr[i].sh_type = le32toh(shdr[i].sh_type);
618 		shdr[i].sh_link = le32toh(shdr[i].sh_link);
619 		shdr[i].sh_info = le32toh(shdr[i].sh_info);
620 		if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
621 			shdr[i].sh_flags = le64toh(shdr[i].sh_flags);
622 			shdr[i].sh_addr = le64toh(shdr[i].sh_addr);
623 			shdr[i].sh_offset = le64toh(shdr[i].sh_offset);
624 			shdr[i].sh_size = le64toh(shdr[i].sh_size);
625 			shdr[i].sh_addralign = le64toh(shdr[i].sh_addralign);
626 			shdr[i].sh_entsize = le64toh(shdr[i].sh_entsize);
627 		} else {
628 			shdr[i].sh_flags = le32toh(shdr[i].sh_flags);
629 			shdr[i].sh_addr = le32toh(shdr[i].sh_addr);
630 			shdr[i].sh_offset = le32toh(shdr[i].sh_offset);
631 			shdr[i].sh_size = le32toh(shdr[i].sh_size);
632 			shdr[i].sh_addralign = le32toh(shdr[i].sh_addralign);
633 			shdr[i].sh_entsize = le32toh(shdr[i].sh_entsize);
634 		}
635 	}
636     }
637     file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr);
638 
639     /*
640      * Read the section string table and look for the .ctors section.
641      * We need to tell the kernel where it is so that it can call the
642      * ctors.
643      */
644     chunk = shdr[ehdr->e_shstrndx].sh_size;
645     if (chunk) {
646 	shstr = alloc_pread(ef->fd, shdr[ehdr->e_shstrndx].sh_offset, chunk);
647 	if (shstr) {
648 	    for (i = 0; i < ehdr->e_shnum; i++) {
649 		if (strcmp(shstr + shdr[i].sh_name, ".ctors") != 0)
650 		    continue;
651 		ctors = shdr[i].sh_addr;
652 		file_addmetadata(fp, MODINFOMD_CTORS_ADDR, sizeof(ctors),
653 		    &ctors);
654 		size = shdr[i].sh_size;
655 		file_addmetadata(fp, MODINFOMD_CTORS_SIZE, sizeof(size),
656 		    &size);
657 		break;
658 	    }
659 	    free(shstr);
660 	}
661     }
662 
663     /*
664      * Now load any symbols.
665      */
666     symtabindex = -1;
667     symstrindex = -1;
668     for (i = 0; i < ehdr->e_shnum; i++) {
669 	if (shdr[i].sh_type != SHT_SYMTAB)
670 	    continue;
671 	for (j = 0; j < ehdr->e_phnum; j++) {
672 	    if (phdr[j].p_type != PT_LOAD)
673 		continue;
674 	    if (shdr[i].sh_offset >= phdr[j].p_offset &&
675 		(shdr[i].sh_offset + shdr[i].sh_size <=
676 		 phdr[j].p_offset + phdr[j].p_filesz)) {
677 		shdr[i].sh_offset = 0;
678 		shdr[i].sh_size = 0;
679 		break;
680 	    }
681 	}
682 	if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0)
683 	    continue;		/* already loaded in a PT_LOAD above */
684 	/* Save it for loading below */
685 	symtabindex = i;
686 	symstrindex = shdr[i].sh_link;
687     }
688     if (symtabindex < 0 || symstrindex < 0)
689 	goto nosyms;
690 
691     /* Ok, committed to a load. */
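    /*
     * The symbol table and its string table are appended to the loaded
     * image, each section preceded by its size:
     *
     *	ssym -> | size | .symtab data | size | .strtab data | <- esym
     *
     * ssym and esym are passed to the kernel below as the MODINFOMD_SSYM
     * and MODINFOMD_ESYM metadata.
     */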
692 #ifndef ELF_VERBOSE
693     printf("syms=[");
694 #endif
695     ssym = lastaddr;
696     for (i = symtabindex; i >= 0; i = symstrindex) {
697 #ifdef ELF_VERBOSE
698 	char	*secname;
699 
700 	switch(shdr[i].sh_type) {
701 	    case SHT_SYMTAB:		/* Symbol table */
702 		secname = "symtab";
703 		break;
704 	    case SHT_STRTAB:		/* String table */
705 		secname = "strtab";
706 		break;
707 	    default:
708 		secname = "WHOA!!";
709 		break;
710 	}
711 #endif
712 	size = shdr[i].sh_size;
713 #if defined(__powerpc__)
714   #if __ELF_WORD_SIZE == 64
715 	scr = htobe64(size);
716   #else
717 	scr = htobe32(size);
718   #endif
#else
	/* Other architectures pass the size through in host byte order. */
	scr = size;
719 #endif
720 	archsw.arch_copyin(&scr, lastaddr, sizeof(scr));
721 	lastaddr += sizeof(scr);
722 
723 #ifdef ELF_VERBOSE
724 	printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname,
725 	    (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset,
726 	    (uintmax_t)lastaddr, (uintmax_t)(lastaddr + shdr[i].sh_size));
727 #else
728 	if (i == symstrindex)
729 	    printf("+");
730 	printf("0x%lx+0x%lx", (long)sizeof(size), (long)size);
731 #endif
732 
733 	if (lseek(ef->fd, (off_t)shdr[i].sh_offset, SEEK_SET) == -1) {
734 	    printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not seek for symbols - skipped!");
735 	    lastaddr = ssym;
736 	    ssym = 0;
737 	    goto nosyms;
738 	}
739 	result = archsw.arch_readin(ef->fd, lastaddr, shdr[i].sh_size);
740 	if (result < 0 || (size_t)result != shdr[i].sh_size) {
741 	    printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not read symbols - skipped! (%ju != %ju)", (uintmax_t)result,
742 		(uintmax_t)shdr[i].sh_size);
743 	    lastaddr = ssym;
744 	    ssym = 0;
745 	    goto nosyms;
746 	}
747 	/* Reset offsets relative to ssym */
748 	lastaddr += shdr[i].sh_size;
749 	lastaddr = roundup(lastaddr, sizeof(size));
750 	if (i == symtabindex)
751 	    symtabindex = -1;
752 	else if (i == symstrindex)
753 	    symstrindex = -1;
754     }
755     esym = lastaddr;
756 #ifndef ELF_VERBOSE
757     printf("]");
758 #endif
759 
760 #if defined(__powerpc__)
761   /* On PowerPC we always need to provide BE data to the kernel */
762   #if __ELF_WORD_SIZE == 64
763     scr_ssym = htobe64((uint64_t)ssym);
764     scr_esym = htobe64((uint64_t)esym);
765   #else
766     scr_ssym = htobe32((uint32_t)ssym);
767     scr_esym = htobe32((uint32_t)esym);
768   #endif
769 #else
770     scr_ssym = ssym;
771     scr_esym = esym;
772 #endif
773 
774     file_addmetadata(fp, MODINFOMD_SSYM, sizeof(scr_ssym), &scr_ssym);
775     file_addmetadata(fp, MODINFOMD_ESYM, sizeof(scr_esym), &scr_esym);
776 
777 nosyms:
778     printf("\n");
779 
780     ret = lastaddr - firstaddr;
781     fp->f_addr = firstaddr;
782 
783     php = NULL;
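    /*
     * Find the PT_DYNAMIC segment and record its address for the kernel,
     * then pull out of it the tables needed to look up symbols in the
     * image: DT_HASH, DT_SYMTAB, DT_STRTAB and, for kld objects, the
     * DT_REL/DT_RELA relocations.
     */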
784     for (i = 0; i < ehdr->e_phnum; i++) {
785 	if (phdr[i].p_type == PT_DYNAMIC) {
786 	    php = phdr + i;
787 	    adp = php->p_vaddr;
788 	    file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp), &adp);
789 	    break;
790 	}
791     }
792 
793     if (php == NULL)	/* this is bad, we cannot get to symbols or _DYNAMIC */
794 	goto out;
795 
796     ndp = php->p_filesz / sizeof(Elf_Dyn);
797     if (ndp == 0)
798 	goto out;
799     dp = malloc(php->p_filesz);
800     if (dp == NULL)
801 	goto out;
802     archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz);
803 
804     ef->strsz = 0;
805     for (i = 0; i < ndp; i++) {
806 	if (dp[i].d_tag == 0)
807 	    break;
808 	switch (dp[i].d_tag) {
809 	case DT_HASH:
810 	    ef->hashtab = (Elf_Hashelt*)(uintptr_t)(dp[i].d_un.d_ptr + off);
811 	    break;
812 	case DT_STRTAB:
813 	    ef->strtab = (char *)(uintptr_t)(dp[i].d_un.d_ptr + off);
814 	    break;
815 	case DT_STRSZ:
816 	    ef->strsz = dp[i].d_un.d_val;
817 	    break;
818 	case DT_SYMTAB:
819 	    ef->symtab = (Elf_Sym*)(uintptr_t)(dp[i].d_un.d_ptr + off);
820 	    break;
821 	case DT_REL:
822 	    ef->rel = (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off);
823 	    break;
824 	case DT_RELSZ:
825 	    ef->relsz = dp[i].d_un.d_val;
826 	    break;
827 	case DT_RELA:
828 	    ef->rela = (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off);
829 	    break;
830 	case DT_RELASZ:
831 	    ef->relasz = dp[i].d_un.d_val;
832 	    break;
833 	default:
834 	    break;
835 	}
836     }
837     if (ef->hashtab == NULL || ef->symtab == NULL ||
838 	ef->strtab == NULL || ef->strsz == 0)
839 	goto out;
840     COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets));
841     COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains));
842     ef->buckets = ef->hashtab + 2;
843     ef->chains = ef->buckets + ef->nbuckets;
844 
845     if (__elfN(lookup_symbol)(fp, ef, "__start_set_modmetadata_set", &sym) != 0)
846 	return 0;
847     p_start = sym.st_value + ef->off;
848     if (__elfN(lookup_symbol)(fp, ef, "__stop_set_modmetadata_set", &sym) != 0)
849 	return ENOENT;
850     p_end = sym.st_value + ef->off;
851 
852     if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0)
853 	goto out;
854 
855     if (ef->kernel)			/* kernel must not depend on anything */
856 	goto out;
857 
858 out:
859     if (dp)
860 	free(dp);
861     if (shdr)
862 	free(shdr);
863     return ret;
864 }
865 
866 static char invalid_name[] = "bad";
867 
868 char *
869 fake_modname(const char *name)
870 {
871     const char *sp, *ep;
872     char *fp;
873     size_t len;
874 
875     sp = strrchr(name, '/');
876     if (sp)
877 	sp++;
878     else
879 	sp = name;
880     ep = strrchr(name, '.');
881     if (ep) {
882 	    if (ep == name) {
883 		sp = invalid_name;
884 		ep = invalid_name + sizeof(invalid_name) - 1;
885 	    }
886     } else
887 	ep = name + strlen(name);
888     len = ep - sp;
889     fp = malloc(len + 1);
890     if (fp == NULL)
891 	return NULL;
892     memcpy(fp, sp, len);
893     fp[len] = '\0';
894     return fp;
895 }
896 
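/*
 * When the loader's word size differs from that of the image being
 * inspected, struct mod_metadata has differently sized pointer fields in
 * the image.  These shadow layouts let parse_modmetadata() read the
 * image's records and convert them to the host's struct mod_metadata.
 */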
897 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
898 struct mod_metadata64 {
899 	int		md_version;	/* structure version MDTV_* */
900 	int		md_type;	/* type of entry MDT_* */
901 	u_int64_t	md_data;	/* specific data */
902 	u_int64_t	md_cval;	/* common string label */
903 };
904 #endif
905 #if defined(__amd64__) && __ELF_WORD_SIZE == 32
906 struct mod_metadata32 {
907 	int		md_version;	/* structure version MDTV_* */
908 	int		md_type;	/* type of entry MDT_* */
909 	u_int32_t	md_data;	/* specific data */
910 	u_int32_t	md_cval;	/* common string label */
911 };
912 #endif
913 
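/*
 * Read just enough of the file behind (fp) to process its module metadata:
 * the set_modmetadata_set linker set and the .data/.rodata sections it
 * points into are loaded at (dest) and handed to parse_modmetadata().
 */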
914 int
915 __elfN(load_modmetadata)(struct preloaded_file *fp, u_int64_t dest)
916 {
917 	struct elf_file		 ef;
918 	int			 err, i, j;
919 	Elf_Shdr		*sh_meta, *shdr = NULL;
920 	Elf_Shdr		*sh_data[2];
921 	char			*shstrtab = NULL;
922 	size_t			 size;
923 	Elf_Addr		 p_start, p_end;
924 
925 	bzero(&ef, sizeof(struct elf_file));
926 	ef.fd = -1;
927 
928 	err = __elfN(load_elf_header)(fp->f_name, &ef);
929 	if (err != 0)
930 		goto out;
931 
932 	if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) {
933 		ef.kernel = 1;
934 	} else if (ef.ehdr->e_type != ET_DYN) {
935 		err = EFTYPE;
936 		goto out;
937 	}
938 
939 	size = (size_t)ef.ehdr->e_shnum * (size_t)ef.ehdr->e_shentsize;
940 	shdr = alloc_pread(ef.fd, ef.ehdr->e_shoff, size);
941 	if (shdr == NULL) {
942 		err = ENOMEM;
943 		goto out;
944 	}
945 
946 	/* Load shstrtab. */
947 	shstrtab = alloc_pread(ef.fd, shdr[ef.ehdr->e_shstrndx].sh_offset,
948 	    shdr[ef.ehdr->e_shstrndx].sh_size);
949 	if (shstrtab == NULL) {
950 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
951 		    "load_modmetadata: unable to load shstrtab\n");
952 		err = EFTYPE;
953 		goto out;
954 	}
955 
956 	/* Find set_modmetadata_set and data sections. */
957 	sh_data[0] = sh_data[1] = sh_meta = NULL;
958 	for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) {
959 		if (strcmp(&shstrtab[shdr[i].sh_name],
960 		    "set_modmetadata_set") == 0) {
961 			sh_meta = &shdr[i];
962 		}
963 		if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) ||
964 		    (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) {
965 			sh_data[j++] = &shdr[i];
966 		}
967 	}
968 	if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) {
969 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
970     "load_modmetadata: unable to find set_modmetadata_set or data sections\n");
971 		err = EFTYPE;
972 		goto out;
973 	}
974 
975 	/* Load set_modmetadata_set into memory */
976 	err = kern_pread(ef.fd, dest, sh_meta->sh_size, sh_meta->sh_offset);
977 	if (err != 0) {
978 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
979     "load_modmetadata: unable to load set_modmetadata_set: %d\n", err);
980 		goto out;
981 	}
982 	p_start = dest;
983 	p_end = dest + sh_meta->sh_size;
984 	dest += sh_meta->sh_size;
985 
986 	/* Load data sections into memory. */
987 	err = kern_pread(ef.fd, dest, sh_data[0]->sh_size,
988 	    sh_data[0]->sh_offset);
989 	if (err != 0) {
990 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
991 		    "load_modmetadata: unable to load data: %d\n", err);
992 		goto out;
993 	}
994 
995 	/*
996 	 * Advance dest so that the load offset is the same for both the
997 	 * .rodata and .data sections.
998 	 */
999 	ef.off = -(sh_data[0]->sh_addr - dest);
1000 	dest +=	(sh_data[1]->sh_addr - sh_data[0]->sh_addr);
1001 
1002 	err = kern_pread(ef.fd, dest, sh_data[1]->sh_size,
1003 	    sh_data[1]->sh_offset);
1004 	if (err != 0) {
1005 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1006 		    "load_modmetadata: unable to load data: %d\n", err);
1007 		goto out;
1008 	}
1009 
1010 	err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end);
1011 	if (err != 0) {
1012 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1013 		    "load_modmetadata: unable to parse metadata: %d\n", err);
1014 		goto out;
1015 	}
1016 
1017 out:
1018 	if (shstrtab != NULL)
1019 		free(shstrtab);
1020 	if (shdr != NULL)
1021 		free(shdr);
1022 	if (ef.firstpage != NULL)
1023 		free(ef.firstpage);
1024 	if (ef.fd != -1)
1025 		close(ef.fd);
1026 	return (err);
1027 }
1028 
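/*
 * Walk the module metadata linker set between (p_start) and (p_end); each
 * entry is a pointer to a struct mod_metadata in the loaded image.
 * MDT_VERSION records register the modules the file provides, and
 * MDT_DEPEND records become MODINFOMD_DEPLIST metadata (kernels may not
 * depend on anything).  If no MDT_VERSION record is found, a module name
 * derived from the file name is registered instead.
 */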
1029 int
1030 __elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef,
1031     Elf_Addr p_start, Elf_Addr p_end)
1032 {
1033     struct mod_metadata md;
1034 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1035     struct mod_metadata64 md64;
1036 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1037     struct mod_metadata32 md32;
1038 #endif
1039     struct mod_depend *mdepend;
1040     struct mod_version mver;
1041     char *s;
1042     int error, modcnt, minfolen;
1043     Elf_Addr v, p;
1044 
1045     modcnt = 0;
1046     p = p_start;
1047     while (p < p_end) {
1048 	COPYOUT(p, &v, sizeof(v));
1049 	error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
1050 	if (error == EOPNOTSUPP)
1051 	    v += ef->off;
1052 	else if (error != 0)
1053 	    return (error);
1054 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1055 	COPYOUT(v, &md64, sizeof(md64));
1056 	error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64));
1057 	if (error == EOPNOTSUPP) {
1058 	    md64.md_cval += ef->off;
1059 	    md64.md_data += ef->off;
1060 	} else if (error != 0)
1061 	    return (error);
1062 	md.md_version = md64.md_version;
1063 	md.md_type = md64.md_type;
1064 	md.md_cval = (const char *)(uintptr_t)md64.md_cval;
1065 	md.md_data = (void *)(uintptr_t)md64.md_data;
1066 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1067 	COPYOUT(v, &md32, sizeof(md32));
1068 	error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32));
1069 	if (error == EOPNOTSUPP) {
1070 	    md32.md_cval += ef->off;
1071 	    md32.md_data += ef->off;
1072 	} else if (error != 0)
1073 	    return (error);
1074 	md.md_version = md32.md_version;
1075 	md.md_type = md32.md_type;
1076 	md.md_cval = (const char *)(uintptr_t)md32.md_cval;
1077 	md.md_data = (void *)(uintptr_t)md32.md_data;
1078 #else
1079 	COPYOUT(v, &md, sizeof(md));
1080 	error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md));
1081 	if (error == EOPNOTSUPP) {
1082 	    md.md_cval += ef->off;
1083 	    md.md_data = (void *)((uintptr_t)md.md_data + (uintptr_t)ef->off);
1084 	} else if (error != 0)
1085 	    return (error);
1086 #endif
1087 	p += sizeof(Elf_Addr);
1088 	switch(md.md_type) {
1089 	  case MDT_DEPEND:
1090 	    if (ef->kernel)		/* kernel must not depend on anything */
1091 	      break;
1092 	    s = strdupout((vm_offset_t)md.md_cval);
1093 	    minfolen = sizeof(*mdepend) + strlen(s) + 1;
1094 	    mdepend = malloc(minfolen);
1095 	    if (mdepend == NULL)
1096 		return ENOMEM;
1097 	    COPYOUT((vm_offset_t)md.md_data, mdepend, sizeof(*mdepend));
1098 	    strcpy((char*)(mdepend + 1), s);
1099 	    free(s);
1100 	    file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen, mdepend);
1101 	    free(mdepend);
1102 	    break;
1103 	  case MDT_VERSION:
1104 	    s = strdupout((vm_offset_t)md.md_cval);
1105 	    COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver));
1106 	    file_addmodule(fp, s, mver.mv_version, NULL);
1107 	    free(s);
1108 	    modcnt++;
1109 	    break;
1110 	}
1111     }
1112     if (modcnt == 0) {
1113 	s = fake_modname(fp->f_name);
1114 	file_addmodule(fp, s, 1, NULL);
1115 	free(s);
1116     }
1117     return 0;
1118 }
1119 
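/*
 * Standard SysV ELF hash function, matching the hashing used to build the
 * DT_HASH table consumed by __elfN(lookup_symbol)() below.
 */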
1120 static unsigned long
1121 elf_hash(const char *name)
1122 {
1123     const unsigned char *p = (const unsigned char *) name;
1124     unsigned long h = 0;
1125     unsigned long g;
1126 
1127     while (*p != '\0') {
1128 	h = (h << 4) + *p++;
1129 	if ((g = h & 0xf0000000) != 0)
1130 	    h ^= g >> 24;
1131 	h &= ~g;
1132     }
1133     return h;
1134 }
1135 
1136 static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE) "_lookup_symbol: corrupt symbol table\n";
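/*
 * Look (name) up in the image's DT_HASH table, which has the classic SysV
 * layout:
 *
 *	Elf_Hashelt	nbuckets;
 *	Elf_Hashelt	nchains;
 *	Elf_Hashelt	buckets[nbuckets];
 *	Elf_Hashelt	chains[nchains];
 *
 * The search starts at buckets[elf_hash(name) % nbuckets] and follows
 * chains[] until the symbol is found or STN_UNDEF ends the chain.
 */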
1137 int
1138 __elfN(lookup_symbol)(struct preloaded_file *fp, elf_file_t ef, const char* name,
1139 		  Elf_Sym *symp)
1140 {
1141     Elf_Hashelt symnum;
1142     Elf_Sym sym;
1143     char *strp;
1144     unsigned long hash;
1145 
1146     hash = elf_hash(name);
1147     COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum));
1148 
1149     while (symnum != STN_UNDEF) {
1150 	if (symnum >= ef->nchains) {
1151 	    printf(__elfN(bad_symtable));
1152 	    return ENOENT;
1153 	}
1154 
1155 	COPYOUT(ef->symtab + symnum, &sym, sizeof(sym));
1156 	if (sym.st_name == 0) {
1157 	    printf(__elfN(bad_symtable));
1158 	    return ENOENT;
1159 	}
1160 
1161 	strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name));
1162 	if (strcmp(name, strp) == 0) {
1163 	    free(strp);
1164 	    if (sym.st_shndx != SHN_UNDEF ||
1165 		(sym.st_value != 0 &&
1166 		 ELF_ST_TYPE(sym.st_info) == STT_FUNC)) {
1167 		*symp = sym;
1168 		return 0;
1169 	    }
1170 	    return ENOENT;
1171 	}
1172 	free(strp);
1173 	COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum));
1174     }
1175     return ENOENT;
1176 }
1177 
1178 /*
1179  * Apply any intra-module relocations to the value. p is the load address
1180  * of the value and val/len is the value to be modified. This does NOT modify
1181  * the image in-place, because this is done by kern_linker later on.
1182  *
1183  * Returns EOPNOTSUPP if no relocation method is supplied.
1184  */
1185 static int
1186 __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
1187     Elf_Addr p, void *val, size_t len)
1188 {
1189 	size_t n;
1190 	Elf_Rela a;
1191 	Elf_Rel r;
1192 	int error;
1193 
1194 	/*
1195 	 * The kernel is already relocated, but we still want to apply
1196 	 * offset adjustments.
1197 	 */
1198 	if (ef->kernel)
1199 		return (EOPNOTSUPP);
1200 
1201 	for (n = 0; n < ef->relsz / sizeof(r); n++) {
1202 		COPYOUT(ef->rel + n, &r, sizeof(r));
1203 
1204 		error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL,
1205 		    ef->off, p, val, len);
1206 		if (error != 0)
1207 			return (error);
1208 	}
1209 	for (n = 0; n < ef->relasz / sizeof(a); n++) {
1210 		COPYOUT(ef->rela + n, &a, sizeof(a));
1211 
1212 		error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA,
1213 		    ef->off, p, val, len);
1214 		if (error != 0)
1215 			return (error);
1216 	}
1217 
1218 	return (0);
1219 }
1220 
1221 static Elf_Addr
1222 __elfN(symaddr)(struct elf_file *ef, Elf_Size symidx)
1223 {
1224 
1225 	/* Symbol lookup by index not required here. */
1226 	return (0);
1227 }
1228