xref: /freebsd/stand/common/load_elf.c (revision 6aea7b224a34ccc800f9598f034838af7e47eb62)
1 /*-
2  * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3  * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/param.h>
29 #include <sys/endian.h>
30 #include <sys/exec.h>
31 #include <sys/linker.h>
32 #include <sys/module.h>
33 #include <machine/elf.h>
34 #include <stand.h>
35 
36 #include "bootstrap.h"
37 #include "modinfo.h"
38 
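/*
 * COPYOUT(s, d, l) copies 'l' bytes from the staged load area at virtual
 * offset 's' back into loader memory at 'd', via the arch-specific
 * arch_copyout() hook; it is used below to inspect data that has already
 * been loaded (hash tables, linker sets, metadata) without rereading the
 * file.
 */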
39 #define COPYOUT(s,d,l)	archsw.arch_copyout((vm_offset_t)(s), d, l)
40 
41 #if defined(__i386__) && __ELF_WORD_SIZE == 64
42 #undef ELF_TARG_CLASS
43 #undef ELF_TARG_MACH
44 #define ELF_TARG_CLASS  ELFCLASS64
45 #define ELF_TARG_MACH   EM_X86_64
46 #endif
47 
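/*
 * Per-image state used while loading: 'firstpage' caches the first PAGE_SIZE
 * bytes of the file so the ELF and program headers can be examined without
 * extra reads, and 'off' is the load offset added to the image's link-time
 * addresses (and symbol values) to locate the staged copy in load memory.
 */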
48 typedef struct elf_file {
49 	Elf_Phdr	*ph;
50 	Elf_Ehdr	*ehdr;
51 	Elf_Sym		*symtab;
52 	Elf_Hashelt	*hashtab;
53 	Elf_Hashelt	nbuckets;
54 	Elf_Hashelt	nchains;
55 	Elf_Hashelt	*buckets;
56 	Elf_Hashelt	*chains;
57 	Elf_Rel	*rel;
58 	size_t	relsz;
59 	Elf_Rela	*rela;
60 	size_t	relasz;
61 	char	*strtab;
62 	size_t	strsz;
63 	int		fd;
64 	caddr_t	firstpage;
65 	size_t	firstlen;
66 	int		kernel;
67 	uint64_t	off;
68 #ifdef LOADER_VERIEXEC_VECTX
69 	struct vectx	*vctx;
70 #endif
71 } *elf_file_t;
72 
73 #ifdef LOADER_VERIEXEC_VECTX
74 #define VECTX_HANDLE(ef) (ef)->vctx
75 #else
76 #define VECTX_HANDLE(ef) (ef)->fd
77 #endif
78 
79 static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef,
80     uint64_t loadaddr);
81 static int __elfN(lookup_symbol)(elf_file_t ef, const char* name,
82     Elf_Sym *sym, unsigned char type);
83 static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
84     Elf_Addr p, void *val, size_t len);
85 static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef,
86     Elf_Addr p_start, Elf_Addr p_end);
87 static symaddr_fn __elfN(symaddr);
88 static char	*fake_modname(const char *name);
89 
90 uint64_t	__elfN(relocation_offset) = 0;
91 
92 #ifdef __powerpc__
93 extern void elf_wrong_field_size(void);
94 #define CONVERT_FIELD(b, f, e)			\
95 	switch (sizeof((b)->f)) {		\
96 	case 2:					\
97 		(b)->f = e ## 16toh((b)->f);	\
98 		break;				\
99 	case 4:					\
100 		(b)->f = e ## 32toh((b)->f);	\
101 		break;				\
102 	case 8:					\
103 		(b)->f = e ## 64toh((b)->f);	\
104 		break;				\
105 	default:				\
106 		/* Force a link time error. */	\
107 		elf_wrong_field_size();		\
108 		break;				\
109 	}
110 
111 #define CONVERT_SWITCH(h, d, f)			\
112 	switch ((h)->e_ident[EI_DATA]) {	\
113 	case ELFDATA2MSB:			\
114 		f(d, be);			\
115 		break;				\
116 	case ELFDATA2LSB:			\
117 		f(d, le);			\
118 		break;				\
119 	default:				\
120 		return (EINVAL);		\
121 	}
122 
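/*
 * Taken together, the two macros above expand a call such as
 * CONVERT_SWITCH(ehdr, ehdr, HEADER_FIELDS) into a switch on the image's
 * EI_DATA byte that converts every listed field with the be*toh()/le*toh()
 * routine selected by sizeof() the field; an unsupported field size becomes
 * a link-time error through the undefined elf_wrong_field_size().
 */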
123 
124 static int elf_header_convert(Elf_Ehdr *ehdr)
125 {
126 	/*
127 	 * Fixup ELF header endianness.
128 	 *
129 	 * The Xhdr structure was loaded using a block read call to optimize
130 	 * file accesses. The endianness of the system memory may differ from
131 	 * the endianness of the ELF header.  Swap the fields here to
132 	 * guarantee that Xhdr always contains valid data regardless of
133 	 * architecture.
134 	 */
135 #define HEADER_FIELDS(b, e)			\
136 	CONVERT_FIELD(b, e_type, e);		\
137 	CONVERT_FIELD(b, e_machine, e);		\
138 	CONVERT_FIELD(b, e_version, e);		\
139 	CONVERT_FIELD(b, e_entry, e);		\
140 	CONVERT_FIELD(b, e_phoff, e);		\
141 	CONVERT_FIELD(b, e_shoff, e);		\
142 	CONVERT_FIELD(b, e_flags, e);		\
143 	CONVERT_FIELD(b, e_ehsize, e);		\
144 	CONVERT_FIELD(b, e_phentsize, e);	\
145 	CONVERT_FIELD(b, e_phnum, e);		\
146 	CONVERT_FIELD(b, e_shentsize, e);	\
147 	CONVERT_FIELD(b, e_shnum, e);		\
148 	CONVERT_FIELD(b, e_shstrndx, e)
149 
150 	CONVERT_SWITCH(ehdr, ehdr, HEADER_FIELDS);
151 
152 #undef HEADER_FIELDS
153 
154 	return (0);
155 }
156 
157 static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
158 {
159 #define PROGRAM_HEADER_FIELDS(b, e)		\
160 	CONVERT_FIELD(b, p_type, e);		\
161 	CONVERT_FIELD(b, p_flags, e);		\
162 	CONVERT_FIELD(b, p_offset, e);		\
163 	CONVERT_FIELD(b, p_vaddr, e);		\
164 	CONVERT_FIELD(b, p_paddr, e);		\
165 	CONVERT_FIELD(b, p_filesz, e);		\
166 	CONVERT_FIELD(b, p_memsz, e);		\
167 	CONVERT_FIELD(b, p_align, e)
168 
169 	CONVERT_SWITCH(ehdr, phdr, PROGRAM_HEADER_FIELDS);
170 
171 #undef PROGRAM_HEADER_FIELDS
172 
173 	return (0);
174 }
175 
176 static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
177 {
178 #define SECTION_HEADER_FIELDS(b, e)		\
179 	CONVERT_FIELD(b, sh_name, e);		\
180 	CONVERT_FIELD(b, sh_type, e);		\
181 	CONVERT_FIELD(b, sh_link, e);		\
182 	CONVERT_FIELD(b, sh_info, e);		\
183 	CONVERT_FIELD(b, sh_flags, e);		\
184 	CONVERT_FIELD(b, sh_addr, e);		\
185 	CONVERT_FIELD(b, sh_offset, e);		\
186 	CONVERT_FIELD(b, sh_size, e);		\
187 	CONVERT_FIELD(b, sh_addralign, e);	\
188 	CONVERT_FIELD(b, sh_entsize, e)
189 
190 	CONVERT_SWITCH(ehdr, shdr, SECTION_HEADER_FIELDS);
191 
192 #undef SECTION_HEADER_FIELDS
193 
194 	return (0);
195 }
196 #undef CONVERT_SWITCH
197 #undef CONVERT_FIELD
198 #else
199 static int elf_header_convert(Elf_Ehdr *ehdr)
200 {
201 	return (0);
202 }
203 
204 static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
205 {
206 	return (0);
207 }
208 
209 static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
210 {
211 	return (0);
212 }
213 #endif
214 
215 #if defined(__amd64__) || (defined(__i386__) && defined(EFI))
216 static bool
217 is_kernphys_relocatable(elf_file_t ef)
218 {
219 	Elf_Sym sym;
220 
221 	return (__elfN(lookup_symbol)(ef, "kernphys", &sym, STT_OBJECT) == 0);
222 }
223 #endif
224 
225 #ifdef __i386__
226 static bool
227 is_tg_kernel_support(struct preloaded_file *fp, elf_file_t ef)
228 {
229 	Elf_Sym		sym;
230 	Elf_Addr	p_start, p_end, v, p;
231 	char		vd_name[16];
232 	int		error;
233 
234 	if (__elfN(lookup_symbol)(ef, "__start_set_vt_drv_set", &sym, STT_NOTYPE) != 0)
235 		return (false);
236 	p_start = sym.st_value + ef->off;
237 	if (__elfN(lookup_symbol)(ef, "__stop_set_vt_drv_set", &sym, STT_NOTYPE) != 0)
238 		return (false);
239 	p_end = sym.st_value + ef->off;
240 
241 	/*
242 	 * Walk through vt_drv_set; each vt driver structure starts with a
243 	 * static 16-char driver name. If we find "vbefb", return true.
244 	 */
245 	for (p = p_start; p < p_end; p += sizeof(Elf_Addr)) {
246 		COPYOUT(p, &v, sizeof(v));
247 
248 		error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
249 		if (error == EOPNOTSUPP)
250 			v += ef->off;
251 		else if (error != 0)
252 			return (false);
253 		COPYOUT(v, &vd_name, sizeof(vd_name));
254 		if (strncmp(vd_name, "vbefb", sizeof(vd_name)) == 0)
255 			return (true);
256 	}
257 
258 	return (false);
259 }
260 #endif
261 
262 static int
263 __elfN(load_elf_header)(char *filename, elf_file_t ef)
264 {
265 	ssize_t			 bytes_read;
266 	Elf_Ehdr		*ehdr;
267 	int			 err;
268 
269 	/*
270 	 * Open the image, read and validate the ELF header
271 	 */
272 	if (filename == NULL)	/* can't handle nameless */
273 		return (EFTYPE);
274 	if ((ef->fd = open(filename, O_RDONLY)) == -1)
275 		return (errno);
276 	ef->firstpage = malloc(PAGE_SIZE);
277 	if (ef->firstpage == NULL) {
278 		close(ef->fd);
279 		return (ENOMEM);
280 	}
281 	preload(ef->fd);
282 #ifdef LOADER_VERIEXEC_VECTX
283 	{
284 		int verror;
285 
286 		ef->vctx = vectx_open(ef->fd, filename, 0L, NULL, &verror, __func__);
287 		if (verror) {
288 			printf("Unverified %s: %s\n", filename, ve_error_get());
289 			close(ef->fd);
290 			free(ef->vctx);
291 			return (EAUTH);
292 		}
293 	}
294 #endif
295 	bytes_read = VECTX_READ(VECTX_HANDLE(ef), ef->firstpage, PAGE_SIZE);
296 	ef->firstlen = (size_t)bytes_read;
297 	if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) {
298 		err = EFTYPE; /* could be EIO, but may be small file */
299 		goto error;
300 	}
301 	ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage;
302 
303 	/* Is it ELF? */
304 	if (!IS_ELF(*ehdr)) {
305 		err = EFTYPE;
306 		goto error;
307 	}
308 
309 	if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || /* Layout ? */
310 	    ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
311 	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) /* Version ? */ {
312 		err = EFTYPE;
313 		goto error;
314 	}
315 
316 	err = elf_header_convert(ehdr);
317 	if (err)
318 		goto error;
319 
320 	if (ehdr->e_version != EV_CURRENT || ehdr->e_machine != ELF_TARG_MACH) {
321 		/* Machine ? */
322 		err = EFTYPE;
323 		goto error;
324 	}
325 
326 #if defined(LOADER_VERIEXEC) && !defined(LOADER_VERIEXEC_VECTX)
327 	if (verify_file(ef->fd, filename, bytes_read, VE_MUST, __func__) < 0) {
328 		err = EAUTH;
329 		goto error;
330 	}
331 #endif
332 	return (0);
333 
334 error:
335 	if (ef->firstpage != NULL) {
336 		free(ef->firstpage);
337 		ef->firstpage = NULL;
338 	}
339 	if (ef->fd != -1) {
340 #ifdef LOADER_VERIEXEC_VECTX
341 		free(ef->vctx);
342 #endif
343 		close(ef->fd);
344 		ef->fd = -1;
345 	}
346 	return (err);
347 }
348 
349 /*
350  * Attempt to load the file (filename) as an ELF module.  It will be stored at
351  * (dest), and a pointer to a module structure describing the loaded object
352  * will be saved in (result).
353  */
354 int
355 __elfN(loadfile)(char *filename, uint64_t dest, struct preloaded_file **result)
356 {
357 	return (__elfN(loadfile_raw)(filename, dest, result, 0));
358 }
359 
360 int
361 __elfN(loadfile_raw)(char *filename, uint64_t dest,
362     struct preloaded_file **result, int multiboot)
363 {
364 	struct preloaded_file	*fp, *kfp;
365 	struct elf_file		ef;
366 	Elf_Ehdr		*ehdr;
367 	int			err;
368 
369 	fp = NULL;
370 	bzero(&ef, sizeof(struct elf_file));
371 	ef.fd = -1;
372 
373 	err = __elfN(load_elf_header)(filename, &ef);
374 	if (err != 0)
375 		return (err);
376 
377 	ehdr = ef.ehdr;
378 
379 	/*
380 	 * Check to see what sort of module we are.
381 	 */
382 	kfp = file_findfile(NULL, md_kerntype);
383 #ifdef __powerpc__
384 	/*
385 	 * Kernels can be ET_DYN, so just assume the first loaded object is the
386 	 * kernel. This assumption will be checked later.
387 	 */
388 	if (kfp == NULL)
389 		ef.kernel = 1;
390 #endif
391 	if (ef.kernel || ehdr->e_type == ET_EXEC) {
392 		/* Looks like a kernel */
393 		if (kfp != NULL) {
394 			printf("elf" __XSTRING(__ELF_WORD_SIZE)
395 			    "_loadfile: kernel already loaded\n");
396 			err = EPERM;
397 			goto oerr;
398 		}
399 		/*
400 		 * Calculate destination address based on kernel entrypoint.
401 		 *
402 		 * For ARM, the destination address is independent of any values
403 		 * in the elf header (an ARM kernel can be loaded at any 2MB
404 		 * boundary), so we leave dest set to the value calculated by
405 		 * archsw.arch_loadaddr() and passed in to this function.
406 		 * XXX This comment is obsolete, but it still seems to work
407 		 */
408 #ifndef __arm__
409 		if (ehdr->e_type == ET_EXEC)
410 			dest = (ehdr->e_entry & ~PAGE_MASK);
411 #endif
412 		if ((ehdr->e_entry & ~PAGE_MASK) == 0) {
413 			printf("elf" __XSTRING(__ELF_WORD_SIZE)
414 			    "_loadfile: not a kernel (maybe static binary?)\n");
415 			err = EPERM;
416 			goto oerr;
417 		}
418 		ef.kernel = 1;
419 
420 	} else if (ehdr->e_type == ET_DYN) {
421 		/* Looks like a kld module */
422 		if (multiboot != 0) {
423 			printf("elf" __XSTRING(__ELF_WORD_SIZE)
424 			    "_loadfile: can't load module as multiboot\n");
425 			err = EPERM;
426 			goto oerr;
427 		}
428 		if (kfp == NULL) {
429 			printf("elf" __XSTRING(__ELF_WORD_SIZE)
430 			    "_loadfile: can't load module before kernel\n");
431 			err = EPERM;
432 			goto oerr;
433 		}
434 		if (strcmp(md_kerntype, kfp->f_type)) {
435 			printf("elf" __XSTRING(__ELF_WORD_SIZE)
436 			 "_loadfile: can't load module with kernel type '%s'\n",
437 			    kfp->f_type);
438 			err = EPERM;
439 			goto oerr;
440 		}
441 		/* Looks OK, go ahead */
442 		ef.kernel = 0;
443 
444 	} else {
445 		err = EFTYPE;
446 		goto oerr;
447 	}
448 
449 	dest = md_align(dest);
450 
451 	/*
452 	 * Ok, we think we should handle this.
453 	 */
454 	fp = file_alloc();
455 	if (fp == NULL) {
456 		printf("elf" __XSTRING(__ELF_WORD_SIZE)
457 		    "_loadfile: cannot allocate module info\n");
458 		err = EPERM;
459 		goto out;
460 	}
461 	if (ef.kernel == 1 && multiboot == 0)
462 		setenv("kernelname", filename, 1);
463 	fp->f_name = strdup(filename);
464 	if (multiboot == 0)
465 		fp->f_type = strdup(ef.kernel ?
466 		    md_kerntype : md_modtype);
467 	else
468 		fp->f_type = strdup(md_kerntype_mb);
469 
470 	if (module_verbose >= MODULE_VERBOSE_FULL) {
471 		if (ef.kernel)
472 			printf("%s entry at 0x%jx\n", filename,
473 			    (uintmax_t)ehdr->e_entry);
474 	} else if (module_verbose > MODULE_VERBOSE_SILENT)
475 		printf("%s ", filename);
476 
477 	fp->f_size = __elfN(loadimage)(fp, &ef, dest);
478 	if (fp->f_size == 0 || fp->f_addr == 0)
479 		goto ioerr;
480 
481 	/* save exec header as metadata */
482 	file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr);
483 
484 	/* Load OK, return module pointer */
485 	*result = (struct preloaded_file *)fp;
486 	err = 0;
487 #if defined(__amd64__) || (defined(__i386__) && defined(EFI))
488 	fp->f_kernphys_relocatable = multiboot || is_kernphys_relocatable(&ef);
489 #endif
490 #if defined(__i386__) && !defined(EFI)
491 	fp->f_tg_kernel_support = is_tg_kernel_support(fp, &ef);
492 #endif
493 	goto out;
494 
495 ioerr:
496 	err = EIO;
497 oerr:
498 	file_discard(fp);
499 out:
500 	if (ef.firstpage)
501 		free(ef.firstpage);
502 	if (ef.fd != -1) {
503 #ifdef LOADER_VERIEXEC_VECTX
504 		if (!err && ef.vctx) {
505 			int verror;
506 
507 			verror = vectx_close(ef.vctx, VE_MUST, __func__);
508 			if (verror) {
509 				err = EAUTH;
510 				file_discard(fp);
511 			}
512 		}
513 #endif
514 		close(ef.fd);
515 	}
516 	return (err);
517 }
518 
519 /*
520  * With the image file open and its ELF header parsed into (ef), load the
521  * image at offset (off), recording details of the loaded object in (fp).
522  */
523 static int
524 __elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, uint64_t off)
525 {
526 	int		i;
527 	u_int		j;
528 	Elf_Ehdr	*ehdr;
529 	Elf_Phdr	*phdr, *php;
530 	Elf_Shdr	*shdr;
531 	char		*shstr;
532 	int		ret;
533 	vm_offset_t	firstaddr;
534 	vm_offset_t	lastaddr;
535 	size_t		chunk;
536 	ssize_t		result;
537 	Elf_Addr	ssym, esym;
538 	Elf_Dyn		*dp;
539 	Elf_Addr	adp;
540 	Elf_Addr	ctors;
541 	int		ndp;
542 	int		symstrindex;
543 	int		symtabindex;
544 	Elf_Size	size;
545 	u_int		fpcopy;
546 	Elf_Sym		sym;
547 	Elf_Addr	p_start, p_end;
548 
549 	dp = NULL;
550 	shdr = NULL;
551 	ret = 0;
552 	firstaddr = lastaddr = 0;
553 	ehdr = ef->ehdr;
554 #ifdef __powerpc__
555 	if (ef->kernel) {
556 #else
557 	if (ehdr->e_type == ET_EXEC) {
558 #endif
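		/*
		 * For a kernel image, the per-arch cases below turn 'off' into
		 * the delta that is added to the virtual addresses found in
		 * the ELF headers to place the segments at their load
		 * location (zero on direct-mapped architectures).
		 */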
559 #if defined(__i386__) || defined(__amd64__)
560 #if __ELF_WORD_SIZE == 64
561 		/* x86_64 relocates after locore */
562 		off = - (off & 0xffffffffff000000ull);
563 #else
564 		/* i386 relocates after locore */
565 		off = - (off & 0xff000000u);
566 #endif
567 #elif defined(__powerpc__)
568 		/*
569 		 * On purely virtual memory machines like the e500, the kernel
570 		 * is linked against its final VA range, which is usually not
571 		 * available at the loader stage but only after the kernel has
572 		 * initialized and completed its VM setup. In such cases we
573 		 * cannot use the p_vaddr field directly to load ELF segments,
574 		 * but instead place them at 'load-time' locations.
575 		 */
576 		if (off & 0xf0000000u) {
577 			off = -(off & 0xf0000000u);
578 			/*
579 			 * XXX the physical load address should not be
580 			 * hardcoded. Note that the Book-E kernel assumes that
581 			 * it's loaded at a 16MB boundary for now...
582 			 */
583 			off += 0x01000000;
584 		}
585 		ehdr->e_entry += off;
586 		if (module_verbose >= MODULE_VERBOSE_FULL)
587 			printf("Converted entry 0x%jx\n",
588 			    (uintmax_t)ehdr->e_entry);
589 
590 #elif defined(__arm__) && !defined(EFI)
591 		/*
592 		 * The elf headers in arm kernels specify virtual addresses in
593 		 * all header fields, even the ones that should be physical
594 		 * addresses.  We assume the entry point is in the first page,
595 		 * and masking the page offset will leave us with the virtual
596 		 * address the kernel was linked at.  We subtract that from the
597 		 * load offset, making 'off' into the value which, when added
598 		 * to a virtual address in an elf header, translates it to a
599 		 * physical address.  We do the va->pa conversion on the entry
600 		 * point address in the header now, so that later we can launch
601 		 * the kernel by just jumping to that address.
602 		 *
603 		 * When booting from UEFI the copyin and copyout functions
604 		 * handle adjusting the location relative to the first virtual
605 		 * address.  Because of this there is no need to adjust the
606 		 * offset or entry point address as these will both be handled
607 		 * by the efi code.
608 		 */
609 		off -= ehdr->e_entry & ~PAGE_MASK;
610 		ehdr->e_entry += off;
611 		if (module_verbose >= MODULE_VERBOSE_FULL)
612 			printf("ehdr->e_entry 0x%jx, va<->pa off %llx\n",
613 			    (uintmax_t)ehdr->e_entry, off);
614 #else
615 		off = 0;	/* other archs use direct mapped kernels */
616 #endif
617 	}
618 	ef->off = off;
619 
620 	if (ef->kernel)
621 		__elfN(relocation_offset) = off;
622 
623 	if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) {
624 		printf("elf" __XSTRING(__ELF_WORD_SIZE)
625 		    "_loadimage: program header not within first page\n");
626 		goto out;
627 	}
628 	phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff);
629 
630 	for (i = 0; i < ehdr->e_phnum; i++) {
631 		if (elf_program_header_convert(ehdr, phdr))
632 			continue;
633 
634 		/* We want to load PT_LOAD segments only. */
635 		if (phdr[i].p_type != PT_LOAD)
636 			continue;
637 
638 		if (module_verbose >= MODULE_VERBOSE_FULL) {
639 			printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
640 			    (long)phdr[i].p_filesz, (long)phdr[i].p_offset,
641 			    (long)(phdr[i].p_vaddr + off),
642 			    (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
643 		} else if (module_verbose > MODULE_VERBOSE_SILENT) {
644 			if ((phdr[i].p_flags & PF_W) == 0) {
645 				printf("text=0x%lx ", (long)phdr[i].p_filesz);
646 			} else {
647 				printf("data=0x%lx", (long)phdr[i].p_filesz);
648 				if (phdr[i].p_filesz < phdr[i].p_memsz)
649 					printf("+0x%lx", (long)(phdr[i].p_memsz -
650 						phdr[i].p_filesz));
651 				printf(" ");
652 			}
653 		}
654 		fpcopy = 0;
655 		if (ef->firstlen > phdr[i].p_offset) {
656 			fpcopy = ef->firstlen - phdr[i].p_offset;
657 			archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
658 			    phdr[i].p_vaddr + off, fpcopy);
659 		}
660 		if (phdr[i].p_filesz > fpcopy) {
661 			if (kern_pread(VECTX_HANDLE(ef),
662 			    phdr[i].p_vaddr + off + fpcopy,
663 			    phdr[i].p_filesz - fpcopy,
664 			    phdr[i].p_offset + fpcopy) != 0) {
665 				printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
666 				    "_loadimage: read failed\n");
667 				goto out;
668 			}
669 		}
670 		/* clear space from oversized segments; eg: bss */
671 		if (phdr[i].p_filesz < phdr[i].p_memsz) {
672 			if (module_verbose >= MODULE_VERBOSE_FULL) {
673 				printf(" (bss: 0x%lx-0x%lx)",
674 				    (long)(phdr[i].p_vaddr + off + phdr[i].p_filesz),
675 				    (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz -1));
676 			}
677 			kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz,
678 			    phdr[i].p_memsz - phdr[i].p_filesz);
679 		}
680 		if (module_verbose >= MODULE_VERBOSE_FULL)
681 			printf("\n");
682 
683 		if (archsw.arch_loadseg != NULL)
684 			archsw.arch_loadseg(ehdr, phdr + i, off);
685 
686 		if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off))
687 			firstaddr = phdr[i].p_vaddr + off;
688 		if (lastaddr == 0 || lastaddr <
689 		    (phdr[i].p_vaddr + off + phdr[i].p_memsz))
690 			lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz;
691 	}
692 	lastaddr = roundup(lastaddr, sizeof(long));
693 
694 	/*
695 	 * Get the section headers.  We need this for finding the .ctors
696 	 * section as well as for loading any symbols.  Both may be hard
697 	 * to do if reading from a .gz file as it involves seeking.  I
698 	 * think the rule is going to have to be that you must strip a
699 	 * file to remove symbols before gzipping it.
700 	 */
701 	chunk = (size_t)ehdr->e_shnum * (size_t)ehdr->e_shentsize;
702 	if (chunk == 0 || ehdr->e_shoff == 0)
703 		goto nosyms;
704 	shdr = alloc_pread(VECTX_HANDLE(ef), ehdr->e_shoff, chunk);
705 	if (shdr == NULL) {
706 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
707 		    "_loadimage: failed to read section headers");
708 		goto nosyms;
709 	}
710 
711 	for (i = 0; i < ehdr->e_shnum; i++)
712 		elf_section_header_convert(ehdr, &shdr[i]);
713 
714 	file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr);
715 
716 	/*
717 	 * Read the section string table and look for the .ctors section.
718 	 * We need to tell the kernel where it is so that it can call the
719 	 * ctors.
720 	 */
721 	chunk = shdr[ehdr->e_shstrndx].sh_size;
722 	if (chunk) {
723 		shstr = alloc_pread(VECTX_HANDLE(ef),
724 		    shdr[ehdr->e_shstrndx].sh_offset, chunk);
725 		if (shstr) {
726 			for (i = 0; i < ehdr->e_shnum; i++) {
727 				if (strcmp(shstr + shdr[i].sh_name,
728 				    ".ctors") != 0)
729 					continue;
730 				ctors = shdr[i].sh_addr;
731 				file_addmetadata(fp, MODINFOMD_CTORS_ADDR,
732 				    sizeof(ctors), &ctors);
733 				size = shdr[i].sh_size;
734 				file_addmetadata(fp, MODINFOMD_CTORS_SIZE,
735 				    sizeof(size), &size);
736 				break;
737 			}
738 			free(shstr);
739 		}
740 	}
741 
742 	/*
743 	 * Now load any symbols.
744 	 */
745 	symtabindex = -1;
746 	symstrindex = -1;
747 	for (i = 0; i < ehdr->e_shnum; i++) {
748 		if (shdr[i].sh_type != SHT_SYMTAB)
749 			continue;
750 		for (j = 0; j < ehdr->e_phnum; j++) {
751 			if (phdr[j].p_type != PT_LOAD)
752 				continue;
753 			if (shdr[i].sh_offset >= phdr[j].p_offset &&
754 			    (shdr[i].sh_offset + shdr[i].sh_size <=
755 			    phdr[j].p_offset + phdr[j].p_filesz)) {
756 				shdr[i].sh_offset = 0;
757 				shdr[i].sh_size = 0;
758 				break;
759 			}
760 		}
761 		if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0)
762 			continue;	/* already loaded in a PT_LOAD above */
763 		/* Save it for loading below */
764 		symtabindex = i;
765 		symstrindex = shdr[i].sh_link;
766 	}
767 	if (symtabindex < 0 || symstrindex < 0)
768 		goto nosyms;
769 
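	/*
	 * The symbol table and its string table are staged immediately after
	 * the loaded image, each as an Elf_Size length word followed by the
	 * raw section contents; ssym/esym bracket the whole region and are
	 * handed to the kernel as MODINFOMD_SSYM/MODINFOMD_ESYM below.
	 */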
770 	/* Ok, committed to a load. */
771 	if (module_verbose >= MODULE_VERBOSE_FULL)
772 		printf("syms=[");
773 	ssym = lastaddr;
774 	for (i = symtabindex; i >= 0; i = symstrindex) {
775 		char	*secname;
776 
777 		switch(shdr[i].sh_type) {
778 		case SHT_SYMTAB:		/* Symbol table */
779 			secname = "symtab";
780 			break;
781 		case SHT_STRTAB:		/* String table */
782 			secname = "strtab";
783 			break;
784 		default:
785 			secname = "WHOA!!";
786 			break;
787 		}
788 		size = shdr[i].sh_size;
789 
790 		archsw.arch_copyin(&size, lastaddr, sizeof(size));
791 		lastaddr += sizeof(size);
792 
793 		if (module_verbose >= MODULE_VERBOSE_FULL) {
794 			printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname,
795 			    (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset,
796 			    (uintmax_t)lastaddr,
797 			    (uintmax_t)(lastaddr + shdr[i].sh_size));
798 		} else if (module_verbose > MODULE_VERBOSE_SILENT) {
799 			if (i == symstrindex)
800 				printf("+");
801 			printf("0x%lx+0x%lx", (long)sizeof(size), (long)size);
802 		}
803 		if (VECTX_LSEEK(VECTX_HANDLE(ef), (off_t)shdr[i].sh_offset, SEEK_SET) == -1) {
804 			printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
805 			   "_loadimage: could not seek for symbols - skipped!");
806 			lastaddr = ssym;
807 			ssym = 0;
808 			goto nosyms;
809 		}
810 		result = archsw.arch_readin(VECTX_HANDLE(ef), lastaddr, shdr[i].sh_size);
811 		if (result < 0 || (size_t)result != shdr[i].sh_size) {
812 			printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
813 			    "_loadimage: could not read symbols - skipped! "
814 			    "(%ju != %ju)", (uintmax_t)result,
815 			    (uintmax_t)shdr[i].sh_size);
816 			lastaddr = ssym;
817 			ssym = 0;
818 			goto nosyms;
819 		}
820 		/* Reset offsets relative to ssym */
821 		lastaddr += shdr[i].sh_size;
822 		lastaddr = roundup(lastaddr, sizeof(size));
823 		if (i == symtabindex)
824 			symtabindex = -1;
825 		else if (i == symstrindex)
826 			symstrindex = -1;
827 	}
828 	esym = lastaddr;
829 	if (module_verbose >= MODULE_VERBOSE_FULL)
830 		printf("]");
831 
832 	file_addmetadata(fp, MODINFOMD_SSYM, sizeof(ssym), &ssym);
833 	file_addmetadata(fp, MODINFOMD_ESYM, sizeof(esym), &esym);
834 
835 nosyms:
836 	if (module_verbose > MODULE_VERBOSE_SILENT)
837 		printf("\n");
838 
839 	ret = lastaddr - firstaddr;
840 	fp->f_addr = firstaddr;
841 
842 	php = NULL;
843 	for (i = 0; i < ehdr->e_phnum; i++) {
844 		if (phdr[i].p_type == PT_DYNAMIC) {
845 			php = phdr + i;
846 			adp = php->p_vaddr;
847 			file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp),
848 			    &adp);
849 			break;
850 		}
851 	}
852 
853 	if (php == NULL) /* this is bad, we cannot get to symbols or _DYNAMIC */
854 		goto out;
855 
856 	ndp = php->p_filesz / sizeof(Elf_Dyn);
857 	if (ndp == 0)
858 		goto out;
859 	dp = malloc(php->p_filesz);
860 	if (dp == NULL)
861 		goto out;
862 	archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz);
863 
864 	ef->strsz = 0;
865 	for (i = 0; i < ndp; i++) {
866 		if (dp[i].d_tag == 0)
867 			break;
868 		switch (dp[i].d_tag) {
869 		case DT_HASH:
870 			ef->hashtab =
871 			    (Elf_Hashelt*)(uintptr_t)(dp[i].d_un.d_ptr + off);
872 			break;
873 		case DT_STRTAB:
874 			ef->strtab =
875 			    (char *)(uintptr_t)(dp[i].d_un.d_ptr + off);
876 			break;
877 		case DT_STRSZ:
878 			ef->strsz = dp[i].d_un.d_val;
879 			break;
880 		case DT_SYMTAB:
881 			ef->symtab =
882 			    (Elf_Sym *)(uintptr_t)(dp[i].d_un.d_ptr + off);
883 			break;
884 		case DT_REL:
885 			ef->rel =
886 			    (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off);
887 			break;
888 		case DT_RELSZ:
889 			ef->relsz = dp[i].d_un.d_val;
890 			break;
891 		case DT_RELA:
892 			ef->rela =
893 			    (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off);
894 			break;
895 		case DT_RELASZ:
896 			ef->relasz = dp[i].d_un.d_val;
897 			break;
898 		default:
899 			break;
900 		}
901 	}
902 	if (ef->hashtab == NULL || ef->symtab == NULL ||
903 	    ef->strtab == NULL || ef->strsz == 0)
904 		goto out;
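	/*
	 * Standard SysV DT_HASH layout: the first two words are nbuckets and
	 * nchains, followed by the bucket array and then the chain array;
	 * cache the counts and array pointers for lookup_symbol().
	 */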
905 	COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets));
906 	COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains));
907 	ef->buckets = ef->hashtab + 2;
908 	ef->chains = ef->buckets + ef->nbuckets;
909 
910 	if (__elfN(lookup_symbol)(ef, "__start_set_modmetadata_set", &sym,
911 	    STT_NOTYPE) != 0)
912 		return 0;
913 	p_start = sym.st_value + ef->off;
914 	if (__elfN(lookup_symbol)(ef, "__stop_set_modmetadata_set", &sym,
915 	    STT_NOTYPE) != 0)
916 		return 0;
917 	p_end = sym.st_value + ef->off;
918 
919 	if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0)
920 		goto out;
921 
922 	if (ef->kernel)		/* kernel must not depend on anything */
923 		goto out;
924 
925 out:
926 	if (dp)
927 		free(dp);
928 	if (shdr)
929 		free(shdr);
930 	return ret;
931 }
932 
933 static char invalid_name[] = "bad";
934 
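/*
 * Derive a module name from a file path by stripping any directory prefix
 * and trailing extension, e.g. "/boot/kernel/if_em.ko" yields "if_em".
 */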
935 char *
936 fake_modname(const char *name)
937 {
938 	const char *sp, *ep;
939 	char *fp;
940 	size_t len;
941 
942 	sp = strrchr(name, '/');
943 	if (sp)
944 		sp++;
945 	else
946 		sp = name;
947 
948 	ep = strrchr(sp, '.');
949 	if (ep == NULL) {
950 		ep = sp + strlen(sp);
951 	}
952 	if (ep == sp) {
953 		sp = invalid_name;
954 		ep = invalid_name + sizeof(invalid_name) - 1;
955 	}
956 
957 	len = ep - sp;
958 	fp = malloc(len + 1);
959 	if (fp == NULL)
960 		return NULL;
961 	memcpy(fp, sp, len);
962 	fp[len] = '\0';
963 	return fp;
964 }
965 
966 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
967 struct mod_metadata64 {
968 	int		md_version;	/* structure version MDTV_* */
969 	int		md_type;	/* type of entry MDT_* */
970 	uint64_t	md_data;	/* specific data */
971 	uint64_t	md_cval;	/* common string label */
972 };
973 #endif
974 #if defined(__amd64__) && __ELF_WORD_SIZE == 32
975 struct mod_metadata32 {
976 	int		md_version;	/* structure version MDTV_* */
977 	int		md_type;	/* type of entry MDT_* */
978 	uint32_t	md_data;	/* specific data */
979 	uint32_t	md_cval;	/* common string label */
980 };
981 #endif
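/*
 * The fixed-width mirrors of struct mod_metadata above are used when the
 * pointer size of the image being parsed differs from the loader's own
 * (e.g. a 32-bit loader inspecting a 64-bit kernel); md_cval/md_data are
 * read at the image's width and converted to the loader's pointer types in
 * parse_modmetadata().
 */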
982 
983 int
984 __elfN(load_modmetadata)(struct preloaded_file *fp, uint64_t dest)
985 {
986 	struct elf_file		 ef;
987 	int			 err, i, j;
988 	Elf_Shdr		*sh_meta, *shdr = NULL;
989 	Elf_Shdr		*sh_data[2];
990 	char			*shstrtab = NULL;
991 	size_t			 size;
992 	Elf_Addr		 p_start, p_end;
993 
994 	bzero(&ef, sizeof(struct elf_file));
995 	ef.fd = -1;
996 
997 	err = __elfN(load_elf_header)(fp->f_name, &ef);
998 	if (err != 0)
999 		goto out;
1000 
1001 	if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) {
1002 		ef.kernel = 1;
1003 	} else if (ef.ehdr->e_type != ET_DYN) {
1004 		err = EFTYPE;
1005 		goto out;
1006 	}
1007 
1008 	size = (size_t)ef.ehdr->e_shnum * (size_t)ef.ehdr->e_shentsize;
1009 	shdr = alloc_pread(VECTX_HANDLE(&ef), ef.ehdr->e_shoff, size);
1010 	if (shdr == NULL) {
1011 		err = ENOMEM;
1012 		goto out;
1013 	}
1014 
1015 	/* Load shstrtab. */
1016 	shstrtab = alloc_pread(VECTX_HANDLE(&ef), shdr[ef.ehdr->e_shstrndx].sh_offset,
1017 	    shdr[ef.ehdr->e_shstrndx].sh_size);
1018 	if (shstrtab == NULL) {
1019 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1020 		    "load_modmetadata: unable to load shstrtab\n");
1021 		err = EFTYPE;
1022 		goto out;
1023 	}
1024 
1025 	/* Find set_modmetadata_set and data sections. */
1026 	sh_data[0] = sh_data[1] = sh_meta = NULL;
1027 	for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) {
1028 		if (strcmp(&shstrtab[shdr[i].sh_name],
1029 		    "set_modmetadata_set") == 0) {
1030 			sh_meta = &shdr[i];
1031 		}
1032 		if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) ||
1033 		    (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) {
1034 			sh_data[j++] = &shdr[i];
1035 		}
1036 	}
1037 	if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) {
1038 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1039     "load_modmetadata: unable to find set_modmetadata_set or data sections\n");
1040 		err = EFTYPE;
1041 		goto out;
1042 	}
1043 
1044 	/* Load set_modmetadata_set into memory */
1045 	err = kern_pread(VECTX_HANDLE(&ef), dest, sh_meta->sh_size, sh_meta->sh_offset);
1046 	if (err != 0) {
1047 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1048     "load_modmetadata: unable to load set_modmetadata_set: %d\n", err);
1049 		goto out;
1050 	}
1051 	p_start = dest;
1052 	p_end = dest + sh_meta->sh_size;
1053 	dest += sh_meta->sh_size;
1054 
1055 	/* Load data sections into memory. */
1056 	err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[0]->sh_size,
1057 	    sh_data[0]->sh_offset);
1058 	if (err != 0) {
1059 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1060 		    "load_modmetadata: unable to load data: %d\n", err);
1061 		goto out;
1062 	}
1063 
1064 	/*
1065 	 * Advance dest so that a single offset (ef.off) maps the link-time
1066 	 * addresses of both .data and .rodata to their staged copies.
1067 	 */
1068 	ef.off = -(sh_data[0]->sh_addr - dest);
1069 	dest +=	(sh_data[1]->sh_addr - sh_data[0]->sh_addr);
1070 
1071 	err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[1]->sh_size,
1072 	    sh_data[1]->sh_offset);
1073 	if (err != 0) {
1074 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1075 		    "load_modmetadata: unable to load data: %d\n", err);
1076 		goto out;
1077 	}
1078 
1079 	err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end);
1080 	if (err != 0) {
1081 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1082 		    "load_modmetadata: unable to parse metadata: %d\n", err);
1083 		goto out;
1084 	}
1085 
1086 out:
1087 	if (shstrtab != NULL)
1088 		free(shstrtab);
1089 	if (shdr != NULL)
1090 		free(shdr);
1091 	if (ef.firstpage != NULL)
1092 		free(ef.firstpage);
1093 	if (ef.fd != -1) {
1094 #ifdef LOADER_VERIEXEC_VECTX
1095 		if (!err && ef.vctx) {
1096 			int verror;
1097 
1098 			verror = vectx_close(ef.vctx, VE_MUST, __func__);
1099 			if (verror) {
1100 				err = EAUTH;
1101 				file_discard(fp);
1102 			}
1103 		}
1104 #endif
1105 		close(ef.fd);
1106 	}
1107 	return (err);
1108 }
1109 
1110 int
1111 __elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef,
1112     Elf_Addr p_start, Elf_Addr p_end)
1113 {
1114 	struct mod_metadata md;
1115 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1116 	struct mod_metadata64 md64;
1117 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1118 	struct mod_metadata32 md32;
1119 #endif
1120 	struct mod_depend *mdepend;
1121 	struct mod_version mver;
1122 	char *s;
1123 	int error, modcnt, minfolen;
1124 	Elf_Addr v, p;
1125 
1126 	modcnt = 0;
1127 	p = p_start;
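	/*
	 * set_modmetadata_set is a linker set: an array of Elf_Addr pointers,
	 * one per entry, each pointing at a struct mod_metadata in the image.
	 * Each pointer is relocated (or, for an already-relocated kernel,
	 * offset by ef->off) before the record it addresses is copied out.
	 */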
1128 	while (p < p_end) {
1129 		COPYOUT(p, &v, sizeof(v));
1130 		error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
1131 		if (error == EOPNOTSUPP)
1132 			v += ef->off;
1133 		else if (error != 0)
1134 			return (error);
1135 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1136 		COPYOUT(v, &md64, sizeof(md64));
1137 		error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64));
1138 		if (error == EOPNOTSUPP) {
1139 			md64.md_cval += ef->off;
1140 			md64.md_data += ef->off;
1141 		} else if (error != 0)
1142 			return (error);
1143 		md.md_version = md64.md_version;
1144 		md.md_type = md64.md_type;
1145 		md.md_cval = (const char *)(uintptr_t)md64.md_cval;
1146 		md.md_data = (void *)(uintptr_t)md64.md_data;
1147 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1148 		COPYOUT(v, &md32, sizeof(md32));
1149 		error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32));
1150 		if (error == EOPNOTSUPP) {
1151 			md32.md_cval += ef->off;
1152 			md32.md_data += ef->off;
1153 		} else if (error != 0)
1154 			return (error);
1155 		md.md_version = md32.md_version;
1156 		md.md_type = md32.md_type;
1157 		md.md_cval = (const char *)(uintptr_t)md32.md_cval;
1158 		md.md_data = (void *)(uintptr_t)md32.md_data;
1159 #else
1160 		COPYOUT(v, &md, sizeof(md));
1161 		error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md));
1162 		if (error == EOPNOTSUPP) {
1163 			md.md_cval += ef->off;
1164 			md.md_data = (void *)((uintptr_t)md.md_data +
1165 			    (uintptr_t)ef->off);
1166 		} else if (error != 0)
1167 			return (error);
1168 #endif
1169 		p += sizeof(Elf_Addr);
1170 		switch(md.md_type) {
1171 		case MDT_DEPEND:
1172 			if (ef->kernel) /* kernel must not depend on anything */
1173 				break;
1174 			s = strdupout((vm_offset_t)md.md_cval);
1175 			minfolen = sizeof(*mdepend) + strlen(s) + 1;
1176 			mdepend = malloc(minfolen);
1177 			if (mdepend == NULL)
1178 				return ENOMEM;
1179 			COPYOUT((vm_offset_t)md.md_data, mdepend,
1180 			    sizeof(*mdepend));
1181 			strcpy((char*)(mdepend + 1), s);
1182 			free(s);
1183 			file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen,
1184 			    mdepend);
1185 			free(mdepend);
1186 			break;
1187 		case MDT_VERSION:
1188 			s = strdupout((vm_offset_t)md.md_cval);
1189 			COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver));
1190 			file_addmodule(fp, s, mver.mv_version, NULL);
1191 			free(s);
1192 			modcnt++;
1193 			break;
1194 		}
1195 	}
1196 	if (modcnt == 0) {
1197 		s = fake_modname(fp->f_name);
1198 		file_addmodule(fp, s, 1, NULL);
1199 		free(s);
1200 	}
1201 	return 0;
1202 }
1203 
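/*
 * The hash function defined by the SysV ABI for ELF .hash sections; it must
 * match what the linker used when building DT_HASH, so it cannot change.
 */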
1204 static unsigned long
1205 elf_hash(const char *name)
1206 {
1207 	const unsigned char *p = (const unsigned char *) name;
1208 	unsigned long h = 0;
1209 	unsigned long g;
1210 
1211 	while (*p != '\0') {
1212 		h = (h << 4) + *p++;
1213 		if ((g = h & 0xf0000000) != 0)
1214 			h ^= g >> 24;
1215 		h &= ~g;
1216 	}
1217 	return h;
1218 }
1219 
1220 static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE)
1221     "_lookup_symbol: corrupt symbol table\n";
1222 int
1223 __elfN(lookup_symbol)(elf_file_t ef, const char* name, Elf_Sym *symp,
1224     unsigned char type)
1225 {
1226 	Elf_Hashelt symnum;
1227 	Elf_Sym sym;
1228 	char *strp;
1229 	unsigned long hash;
1230 
1231 	if (ef->nbuckets == 0) {
1232 		printf(__elfN(bad_symtable));
1233 		return ENOENT;
1234 	}
1235 
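	/*
	 * Classic SysV hash lookup: start at the bucket selected by the name
	 * hash and follow the chain array until the name matches or the chain
	 * ends at STN_UNDEF.
	 */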
1236 	hash = elf_hash(name);
1237 	COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum));
1238 
1239 	while (symnum != STN_UNDEF) {
1240 		if (symnum >= ef->nchains) {
1241 			printf(__elfN(bad_symtable));
1242 			return ENOENT;
1243 		}
1244 
1245 		COPYOUT(ef->symtab + symnum, &sym, sizeof(sym));
1246 		if (sym.st_name == 0) {
1247 			printf(__elfN(bad_symtable));
1248 			return ENOENT;
1249 		}
1250 
1251 		strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name));
1252 		if (strcmp(name, strp) == 0) {
1253 			free(strp);
1254 			if (sym.st_shndx != SHN_UNDEF && sym.st_value != 0 &&
1255 			    ELF_ST_TYPE(sym.st_info) == type) {
1256 				*symp = sym;
1257 				return 0;
1258 			}
1259 			return ENOENT;
1260 		}
1261 		free(strp);
1262 		COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum));
1263 	}
1264 	return ENOENT;
1265 }
1266 
1267 /*
1268  * Apply any intra-module relocations to the value. p is the load address
1269  * of the value and val/len is the value to be modified. This does NOT modify
1270  * the image in-place, because this is done by kern_linker later on.
1271  *
1272  * Returns EOPNOTSUPP if no relocation method is supplied.
1273  */
1274 static int
1275 __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
1276     Elf_Addr p, void *val, size_t len)
1277 {
1278 	size_t n;
1279 	Elf_Rela a;
1280 	Elf_Rel r;
1281 	int error;
1282 
1283 	/*
1284 	 * The kernel is already relocated, but we still want to apply
1285 	 * offset adjustments.
1286 	 */
1287 	if (ef->kernel)
1288 		return (EOPNOTSUPP);
1289 
1290 	for (n = 0; n < ef->relsz / sizeof(r); n++) {
1291 		COPYOUT(ef->rel + n, &r, sizeof(r));
1292 
1293 		error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL,
1294 		    ef->off, p, val, len);
1295 		if (error != 0)
1296 			return (error);
1297 	}
1298 	for (n = 0; n < ef->relasz / sizeof(a); n++) {
1299 		COPYOUT(ef->rela + n, &a, sizeof(a));
1300 
1301 		error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA,
1302 		    ef->off, p, val, len);
1303 		if (error != 0)
1304 			return (error);
1305 	}
1306 
1307 	return (0);
1308 }
1309 
1310 static Elf_Addr
1311 __elfN(symaddr)(struct elf_file *ef, Elf_Size symidx)
1312 {
1313 
1314 	/* Symbol lookup by index not required here. */
1315 	return (0);
1316 }
1317