xref: /freebsd/sys/kern/imgact_elf.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /*-
2  * Copyright (c) 2000 David O'Brien
3  * Copyright (c) 1995-1996 S�ren Schmidt
4  * Copyright (c) 1996 Peter Wemm
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer
12  *    in this position and unchanged.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_compat.h"
35 
36 #include <sys/param.h>
37 #include <sys/exec.h>
38 #include <sys/fcntl.h>
39 #include <sys/imgact.h>
40 #include <sys/imgact_elf.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mount.h>
45 #include <sys/mutex.h>
46 #include <sys/mman.h>
47 #include <sys/namei.h>
48 #include <sys/pioctl.h>
49 #include <sys/proc.h>
50 #include <sys/procfs.h>
51 #include <sys/resourcevar.h>
52 #include <sys/sf_buf.h>
53 #include <sys/systm.h>
54 #include <sys/signalvar.h>
55 #include <sys/stat.h>
56 #include <sys/sx.h>
57 #include <sys/syscall.h>
58 #include <sys/sysctl.h>
59 #include <sys/sysent.h>
60 #include <sys/vnode.h>
61 
62 #include <vm/vm.h>
63 #include <vm/vm_kern.h>
64 #include <vm/vm_param.h>
65 #include <vm/pmap.h>
66 #include <vm/vm_map.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_extern.h>
69 
70 #include <machine/elf.h>
71 #include <machine/md_var.h>
72 
73 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
74 #include <machine/fpu.h>
75 #include <compat/ia32/ia32_reg.h>
76 #endif
77 
78 #define OLD_EI_BRAND	8
79 
80 static int __elfN(check_header)(const Elf_Ehdr *hdr);
81 static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
82     const char *interp, int32_t *osrel);
83 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
84     u_long *entry, size_t pagesize);
85 static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
86     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
87     vm_prot_t prot, size_t pagesize);
88 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
89 static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
90     int32_t *osrel);
91 static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
92 static boolean_t __elfN(check_note)(struct image_params *imgp,
93     Elf_Brandnote *checknote, int32_t *osrel);
94 
95 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
96     "");
97 
98 int __elfN(fallback_brand) = -1;
99 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
100     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
101     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
102 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
103     &__elfN(fallback_brand));
104 
105 static int elf_legacy_coredump = 0;
106 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
107     &elf_legacy_coredump, 0, "");
108 
109 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
110 
111 #define	trunc_page_ps(va, ps)	((va) & ~(ps - 1))
112 #define	round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))
113 #define	aligned(a, t)	(trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
114 
115 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
116 
117 Elf_Brandnote __elfN(freebsd_brandnote) = {
118 	.hdr.n_namesz	= sizeof(FREEBSD_ABI_VENDOR),
119 	.hdr.n_descsz	= sizeof(int32_t),
120 	.hdr.n_type	= 1,
121 	.vendor		= FREEBSD_ABI_VENDOR,
122 	.flags		= BN_TRANSLATE_OSREL,
123 	.trans_osrel	= __elfN(freebsd_trans_osrel)
124 };
125 
126 static boolean_t
127 __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
128 {
129 	uintptr_t p;
130 
131 	p = (uintptr_t)(note + 1);
132 	p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
133 	*osrel = *(const int32_t *)(p);
134 
135 	return (TRUE);
136 }
137 
138 static const char GNU_ABI_VENDOR[] = "GNU";
139 static int GNU_KFREEBSD_ABI_DESC = 3;
140 
141 Elf_Brandnote __elfN(kfreebsd_brandnote) = {
142 	.hdr.n_namesz	= sizeof(GNU_ABI_VENDOR),
143 	.hdr.n_descsz	= 16,	/* XXX at least 16 */
144 	.hdr.n_type	= 1,
145 	.vendor		= GNU_ABI_VENDOR,
146 	.flags		= BN_TRANSLATE_OSREL,
147 	.trans_osrel	= kfreebsd_trans_osrel
148 };
149 
150 static boolean_t
151 kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
152 {
153 	const Elf32_Word *desc;
154 	uintptr_t p;
155 
156 	p = (uintptr_t)(note + 1);
157 	p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
158 
159 	desc = (const Elf32_Word *)p;
160 	if (desc[0] != GNU_KFREEBSD_ABI_DESC)
161 		return (FALSE);
162 
163 	/*
164 	 * Debian GNU/kFreeBSD embed the earliest compatible kernel version
165 	 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
166 	 */
167 	*osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];
168 
169 	return (TRUE);
170 }
171 
172 int
173 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
174 {
175 	int i;
176 
177 	for (i = 0; i < MAX_BRANDS; i++) {
178 		if (elf_brand_list[i] == NULL) {
179 			elf_brand_list[i] = entry;
180 			break;
181 		}
182 	}
183 	if (i == MAX_BRANDS) {
184 		printf("WARNING: %s: could not insert brandinfo entry: %p\n",
185 			__func__, entry);
186 		return (-1);
187 	}
188 	return (0);
189 }
190 
191 int
192 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
193 {
194 	int i;
195 
196 	for (i = 0; i < MAX_BRANDS; i++) {
197 		if (elf_brand_list[i] == entry) {
198 			elf_brand_list[i] = NULL;
199 			break;
200 		}
201 	}
202 	if (i == MAX_BRANDS)
203 		return (-1);
204 	return (0);
205 }
206 
207 int
208 __elfN(brand_inuse)(Elf_Brandinfo *entry)
209 {
210 	struct proc *p;
211 	int rval = FALSE;
212 
213 	sx_slock(&allproc_lock);
214 	FOREACH_PROC_IN_SYSTEM(p) {
215 		if (p->p_sysent == entry->sysvec) {
216 			rval = TRUE;
217 			break;
218 		}
219 	}
220 	sx_sunlock(&allproc_lock);
221 
222 	return (rval);
223 }
224 
225 static Elf_Brandinfo *
226 __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
227     int32_t *osrel)
228 {
229 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
230 	Elf_Brandinfo *bi;
231 	boolean_t ret;
232 	int i;
233 
234 	/*
235 	 * We support four types of branding -- (1) the ELF EI_OSABI field
236 	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
237 	 * branding w/in the ELF header, (3) path of the `interp_path'
238 	 * field, and (4) the ".note.ABI-tag" ELF section.
239 	 */
240 
241 	/* Look for an ".note.ABI-tag" ELF section */
242 	for (i = 0; i < MAX_BRANDS; i++) {
243 		bi = elf_brand_list[i];
244 		if (bi == NULL)
245 			continue;
246 		if (hdr->e_machine == bi->machine && (bi->flags &
247 		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
248 			ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
249 			if (ret)
250 				return (bi);
251 		}
252 	}
253 
254 	/* If the executable has a brand, search for it in the brand list. */
255 	for (i = 0; i < MAX_BRANDS; i++) {
256 		bi = elf_brand_list[i];
257 		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
258 			continue;
259 		if (hdr->e_machine == bi->machine &&
260 		    (hdr->e_ident[EI_OSABI] == bi->brand ||
261 		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
262 		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
263 			return (bi);
264 	}
265 
266 	/* Lacking a known brand, search for a recognized interpreter. */
267 	if (interp != NULL) {
268 		for (i = 0; i < MAX_BRANDS; i++) {
269 			bi = elf_brand_list[i];
270 			if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
271 				continue;
272 			if (hdr->e_machine == bi->machine &&
273 			    strcmp(interp, bi->interp_path) == 0)
274 				return (bi);
275 		}
276 	}
277 
278 	/* Lacking a recognized interpreter, try the default brand */
279 	for (i = 0; i < MAX_BRANDS; i++) {
280 		bi = elf_brand_list[i];
281 		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
282 			continue;
283 		if (hdr->e_machine == bi->machine &&
284 		    __elfN(fallback_brand) == bi->brand)
285 			return (bi);
286 	}
287 	return (NULL);
288 }
289 
290 static int
291 __elfN(check_header)(const Elf_Ehdr *hdr)
292 {
293 	Elf_Brandinfo *bi;
294 	int i;
295 
296 	if (!IS_ELF(*hdr) ||
297 	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
298 	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
299 	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
300 	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
301 	    hdr->e_version != ELF_TARG_VER)
302 		return (ENOEXEC);
303 
304 	/*
305 	 * Make sure we have at least one brand for this machine.
306 	 */
307 
308 	for (i = 0; i < MAX_BRANDS; i++) {
309 		bi = elf_brand_list[i];
310 		if (bi != NULL && bi->machine == hdr->e_machine)
311 			break;
312 	}
313 	if (i == MAX_BRANDS)
314 		return (ENOEXEC);
315 
316 	return (0);
317 }
318 
319 static int
320 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
321     vm_offset_t start, vm_offset_t end, vm_prot_t prot)
322 {
323 	struct sf_buf *sf;
324 	int error;
325 	vm_offset_t off;
326 
327 	/*
328 	 * Create the page if it doesn't exist yet. Ignore errors.
329 	 */
330 	vm_map_lock(map);
331 	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
332 	    VM_PROT_ALL, VM_PROT_ALL, 0);
333 	vm_map_unlock(map);
334 
335 	/*
336 	 * Find the page from the underlying object.
337 	 */
338 	if (object) {
339 		sf = vm_imgact_map_page(object, offset);
340 		if (sf == NULL)
341 			return (KERN_FAILURE);
342 		off = offset - trunc_page(offset);
343 		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
344 		    end - start);
345 		vm_imgact_unmap_page(sf);
346 		if (error) {
347 			return (KERN_FAILURE);
348 		}
349 	}
350 
351 	return (KERN_SUCCESS);
352 }
353 
354 static int
355 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
356     vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
357 {
358 	struct sf_buf *sf;
359 	vm_offset_t off;
360 	vm_size_t sz;
361 	int error, rv;
362 
363 	if (start != trunc_page(start)) {
364 		rv = __elfN(map_partial)(map, object, offset, start,
365 		    round_page(start), prot);
366 		if (rv)
367 			return (rv);
368 		offset += round_page(start) - start;
369 		start = round_page(start);
370 	}
371 	if (end != round_page(end)) {
372 		rv = __elfN(map_partial)(map, object, offset +
373 		    trunc_page(end) - start, trunc_page(end), end, prot);
374 		if (rv)
375 			return (rv);
376 		end = trunc_page(end);
377 	}
378 	if (end > start) {
379 		if (offset & PAGE_MASK) {
380 			/*
381 			 * The mapping is not page aligned. This means we have
382 			 * to copy the data. Sigh.
383 			 */
384 			rv = vm_map_find(map, NULL, 0, &start, end - start,
385 			    FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
386 			if (rv)
387 				return (rv);
388 			if (object == NULL)
389 				return (KERN_SUCCESS);
390 			for (; start < end; start += sz) {
391 				sf = vm_imgact_map_page(object, offset);
392 				if (sf == NULL)
393 					return (KERN_FAILURE);
394 				off = offset - trunc_page(offset);
395 				sz = end - start;
396 				if (sz > PAGE_SIZE - off)
397 					sz = PAGE_SIZE - off;
398 				error = copyout((caddr_t)sf_buf_kva(sf) + off,
399 				    (caddr_t)start, sz);
400 				vm_imgact_unmap_page(sf);
401 				if (error) {
402 					return (KERN_FAILURE);
403 				}
404 				offset += sz;
405 			}
406 			rv = KERN_SUCCESS;
407 		} else {
408 			vm_object_reference(object);
409 			vm_map_lock(map);
410 			rv = vm_map_insert(map, object, offset, start, end,
411 			    prot, VM_PROT_ALL, cow);
412 			vm_map_unlock(map);
413 			if (rv != KERN_SUCCESS)
414 				vm_object_deallocate(object);
415 		}
416 		return (rv);
417 	} else {
418 		return (KERN_SUCCESS);
419 	}
420 }
421 
422 static int
423 __elfN(load_section)(struct vmspace *vmspace,
424 	vm_object_t object, vm_offset_t offset,
425 	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
426 	size_t pagesize)
427 {
428 	struct sf_buf *sf;
429 	size_t map_len;
430 	vm_offset_t map_addr;
431 	int error, rv, cow;
432 	size_t copy_len;
433 	vm_offset_t file_addr;
434 
435 	/*
436 	 * It's necessary to fail if the filsz + offset taken from the
437 	 * header is greater than the actual file pager object's size.
438 	 * If we were to allow this, then the vm_map_find() below would
439 	 * walk right off the end of the file object and into the ether.
440 	 *
441 	 * While I'm here, might as well check for something else that
442 	 * is invalid: filsz cannot be greater than memsz.
443 	 */
444 	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
445 	    filsz > memsz) {
446 		uprintf("elf_load_section: truncated ELF file\n");
447 		return (ENOEXEC);
448 	}
449 
450 	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
451 	file_addr = trunc_page_ps(offset, pagesize);
452 
453 	/*
454 	 * We have two choices.  We can either clear the data in the last page
455 	 * of an oversized mapping, or we can start the anon mapping a page
456 	 * early and copy the initialized data into that first page.  We
457 	 * choose the second..
458 	 */
459 	if (memsz > filsz)
460 		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
461 	else
462 		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
463 
464 	if (map_len != 0) {
465 		/* cow flags: don't dump readonly sections in core */
466 		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
467 		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
468 
469 		rv = __elfN(map_insert)(&vmspace->vm_map,
470 				      object,
471 				      file_addr,	/* file offset */
472 				      map_addr,		/* virtual start */
473 				      map_addr + map_len,/* virtual end */
474 				      prot,
475 				      cow);
476 		if (rv != KERN_SUCCESS)
477 			return (EINVAL);
478 
479 		/* we can stop now if we've covered it all */
480 		if (memsz == filsz) {
481 			return (0);
482 		}
483 	}
484 
485 
486 	/*
487 	 * We have to get the remaining bit of the file into the first part
488 	 * of the oversized map segment.  This is normally because the .data
489 	 * segment in the file is extended to provide bss.  It's a neat idea
490 	 * to try and save a page, but it's a pain in the behind to implement.
491 	 */
492 	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
493 	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
494 	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
495 	    map_addr;
496 
497 	/* This had damn well better be true! */
498 	if (map_len != 0) {
499 		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
500 		    map_addr + map_len, VM_PROT_ALL, 0);
501 		if (rv != KERN_SUCCESS) {
502 			return (EINVAL);
503 		}
504 	}
505 
506 	if (copy_len != 0) {
507 		vm_offset_t off;
508 
509 		sf = vm_imgact_map_page(object, offset + filsz);
510 		if (sf == NULL)
511 			return (EIO);
512 
513 		/* send the page fragment to user space */
514 		off = trunc_page_ps(offset + filsz, pagesize) -
515 		    trunc_page(offset + filsz);
516 		error = copyout((caddr_t)sf_buf_kva(sf) + off,
517 		    (caddr_t)map_addr, copy_len);
518 		vm_imgact_unmap_page(sf);
519 		if (error) {
520 			return (error);
521 		}
522 	}
523 
524 	/*
525 	 * set it to the specified protection.
526 	 * XXX had better undo the damage from pasting over the cracks here!
527 	 */
528 	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
529 	    round_page(map_addr + map_len),  prot, FALSE);
530 
531 	return (0);
532 }
533 
534 /*
535  * Load the file "file" into memory.  It may be either a shared object
536  * or an executable.
537  *
538  * The "addr" reference parameter is in/out.  On entry, it specifies
539  * the address where a shared object should be loaded.  If the file is
540  * an executable, this value is ignored.  On exit, "addr" specifies
541  * where the file was actually loaded.
542  *
543  * The "entry" reference parameter is out only.  On exit, it specifies
544  * the entry point for the loaded file.
545  */
546 static int
547 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
548 	u_long *entry, size_t pagesize)
549 {
550 	struct {
551 		struct nameidata nd;
552 		struct vattr attr;
553 		struct image_params image_params;
554 	} *tempdata;
555 	const Elf_Ehdr *hdr = NULL;
556 	const Elf_Phdr *phdr = NULL;
557 	struct nameidata *nd;
558 	struct vmspace *vmspace = p->p_vmspace;
559 	struct vattr *attr;
560 	struct image_params *imgp;
561 	vm_prot_t prot;
562 	u_long rbase;
563 	u_long base_addr = 0;
564 	int vfslocked, error, i, numsegs;
565 
566 	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
567 	nd = &tempdata->nd;
568 	attr = &tempdata->attr;
569 	imgp = &tempdata->image_params;
570 
571 	/*
572 	 * Initialize part of the common data
573 	 */
574 	imgp->proc = p;
575 	imgp->attr = attr;
576 	imgp->firstpage = NULL;
577 	imgp->image_header = NULL;
578 	imgp->object = NULL;
579 	imgp->execlabel = NULL;
580 
581 	NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
582 	    curthread);
583 	vfslocked = 0;
584 	if ((error = namei(nd)) != 0) {
585 		nd->ni_vp = NULL;
586 		goto fail;
587 	}
588 	vfslocked = NDHASGIANT(nd);
589 	NDFREE(nd, NDF_ONLY_PNBUF);
590 	imgp->vp = nd->ni_vp;
591 
592 	/*
593 	 * Check permissions, modes, uid, etc on the file, and "open" it.
594 	 */
595 	error = exec_check_permissions(imgp);
596 	if (error)
597 		goto fail;
598 
599 	error = exec_map_first_page(imgp);
600 	if (error)
601 		goto fail;
602 
603 	/*
604 	 * Also make certain that the interpreter stays the same, so set
605 	 * its VV_TEXT flag, too.
606 	 */
607 	nd->ni_vp->v_vflag |= VV_TEXT;
608 
609 	imgp->object = nd->ni_vp->v_object;
610 
611 	hdr = (const Elf_Ehdr *)imgp->image_header;
612 	if ((error = __elfN(check_header)(hdr)) != 0)
613 		goto fail;
614 	if (hdr->e_type == ET_DYN)
615 		rbase = *addr;
616 	else if (hdr->e_type == ET_EXEC)
617 		rbase = 0;
618 	else {
619 		error = ENOEXEC;
620 		goto fail;
621 	}
622 
623 	/* Only support headers that fit within first page for now      */
624 	/*    (multiplication of two Elf_Half fields will not overflow) */
625 	if ((hdr->e_phoff > PAGE_SIZE) ||
626 	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
627 		error = ENOEXEC;
628 		goto fail;
629 	}
630 
631 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
632 	if (!aligned(phdr, Elf_Addr)) {
633 		error = ENOEXEC;
634 		goto fail;
635 	}
636 
637 	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
638 		if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
639 			/* Loadable segment */
640 			prot = 0;
641 			if (phdr[i].p_flags & PF_X)
642   				prot |= VM_PROT_EXECUTE;
643 			if (phdr[i].p_flags & PF_W)
644   				prot |= VM_PROT_WRITE;
645 			if (phdr[i].p_flags & PF_R)
646   				prot |= VM_PROT_READ;
647 
648 			if ((error = __elfN(load_section)(vmspace,
649 			    imgp->object, phdr[i].p_offset,
650 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
651 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
652 			    pagesize)) != 0)
653 				goto fail;
654 			/*
655 			 * Establish the base address if this is the
656 			 * first segment.
657 			 */
658 			if (numsegs == 0)
659   				base_addr = trunc_page(phdr[i].p_vaddr +
660 				    rbase);
661 			numsegs++;
662 		}
663 	}
664 	*addr = base_addr;
665 	*entry = (unsigned long)hdr->e_entry + rbase;
666 
667 fail:
668 	if (imgp->firstpage)
669 		exec_unmap_first_page(imgp);
670 
671 	if (nd->ni_vp)
672 		vput(nd->ni_vp);
673 
674 	VFS_UNLOCK_GIANT(vfslocked);
675 	free(tempdata, M_TEMP);
676 
677 	return (error);
678 }
679 
680 static int
681 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
682 {
683 	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
684 	const Elf_Phdr *phdr;
685 	Elf_Auxargs *elf_auxargs;
686 	struct vmspace *vmspace;
687 	vm_prot_t prot;
688 	u_long text_size = 0, data_size = 0, total_size = 0;
689 	u_long text_addr = 0, data_addr = 0;
690 	u_long seg_size, seg_addr;
691 	u_long addr, baddr, et_dyn_addr, entry = 0, proghdr = 0;
692 	int32_t osrel = 0;
693 	int error = 0, i, n;
694 	const char *interp = NULL, *newinterp = NULL;
695 	Elf_Brandinfo *brand_info;
696 	char *path;
697 	struct sysentvec *sv;
698 
699 	/*
700 	 * Do we have a valid ELF header ?
701 	 *
702 	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
703 	 * if particular brand doesn't support it.
704 	 */
705 	if (__elfN(check_header)(hdr) != 0 ||
706 	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
707 		return (-1);
708 
709 	/*
710 	 * From here on down, we return an errno, not -1, as we've
711 	 * detected an ELF file.
712 	 */
713 
714 	if ((hdr->e_phoff > PAGE_SIZE) ||
715 	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
716 		/* Only support headers in first page for now */
717 		return (ENOEXEC);
718 	}
719 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
720 	if (!aligned(phdr, Elf_Addr))
721 		return (ENOEXEC);
722 	n = 0;
723 	baddr = 0;
724 	for (i = 0; i < hdr->e_phnum; i++) {
725 		if (phdr[i].p_type == PT_LOAD) {
726 			if (n == 0)
727 				baddr = phdr[i].p_vaddr;
728 			n++;
729 			continue;
730 		}
731 		if (phdr[i].p_type == PT_INTERP) {
732 			/* Path to interpreter */
733 			if (phdr[i].p_filesz > MAXPATHLEN ||
734 			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
735 				return (ENOEXEC);
736 			interp = imgp->image_header + phdr[i].p_offset;
737 			continue;
738 		}
739 	}
740 
741 	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel);
742 	if (brand_info == NULL) {
743 		uprintf("ELF binary type \"%u\" not known.\n",
744 		    hdr->e_ident[EI_OSABI]);
745 		return (ENOEXEC);
746 	}
747 	if (hdr->e_type == ET_DYN) {
748 		if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0)
749 			return (ENOEXEC);
750 		/*
751 		 * Honour the base load address from the dso if it is
752 		 * non-zero for some reason.
753 		 */
754 		if (baddr == 0)
755 			et_dyn_addr = ET_DYN_LOAD_ADDR;
756 		else
757 			et_dyn_addr = 0;
758 	} else
759 		et_dyn_addr = 0;
760 	sv = brand_info->sysvec;
761 	if (interp != NULL && brand_info->interp_newpath != NULL)
762 		newinterp = brand_info->interp_newpath;
763 
764 	/*
765 	 * Avoid a possible deadlock if the current address space is destroyed
766 	 * and that address space maps the locked vnode.  In the common case,
767 	 * the locked vnode's v_usecount is decremented but remains greater
768 	 * than zero.  Consequently, the vnode lock is not needed by vrele().
769 	 * However, in cases where the vnode lock is external, such as nullfs,
770 	 * v_usecount may become zero.
771 	 */
772 	VOP_UNLOCK(imgp->vp, 0);
773 
774 	error = exec_new_vmspace(imgp, sv);
775 	imgp->proc->p_sysent = sv;
776 
777 	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
778 	if (error)
779 		return (error);
780 
781 	vmspace = imgp->proc->p_vmspace;
782 
783 	for (i = 0; i < hdr->e_phnum; i++) {
784 		switch (phdr[i].p_type) {
785 		case PT_LOAD:	/* Loadable segment */
786 			if (phdr[i].p_memsz == 0)
787 				break;
788 			prot = 0;
789 			if (phdr[i].p_flags & PF_X)
790   				prot |= VM_PROT_EXECUTE;
791 			if (phdr[i].p_flags & PF_W)
792   				prot |= VM_PROT_WRITE;
793 			if (phdr[i].p_flags & PF_R)
794   				prot |= VM_PROT_READ;
795 
796 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
797 			/*
798 			 * Some x86 binaries assume read == executable,
799 			 * notably the M3 runtime and therefore cvsup
800 			 */
801 			if (prot & VM_PROT_READ)
802 				prot |= VM_PROT_EXECUTE;
803 #endif
804 
805 			if ((error = __elfN(load_section)(vmspace,
806 			    imgp->object, phdr[i].p_offset,
807 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
808 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
809 			    sv->sv_pagesize)) != 0)
810 				return (error);
811 
812 			/*
813 			 * If this segment contains the program headers,
814 			 * remember their virtual address for the AT_PHDR
815 			 * aux entry. Static binaries don't usually include
816 			 * a PT_PHDR entry.
817 			 */
818 			if (phdr[i].p_offset == 0 &&
819 			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
820 				<= phdr[i].p_filesz)
821 				proghdr = phdr[i].p_vaddr + hdr->e_phoff +
822 				    et_dyn_addr;
823 
824 			seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
825 			seg_size = round_page(phdr[i].p_memsz +
826 			    phdr[i].p_vaddr + et_dyn_addr - seg_addr);
827 
828 			/*
829 			 * Is this .text or .data?  We can't use
830 			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
831 			 * alpha terribly and possibly does other bad
832 			 * things so we stick to the old way of figuring
833 			 * it out:  If the segment contains the program
834 			 * entry point, it's a text segment, otherwise it
835 			 * is a data segment.
836 			 *
837 			 * Note that obreak() assumes that data_addr +
838 			 * data_size == end of data load area, and the ELF
839 			 * file format expects segments to be sorted by
840 			 * address.  If multiple data segments exist, the
841 			 * last one will be used.
842 			 */
843 			if (hdr->e_entry >= phdr[i].p_vaddr &&
844 			    hdr->e_entry < (phdr[i].p_vaddr +
845 			    phdr[i].p_memsz)) {
846 				text_size = seg_size;
847 				text_addr = seg_addr;
848 				entry = (u_long)hdr->e_entry + et_dyn_addr;
849 			} else {
850 				data_size = seg_size;
851 				data_addr = seg_addr;
852 			}
853 			total_size += seg_size;
854 			break;
855 		case PT_PHDR: 	/* Program header table info */
856 			proghdr = phdr[i].p_vaddr + et_dyn_addr;
857 			break;
858 		default:
859 			break;
860 		}
861 	}
862 
863 	if (data_addr == 0 && data_size == 0) {
864 		data_addr = text_addr;
865 		data_size = text_size;
866 	}
867 
868 	/*
869 	 * Check limits.  It should be safe to check the
870 	 * limits after loading the segments since we do
871 	 * not actually fault in all the segments pages.
872 	 */
873 	PROC_LOCK(imgp->proc);
874 	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
875 	    text_size > maxtsiz ||
876 	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
877 		PROC_UNLOCK(imgp->proc);
878 		return (ENOMEM);
879 	}
880 
881 	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
882 	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
883 	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
884 	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
885 
886 	/*
887 	 * We load the dynamic linker where a userland call
888 	 * to mmap(0, ...) would put it.  The rationale behind this
889 	 * calculation is that it leaves room for the heap to grow to
890 	 * its maximum allowed size.
891 	 */
892 	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
893 	    lim_max(imgp->proc, RLIMIT_DATA));
894 	PROC_UNLOCK(imgp->proc);
895 
896 	imgp->entry_addr = entry;
897 
898 	if (interp != NULL) {
899 		int have_interp = FALSE;
900 		VOP_UNLOCK(imgp->vp, 0);
901 		if (brand_info->emul_path != NULL &&
902 		    brand_info->emul_path[0] != '\0') {
903 			path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
904 			snprintf(path, MAXPATHLEN, "%s%s",
905 			    brand_info->emul_path, interp);
906 			error = __elfN(load_file)(imgp->proc, path, &addr,
907 			    &imgp->entry_addr, sv->sv_pagesize);
908 			free(path, M_TEMP);
909 			if (error == 0)
910 				have_interp = TRUE;
911 		}
912 		if (!have_interp && newinterp != NULL) {
913 			error = __elfN(load_file)(imgp->proc, newinterp, &addr,
914 			    &imgp->entry_addr, sv->sv_pagesize);
915 			if (error == 0)
916 				have_interp = TRUE;
917 		}
918 		if (!have_interp) {
919 			error = __elfN(load_file)(imgp->proc, interp, &addr,
920 			    &imgp->entry_addr, sv->sv_pagesize);
921 		}
922 		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
923 		if (error != 0) {
924 			uprintf("ELF interpreter %s not found\n", interp);
925 			return (error);
926 		}
927 	} else
928 		addr = et_dyn_addr;
929 
930 	/*
931 	 * Construct auxargs table (used by the fixup routine)
932 	 */
933 	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
934 	elf_auxargs->execfd = -1;
935 	elf_auxargs->phdr = proghdr;
936 	elf_auxargs->phent = hdr->e_phentsize;
937 	elf_auxargs->phnum = hdr->e_phnum;
938 	elf_auxargs->pagesz = PAGE_SIZE;
939 	elf_auxargs->base = addr;
940 	elf_auxargs->flags = 0;
941 	elf_auxargs->entry = entry;
942 
943 	imgp->auxargs = elf_auxargs;
944 	imgp->interpreted = 0;
945 	imgp->proc->p_osrel = osrel;
946 
947 	return (error);
948 }
949 
950 #define	suword __CONCAT(suword, __ELF_WORD_SIZE)
951 
952 int
953 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
954 {
955 	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
956 	Elf_Addr *base;
957 	Elf_Addr *pos;
958 
959 	base = (Elf_Addr *)*stack_base;
960 	pos = base + (imgp->args->argc + imgp->args->envc + 2);
961 
962 	if (args->execfd != -1)
963 		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
964 	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
965 	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
966 	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
967 	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
968 	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
969 	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
970 	AUXARGS_ENTRY(pos, AT_BASE, args->base);
971 	if (imgp->execpathp != 0)
972 		AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
973 	AUXARGS_ENTRY(pos, AT_NULL, 0);
974 
975 	free(imgp->auxargs, M_TEMP);
976 	imgp->auxargs = NULL;
977 
978 	base--;
979 	suword(base, (long)imgp->args->argc);
980 	*stack_base = (register_t *)base;
981 	return (0);
982 }
983 
984 /*
985  * Code for generating ELF core dumps.
986  */
987 
988 typedef void (*segment_callback)(vm_map_entry_t, void *);
989 
990 /* Closure for cb_put_phdr(). */
991 struct phdr_closure {
992 	Elf_Phdr *phdr;		/* Program header to fill in */
993 	Elf_Off offset;		/* Offset of segment in core file */
994 };
995 
996 /* Closure for cb_size_segment(). */
997 struct sseg_closure {
998 	int count;		/* Count of writable segments. */
999 	size_t size;		/* Total size of all writable segments. */
1000 };
1001 
1002 static void cb_put_phdr(vm_map_entry_t, void *);
1003 static void cb_size_segment(vm_map_entry_t, void *);
1004 static void each_writable_segment(struct thread *, segment_callback, void *);
1005 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
1006     int, void *, size_t);
1007 static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
1008 static void __elfN(putnote)(void *, size_t *, const char *, int,
1009     const void *, size_t);
1010 
1011 int
1012 __elfN(coredump)(td, vp, limit)
1013 	struct thread *td;
1014 	struct vnode *vp;
1015 	off_t limit;
1016 {
1017 	struct ucred *cred = td->td_ucred;
1018 	int error = 0;
1019 	struct sseg_closure seginfo;
1020 	void *hdr;
1021 	size_t hdrsize;
1022 
1023 	/* Size the program segments. */
1024 	seginfo.count = 0;
1025 	seginfo.size = 0;
1026 	each_writable_segment(td, cb_size_segment, &seginfo);
1027 
1028 	/*
1029 	 * Calculate the size of the core file header area by making
1030 	 * a dry run of generating it.  Nothing is written, but the
1031 	 * size is calculated.
1032 	 */
1033 	hdrsize = 0;
1034 	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
1035 
1036 	if (hdrsize + seginfo.size >= limit)
1037 		return (EFAULT);
1038 
1039 	/*
1040 	 * Allocate memory for building the header, fill it up,
1041 	 * and write it out.
1042 	 */
1043 	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
1044 	if (hdr == NULL) {
1045 		return (EINVAL);
1046 	}
1047 	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
1048 
1049 	/* Write the contents of all of the writable segments. */
1050 	if (error == 0) {
1051 		Elf_Phdr *php;
1052 		off_t offset;
1053 		int i;
1054 
1055 		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
1056 		offset = hdrsize;
1057 		for (i = 0; i < seginfo.count; i++) {
1058 			error = vn_rdwr_inchunks(UIO_WRITE, vp,
1059 			    (caddr_t)(uintptr_t)php->p_vaddr,
1060 			    php->p_filesz, offset, UIO_USERSPACE,
1061 			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1062 			    curthread);
1063 			if (error != 0)
1064 				break;
1065 			offset += php->p_filesz;
1066 			php++;
1067 		}
1068 	}
1069 	free(hdr, M_TEMP);
1070 
1071 	return (error);
1072 }
1073 
1074 /*
1075  * A callback for each_writable_segment() to write out the segment's
1076  * program header entry.
1077  */
1078 static void
1079 cb_put_phdr(entry, closure)
1080 	vm_map_entry_t entry;
1081 	void *closure;
1082 {
1083 	struct phdr_closure *phc = (struct phdr_closure *)closure;
1084 	Elf_Phdr *phdr = phc->phdr;
1085 
1086 	phc->offset = round_page(phc->offset);
1087 
1088 	phdr->p_type = PT_LOAD;
1089 	phdr->p_offset = phc->offset;
1090 	phdr->p_vaddr = entry->start;
1091 	phdr->p_paddr = 0;
1092 	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1093 	phdr->p_align = PAGE_SIZE;
1094 	phdr->p_flags = 0;
1095 	if (entry->protection & VM_PROT_READ)
1096 		phdr->p_flags |= PF_R;
1097 	if (entry->protection & VM_PROT_WRITE)
1098 		phdr->p_flags |= PF_W;
1099 	if (entry->protection & VM_PROT_EXECUTE)
1100 		phdr->p_flags |= PF_X;
1101 
1102 	phc->offset += phdr->p_filesz;
1103 	phc->phdr++;
1104 }
1105 
1106 /*
1107  * A callback for each_writable_segment() to gather information about
1108  * the number of segments and their total size.
1109  */
1110 static void
1111 cb_size_segment(entry, closure)
1112 	vm_map_entry_t entry;
1113 	void *closure;
1114 {
1115 	struct sseg_closure *ssc = (struct sseg_closure *)closure;
1116 
1117 	ssc->count++;
1118 	ssc->size += entry->end - entry->start;
1119 }
1120 
1121 /*
1122  * For each writable segment in the process's memory map, call the given
1123  * function with a pointer to the map entry and some arbitrary
1124  * caller-supplied data.
1125  */
1126 static void
1127 each_writable_segment(td, func, closure)
1128 	struct thread *td;
1129 	segment_callback func;
1130 	void *closure;
1131 {
1132 	struct proc *p = td->td_proc;
1133 	vm_map_t map = &p->p_vmspace->vm_map;
1134 	vm_map_entry_t entry;
1135 	vm_object_t backing_object, object;
1136 	boolean_t ignore_entry;
1137 
1138 	vm_map_lock_read(map);
1139 	for (entry = map->header.next; entry != &map->header;
1140 	    entry = entry->next) {
1141 		/*
1142 		 * Don't dump inaccessible mappings, deal with legacy
1143 		 * coredump mode.
1144 		 *
1145 		 * Note that read-only segments related to the elf binary
1146 		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1147 		 * need to arbitrarily ignore such segments.
1148 		 */
1149 		if (elf_legacy_coredump) {
1150 			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1151 				continue;
1152 		} else {
1153 			if ((entry->protection & VM_PROT_ALL) == 0)
1154 				continue;
1155 		}
1156 
1157 		/*
1158 		 * Dont include memory segment in the coredump if
1159 		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1160 		 * madvise(2).  Do not dump submaps (i.e. parts of the
1161 		 * kernel map).
1162 		 */
1163 		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1164 			continue;
1165 
1166 		if ((object = entry->object.vm_object) == NULL)
1167 			continue;
1168 
1169 		/* Ignore memory-mapped devices and such things. */
1170 		VM_OBJECT_LOCK(object);
1171 		while ((backing_object = object->backing_object) != NULL) {
1172 			VM_OBJECT_LOCK(backing_object);
1173 			VM_OBJECT_UNLOCK(object);
1174 			object = backing_object;
1175 		}
1176 		ignore_entry = object->type != OBJT_DEFAULT &&
1177 		    object->type != OBJT_SWAP && object->type != OBJT_VNODE;
1178 		VM_OBJECT_UNLOCK(object);
1179 		if (ignore_entry)
1180 			continue;
1181 
1182 		(*func)(entry, closure);
1183 	}
1184 	vm_map_unlock_read(map);
1185 }
1186 
1187 /*
1188  * Write the core file header to the file, including padding up to
1189  * the page boundary.
1190  */
1191 static int
1192 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1193 	struct thread *td;
1194 	struct vnode *vp;
1195 	struct ucred *cred;
1196 	int numsegs;
1197 	size_t hdrsize;
1198 	void *hdr;
1199 {
1200 	size_t off;
1201 
1202 	/* Fill in the header. */
1203 	bzero(hdr, hdrsize);
1204 	off = 0;
1205 	__elfN(puthdr)(td, hdr, &off, numsegs);
1206 
1207 	/* Write it to the core file. */
1208 	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1209 	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1210 	    td));
1211 }
1212 
1213 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1214 typedef struct prstatus32 elf_prstatus_t;
1215 typedef struct prpsinfo32 elf_prpsinfo_t;
1216 typedef struct fpreg32 elf_prfpregset_t;
1217 typedef struct fpreg32 elf_fpregset_t;
1218 typedef struct reg32 elf_gregset_t;
1219 #else
1220 typedef prstatus_t elf_prstatus_t;
1221 typedef prpsinfo_t elf_prpsinfo_t;
1222 typedef prfpregset_t elf_prfpregset_t;
1223 typedef prfpregset_t elf_fpregset_t;
1224 typedef gregset_t elf_gregset_t;
1225 #endif
1226 
1227 static void
1228 __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
1229 {
1230 	struct {
1231 		elf_prstatus_t status;
1232 		elf_prfpregset_t fpregset;
1233 		elf_prpsinfo_t psinfo;
1234 	} *tempdata;
1235 	elf_prstatus_t *status;
1236 	elf_prfpregset_t *fpregset;
1237 	elf_prpsinfo_t *psinfo;
1238 	struct proc *p;
1239 	struct thread *thr;
1240 	size_t ehoff, noteoff, notesz, phoff;
1241 
1242 	p = td->td_proc;
1243 
1244 	ehoff = *off;
1245 	*off += sizeof(Elf_Ehdr);
1246 
1247 	phoff = *off;
1248 	*off += (numsegs + 1) * sizeof(Elf_Phdr);
1249 
1250 	noteoff = *off;
1251 	/*
1252 	 * Don't allocate space for the notes if we're just calculating
1253 	 * the size of the header. We also don't collect the data.
1254 	 */
1255 	if (dst != NULL) {
1256 		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
1257 		status = &tempdata->status;
1258 		fpregset = &tempdata->fpregset;
1259 		psinfo = &tempdata->psinfo;
1260 	} else {
1261 		tempdata = NULL;
1262 		status = NULL;
1263 		fpregset = NULL;
1264 		psinfo = NULL;
1265 	}
1266 
1267 	if (dst != NULL) {
1268 		psinfo->pr_version = PRPSINFO_VERSION;
1269 		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
1270 		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1271 		/*
1272 		 * XXX - We don't fill in the command line arguments properly
1273 		 * yet.
1274 		 */
1275 		strlcpy(psinfo->pr_psargs, p->p_comm,
1276 		    sizeof(psinfo->pr_psargs));
1277 	}
1278 	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1279 	    sizeof *psinfo);
1280 
1281 	/*
1282 	 * To have the debugger select the right thread (LWP) as the initial
1283 	 * thread, we dump the state of the thread passed to us in td first.
1284 	 * This is the thread that causes the core dump and thus likely to
1285 	 * be the right thread one wants to have selected in the debugger.
1286 	 */
1287 	thr = td;
1288 	while (thr != NULL) {
1289 		if (dst != NULL) {
1290 			status->pr_version = PRSTATUS_VERSION;
1291 			status->pr_statussz = sizeof(elf_prstatus_t);
1292 			status->pr_gregsetsz = sizeof(elf_gregset_t);
1293 			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
1294 			status->pr_osreldate = osreldate;
1295 			status->pr_cursig = p->p_sig;
1296 			status->pr_pid = thr->td_tid;
1297 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1298 			fill_regs32(thr, &status->pr_reg);
1299 			fill_fpregs32(thr, fpregset);
1300 #else
1301 			fill_regs(thr, &status->pr_reg);
1302 			fill_fpregs(thr, fpregset);
1303 #endif
1304 		}
1305 		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1306 		    sizeof *status);
1307 		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1308 		    sizeof *fpregset);
1309 		/*
1310 		 * Allow for MD specific notes, as well as any MD
1311 		 * specific preparations for writing MI notes.
1312 		 */
1313 		__elfN(dump_thread)(thr, dst, off);
1314 
1315 		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1316 		    TAILQ_NEXT(thr, td_plist);
1317 		if (thr == td)
1318 			thr = TAILQ_NEXT(thr, td_plist);
1319 	}
1320 
1321 	notesz = *off - noteoff;
1322 
1323 	if (dst != NULL)
1324 		free(tempdata, M_TEMP);
1325 
1326 	/* Align up to a page boundary for the program segments. */
1327 	*off = round_page(*off);
1328 
1329 	if (dst != NULL) {
1330 		Elf_Ehdr *ehdr;
1331 		Elf_Phdr *phdr;
1332 		struct phdr_closure phc;
1333 
1334 		/*
1335 		 * Fill in the ELF header.
1336 		 */
1337 		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1338 		ehdr->e_ident[EI_MAG0] = ELFMAG0;
1339 		ehdr->e_ident[EI_MAG1] = ELFMAG1;
1340 		ehdr->e_ident[EI_MAG2] = ELFMAG2;
1341 		ehdr->e_ident[EI_MAG3] = ELFMAG3;
1342 		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1343 		ehdr->e_ident[EI_DATA] = ELF_DATA;
1344 		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1345 		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1346 		ehdr->e_ident[EI_ABIVERSION] = 0;
1347 		ehdr->e_ident[EI_PAD] = 0;
1348 		ehdr->e_type = ET_CORE;
1349 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1350 		ehdr->e_machine = EM_386;
1351 #else
1352 		ehdr->e_machine = ELF_ARCH;
1353 #endif
1354 		ehdr->e_version = EV_CURRENT;
1355 		ehdr->e_entry = 0;
1356 		ehdr->e_phoff = phoff;
1357 		ehdr->e_flags = 0;
1358 		ehdr->e_ehsize = sizeof(Elf_Ehdr);
1359 		ehdr->e_phentsize = sizeof(Elf_Phdr);
1360 		ehdr->e_phnum = numsegs + 1;
1361 		ehdr->e_shentsize = sizeof(Elf_Shdr);
1362 		ehdr->e_shnum = 0;
1363 		ehdr->e_shstrndx = SHN_UNDEF;
1364 
1365 		/*
1366 		 * Fill in the program header entries.
1367 		 */
1368 		phdr = (Elf_Phdr *)((char *)dst + phoff);
1369 
1370 		/* The note segement. */
1371 		phdr->p_type = PT_NOTE;
1372 		phdr->p_offset = noteoff;
1373 		phdr->p_vaddr = 0;
1374 		phdr->p_paddr = 0;
1375 		phdr->p_filesz = notesz;
1376 		phdr->p_memsz = 0;
1377 		phdr->p_flags = 0;
1378 		phdr->p_align = 0;
1379 		phdr++;
1380 
1381 		/* All the writable segments from the program. */
1382 		phc.phdr = phdr;
1383 		phc.offset = *off;
1384 		each_writable_segment(td, cb_put_phdr, &phc);
1385 	}
1386 }
1387 
1388 static void
1389 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1390     const void *desc, size_t descsz)
1391 {
1392 	Elf_Note note;
1393 
1394 	note.n_namesz = strlen(name) + 1;
1395 	note.n_descsz = descsz;
1396 	note.n_type = type;
1397 	if (dst != NULL)
1398 		bcopy(&note, (char *)dst + *off, sizeof note);
1399 	*off += sizeof note;
1400 	if (dst != NULL)
1401 		bcopy(name, (char *)dst + *off, note.n_namesz);
1402 	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
1403 	if (dst != NULL)
1404 		bcopy(desc, (char *)dst + *off, note.n_descsz);
1405 	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
1406 }
1407 
1408 /*
1409  * Try to find the appropriate ABI-note section for checknote,
1410  * fetch the osreldate for binary from the ELF OSABI-note. Only the
1411  * first page of the image is searched, the same as for headers.
1412  */
1413 static boolean_t
1414 __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
1415     int32_t *osrel)
1416 {
1417 	const Elf_Note *note, *note0, *note_end;
1418 	const Elf_Phdr *phdr, *pnote;
1419 	const Elf_Ehdr *hdr;
1420 	const char *note_name;
1421 	int i;
1422 
1423 	pnote = NULL;
1424 	hdr = (const Elf_Ehdr *)imgp->image_header;
1425 	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
1426 
1427 	for (i = 0; i < hdr->e_phnum; i++) {
1428 		if (phdr[i].p_type == PT_NOTE) {
1429 			pnote = &phdr[i];
1430 			break;
1431 		}
1432 	}
1433 
1434 	if (pnote == NULL || pnote->p_offset >= PAGE_SIZE ||
1435 	    pnote->p_offset + pnote->p_filesz >= PAGE_SIZE)
1436 		return (FALSE);
1437 
1438 	note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
1439 	note_end = (const Elf_Note *)(imgp->image_header +
1440 	    pnote->p_offset + pnote->p_filesz);
1441 	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
1442 		if (!aligned(note, Elf32_Addr))
1443 			return (FALSE);
1444 		if (note->n_namesz != checknote->hdr.n_namesz ||
1445 		    note->n_descsz != checknote->hdr.n_descsz ||
1446 		    note->n_type != checknote->hdr.n_type)
1447 			goto nextnote;
1448 		note_name = (const char *)(note + 1);
1449 		if (strncmp(checknote->vendor, note_name,
1450 		    checknote->hdr.n_namesz) != 0)
1451 			goto nextnote;
1452 
1453 		/*
1454 		 * Fetch the osreldate for binary
1455 		 * from the ELF OSABI-note if necessary.
1456 		 */
1457 		if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 &&
1458 		    checknote->trans_osrel != NULL)
1459 			return (checknote->trans_osrel(note, osrel));
1460 		return (TRUE);
1461 
1462 nextnote:
1463 		note = (const Elf_Note *)((const char *)(note + 1) +
1464 		    roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
1465 		    roundup2(note->n_descsz, sizeof(Elf32_Addr)));
1466 	}
1467 
1468 	return (FALSE);
1469 }
1470 
1471 /*
1472  * Tell kern_execve.c about it, with a little help from the linker.
1473  */
1474 static struct execsw __elfN(execsw) = {
1475 	__CONCAT(exec_, __elfN(imgact)),
1476 	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1477 };
1478 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
1479