xref: /titanic_44/usr/src/uts/common/exec/elf/elf.c (revision a0e56b0eb1fdc159ff8348ca0e77d884bb7d126b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
28 /*	  All Rights Reserved  	*/
29 
30 
31 #pragma ident	"%Z%%M%	%I%	%E% SMI"
32 
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/thread.h>
36 #include <sys/sysmacros.h>
37 #include <sys/signal.h>
38 #include <sys/cred.h>
39 #include <sys/user.h>
40 #include <sys/errno.h>
41 #include <sys/vnode.h>
42 #include <sys/mman.h>
43 #include <sys/kmem.h>
44 #include <sys/proc.h>
45 #include <sys/pathname.h>
46 #include <sys/cmn_err.h>
47 #include <sys/systm.h>
48 #include <sys/elf.h>
49 #include <sys/vmsystm.h>
50 #include <sys/debug.h>
51 #include <sys/auxv.h>
52 #include <sys/exec.h>
53 #include <sys/prsystm.h>
54 #include <vm/as.h>
55 #include <vm/rm.h>
56 #include <vm/seg.h>
57 #include <vm/seg_vn.h>
58 #include <sys/modctl.h>
59 #include <sys/systeminfo.h>
60 #include <sys/vmparam.h>
61 #include <sys/machelf.h>
62 #include <sys/shm_impl.h>
63 #include <sys/archsystm.h>
64 #include <sys/fasttrap.h>
65 #include <sys/brand.h>
66 #include "elf_impl.h"
67 
68 #include <sys/sdt.h>
69 
70 extern int at_flags;
71 
72 #define	ORIGIN_STR	"ORIGIN"
73 #define	ORIGIN_STR_SIZE	6
74 
75 static int getelfhead(vnode_t *, cred_t *, Ehdr *, int *, int *, int *);
76 static int getelfphdr(vnode_t *, cred_t *, const Ehdr *, int, caddr_t *,
77     ssize_t *);
78 static int getelfshdr(vnode_t *, cred_t *, const Ehdr *, int, int, caddr_t *,
79     ssize_t *, caddr_t *, ssize_t *);
80 static size_t elfsize(Ehdr *, int, caddr_t, uintptr_t *);
81 static int mapelfexec(vnode_t *, Ehdr *, int, caddr_t,
82     Phdr **, Phdr **, Phdr **, Phdr **, Phdr *,
83     caddr_t *, caddr_t *, intptr_t *, intptr_t *, size_t, long *, size_t *);
84 
85 typedef enum {
86 	STR_CTF,
87 	STR_SYMTAB,
88 	STR_DYNSYM,
89 	STR_STRTAB,
90 	STR_DYNSTR,
91 	STR_SHSTRTAB,
92 	STR_NUM
93 } shstrtype_t;
94 
95 static const char *shstrtab_data[] = {
96 	".SUNW_ctf",
97 	".symtab",
98 	".dynsym",
99 	".strtab",
100 	".dynstr",
101 	".shstrtab"
102 };
103 
104 typedef struct shstrtab {
105 	int	sst_ndx[STR_NUM];
106 	int	sst_cur;
107 } shstrtab_t;
108 
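/*
 * Helpers for building the ".shstrtab" section-name string table that
 * elfcore() emits when it copies CTF and symbol table sections into a
 * core file.  shstrtab_ndx() hands out (and remembers) the offset of a
 * name within the table, shstrtab_size() reports the table's current
 * size, and shstrtab_dump() writes the accumulated names into a
 * caller-supplied buffer.  Offset 0 is reserved for the table's
 * leading '\0' byte, as required for SHT_STRTAB sections.
 */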
109 static void
110 shstrtab_init(shstrtab_t *s)
111 {
112 	bzero(&s->sst_ndx, sizeof (s->sst_ndx));
113 	s->sst_cur = 1;
114 }
115 
116 static int
117 shstrtab_ndx(shstrtab_t *s, shstrtype_t type)
118 {
119 	int ret;
120 
121 	if ((ret = s->sst_ndx[type]) != 0)
122 		return (ret);
123 
124 	ret = s->sst_ndx[type] = s->sst_cur;
125 	s->sst_cur += strlen(shstrtab_data[type]) + 1;
126 
127 	return (ret);
128 }
129 
130 static size_t
131 shstrtab_size(const shstrtab_t *s)
132 {
133 	return (s->sst_cur);
134 }
135 
136 static void
137 shstrtab_dump(const shstrtab_t *s, char *buf)
138 {
139 	int i, ndx;
140 
141 	*buf = '\0';
142 	for (i = 0; i < STR_NUM; i++) {
143 		if ((ndx = s->sst_ndx[i]) != 0)
144 			(void) strcpy(buf + ndx, shstrtab_data[i]);
145 	}
146 }
147 
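/*
 * Validate a PT_SUNWDTRACE program header and record the address of its
 * segment as the per-LWP scratch space (args->thrptr) used by the DTrace
 * fasttrap provider.  Returns -1 if the segment is too small or is not
 * mapped readable, writable, and executable.
 */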
148 static int
149 dtrace_safe_phdr(Phdr *phdrp, struct uarg *args, uintptr_t base)
150 {
151 	ASSERT(phdrp->p_type == PT_SUNWDTRACE);
152 
153 	/*
154 	 * See the comment in fasttrap.h for information on how to safely
155 	 * update this program header.
156 	 */
157 	if (phdrp->p_memsz < PT_SUNWDTRACE_SIZE ||
158 	    (phdrp->p_flags & (PF_R | PF_W | PF_X)) != (PF_R | PF_W | PF_X))
159 		return (-1);
160 
161 	args->thrptr = phdrp->p_vaddr + base;
162 
163 	return (0);
164 }
165 
166 /*
167  * Map in the executable pointed to by vp. Returns 0 on success.
168  */
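/*
 * (Intended for brand emulation modules: the virtual address of the
 * PT_PHDR segment - or (Elf32_Addr)-1 if the object has none - is handed
 * back through uphdr_vaddr, and *interp reports whether a PT_INTERP
 * header was found.)
 */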
169 int
170 mapexec_brand(vnode_t *vp, uarg_t *args, Ehdr *ehdr, Elf32_Addr *uphdr_vaddr,
171     intptr_t *voffset, caddr_t exec_file, int *interp, caddr_t *bssbase,
172     caddr_t *brkbase, size_t *brksize)
173 {
174 	size_t		len;
175 	struct vattr	vat;
176 	caddr_t		phdrbase = NULL;
177 	ssize_t		phdrsize;
178 	int		nshdrs, shstrndx, nphdrs;
179 	int		error = 0;
180 	Phdr		*uphdr = NULL;
181 	Phdr		*junk = NULL;
182 	Phdr		*dynphdr = NULL;
183 	Phdr		*dtrphdr = NULL;
184 	uintptr_t	lddata;
185 	long		execsz;
186 	intptr_t	minaddr;
187 
188 	if (error = execpermissions(vp, &vat, args)) {
189 		uprintf("%s: Cannot execute %s\n", exec_file, args->pathname);
190 		return (error);
191 	}
192 
193 	if ((error = getelfhead(vp, CRED(), ehdr, &nshdrs, &shstrndx,
194 	    &nphdrs)) != 0 ||
195 	    (error = getelfphdr(vp, CRED(), ehdr, nphdrs, &phdrbase,
196 	    &phdrsize)) != 0) {
197 		uprintf("%s: Cannot read %s\n", exec_file, args->pathname);
198 		return (error);
199 	}
200 
201 	if ((len = elfsize(ehdr, nphdrs, phdrbase, &lddata)) == 0) {
202 		uprintf("%s: Nothing to load in %s\n", exec_file, args->pathname);
203 		kmem_free(phdrbase, phdrsize);
204 		return (ENOEXEC);
205 	}
206 
207 	if (error = mapelfexec(vp, ehdr, nphdrs, phdrbase, &uphdr, &dynphdr,
208 	    &junk, &dtrphdr, NULL, bssbase, brkbase, voffset, &minaddr,
209 	    len, &execsz, brksize)) {
210 		uprintf("%s: Cannot map %s\n", exec_file, args->pathname);
211 		kmem_free(phdrbase, phdrsize);
212 		return (error);
213 	}
214 
215 	/*
216 	 * Inform our caller if the executable needs an interpreter.
217 	 */
218 	*interp = (dynphdr == NULL) ? 0 : 1;
219 
220 	/*
221 	 * If this is a statically linked executable, voffset should indicate
222 	 * the address of the executable itself (it normally holds the address
223 	 * of the interpreter).
224 	 */
225 	if (ehdr->e_type == ET_EXEC && *interp == 0)
226 		*voffset = minaddr;
227 
228 	if (uphdr != NULL) {
229 		*uphdr_vaddr = uphdr->p_vaddr;
230 	} else {
231 		*uphdr_vaddr = (Elf32_Addr)-1;
232 	}
233 
234 	kmem_free(phdrbase, phdrsize);
235 	return (error);
236 }
237 
238 /*ARGSUSED*/
239 int
240 elfexec(vnode_t *vp, execa_t *uap, uarg_t *args, intpdata_t *idatap,
241     int level, long *execsz, int setid, caddr_t exec_file, cred_t *cred,
242     int brand_action)
243 {
244 	caddr_t		phdrbase = NULL;
245 	caddr_t 	bssbase = 0;
246 	caddr_t 	brkbase = 0;
247 	size_t		brksize = 0;
248 	ssize_t		dlnsize;
249 	aux_entry_t	*aux;
250 	int		error;
251 	ssize_t		resid;
252 	int		fd = -1;
253 	intptr_t	voffset;
254 	Phdr		*dyphdr = NULL;
255 	Phdr		*stphdr = NULL;
256 	Phdr		*uphdr = NULL;
257 	Phdr		*junk = NULL;
258 	size_t		len;
259 	ssize_t		phdrsize;
260 	int		postfixsize = 0;
261 	int		i, hsize;
262 	Phdr		*phdrp;
263 	Phdr		*dataphdrp = NULL;
264 	Phdr		*dtrphdr;
265 	int		hasu = 0;
266 	int		hasauxv = 0;
267 	int		hasdy = 0;
268 	int		branded = 0;
269 
270 	struct proc *p = ttoproc(curthread);
271 	struct user *up = PTOU(p);
272 	struct bigwad {
273 		Ehdr	ehdr;
274 		aux_entry_t	elfargs[__KERN_NAUXV_IMPL];
275 		char		dl_name[MAXPATHLEN];
276 		char		pathbuf[MAXPATHLEN];
277 		struct vattr	vattr;
278 		struct execenv	exenv;
279 	} *bigwad;	/* kmem_alloc this behemoth so we don't blow stack */
280 	Ehdr		*ehdrp;
281 	int		nshdrs, shstrndx, nphdrs;
282 	char		*dlnp;
283 	char		*pathbufp;
284 	rlim64_t	limit;
285 	rlim64_t	roundlimit;
286 
287 	ASSERT(p->p_model == DATAMODEL_ILP32 || p->p_model == DATAMODEL_LP64);
288 
289 	if ((level < 2) &&
290 	    (brand_action != EBA_NATIVE) && (PROC_IS_BRANDED(p))) {
291 		return (BROP(p)->b_elfexec(vp, uap, args,
292 		    idatap, level + 1, execsz, setid, exec_file, cred,
293 		    brand_action));
294 	}
295 
296 	bigwad = kmem_alloc(sizeof (struct bigwad), KM_SLEEP);
297 	ehdrp = &bigwad->ehdr;
298 	dlnp = bigwad->dl_name;
299 	pathbufp = bigwad->pathbuf;
300 
301 	/*
302 	 * Obtain ELF and program header information.
303 	 */
304 	if ((error = getelfhead(vp, CRED(), ehdrp, &nshdrs, &shstrndx,
305 	    &nphdrs)) != 0 ||
306 	    (error = getelfphdr(vp, CRED(), ehdrp, nphdrs, &phdrbase,
307 	    &phdrsize)) != 0)
308 		goto out;
309 
310 	/*
311 	 * Prevent executing an ELF file that has no entry point.
312 	 */
313 	if (ehdrp->e_entry == 0) {
314 		uprintf("%s: Bad entry point\n", exec_file);
315 		goto bad;
316 	}
317 
318 	/*
319 	 * Put the data model that we're exec-ing to into the args passed to
320 	 * exec_args(), so it will know what it is copying to on the new stack.
321 	 * Now that we know whether we are exec-ing a 32-bit or 64-bit
322 	 * executable, we can set execsz with the appropriate NCARGS.
323 	 */
324 #ifdef	_LP64
325 	if (ehdrp->e_ident[EI_CLASS] == ELFCLASS32) {
326 		args->to_model = DATAMODEL_ILP32;
327 		*execsz = btopr(SINCR) + btopr(SSIZE) + btopr(NCARGS32-1);
328 	} else {
329 		args->to_model = DATAMODEL_LP64;
330 		args->stk_prot &= ~PROT_EXEC;
331 #if defined(__i386) || defined(__amd64)
332 		args->dat_prot &= ~PROT_EXEC;
333 #endif
334 		*execsz = btopr(SINCR) + btopr(SSIZE) + btopr(NCARGS64-1);
335 	}
336 #else	/* _LP64 */
337 	args->to_model = DATAMODEL_ILP32;
338 	*execsz = btopr(SINCR) + btopr(SSIZE) + btopr(NCARGS-1);
339 #endif	/* _LP64 */
340 
341 	/*
342 	 * Determine the aux size now so that the stack can be built
343 	 * in one shot (except for the actual copyout of the aux image),
344 	 * determine any non-default stack protections,
345 	 * and still have this code be machine independent.
346 	 */
347 	hsize = ehdrp->e_phentsize;
348 	phdrp = (Phdr *)phdrbase;
349 	for (i = nphdrs; i > 0; i--) {
350 		switch (phdrp->p_type) {
351 		case PT_INTERP:
352 			hasauxv = hasdy = 1;
353 			break;
354 		case PT_PHDR:
355 			hasu = 1;
356 			break;
357 		case PT_SUNWSTACK:
358 			args->stk_prot = PROT_USER;
359 			if (phdrp->p_flags & PF_R)
360 				args->stk_prot |= PROT_READ;
361 			if (phdrp->p_flags & PF_W)
362 				args->stk_prot |= PROT_WRITE;
363 			if (phdrp->p_flags & PF_X)
364 				args->stk_prot |= PROT_EXEC;
365 			break;
366 		case PT_LOAD:
367 			dataphdrp = phdrp;
368 			break;
369 		}
370 		phdrp = (Phdr *)((caddr_t)phdrp + hsize);
371 	}
372 
373 	if (ehdrp->e_type != ET_EXEC) {
374 		dataphdrp = NULL;
375 		hasauxv = 1;
376 	}
377 
378 	/* Copy BSS permissions to args->dat_prot */
379 	if (dataphdrp != NULL) {
380 		args->dat_prot = PROT_USER;
381 		if (dataphdrp->p_flags & PF_R)
382 			args->dat_prot |= PROT_READ;
383 		if (dataphdrp->p_flags & PF_W)
384 			args->dat_prot |= PROT_WRITE;
385 		if (dataphdrp->p_flags & PF_X)
386 			args->dat_prot |= PROT_EXEC;
387 	}
388 
389 	/*
390 	 * If an aux vector will be required - reserve the space for
391 	 * it now.  This may be increased by exec_args if there are
392 	 * ISA-specific types (included in __KERN_NAUXV_IMPL).
393 	 */
394 	if (hasauxv) {
395 		/*
396 		 * If an AUX vector is being built - the base AUX
397 		 * entries are:
398 		 *
399 		 *	AT_BASE
400 		 *	AT_FLAGS
401 		 *	AT_PAGESZ
402 		 *	AT_SUN_AUXFLAGS
403 		 *	AT_SUN_HWCAP
404 		 *	AT_SUN_PLATFORM
405 		 *	AT_SUN_EXECNAME
406 		 *	AT_NULL
407 		 *
408 		 * total == 8
409 		 */
410 		if (hasdy && hasu) {
411 			/*
412 			 * Has PT_INTERP & PT_PHDR - the aux entries that
413 			 * will be built are:
414 			 *
415 			 *	AT_PHDR
416 			 *	AT_PHENT
417 			 *	AT_PHNUM
418 			 *	AT_ENTRY
419 			 *	AT_LDDATA
420 			 *
421 			 * total = 5
422 			 */
423 			args->auxsize = (8 + 5) * sizeof (aux_entry_t);
424 		} else if (hasdy) {
425 			/*
426 			 * Has PT_INTERP but no PT_PHDR
427 			 *
428 			 *	AT_EXECFD
429 			 *	AT_LDDATA
430 			 *
431 			 * total = 2
432 			 */
433 			args->auxsize = (8 + 2) * sizeof (aux_entry_t);
434 		} else {
435 			args->auxsize = 8 * sizeof (aux_entry_t);
436 		}
437 	} else
438 		args->auxsize = 0;
439 
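	/*
	 * (Illustrative accounting: a typical dynamically linked executable
	 * has both PT_INTERP and PT_PHDR, so its base aux vector is
	 * 13 entries (8 + 5) and args->auxsize starts out as
	 * 13 * sizeof (aux_entry_t).  The ASSERT against postfixsize below
	 * depends on this bookkeeping matching the ADDAUX() calls that are
	 * actually made, so the two must be kept in sync.)
	 */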
440 	/*
441 	 * If this binary is using an emulator, we need to add an
442 	 * AT_SUN_EMULATOR aux entry.
443 	 */
444 	if (args->emulator != NULL)
445 		args->auxsize += sizeof (aux_entry_t);
446 
447 	if ((brand_action != EBA_NATIVE) && (PROC_IS_BRANDED(p))) {
448 		branded = 1;
449 		/*
450 		 * We will be adding 2 entries to the aux vector.  One for
451 		 * the branded binary's phdr and one for the brandname.
452 		 */
453 		args->auxsize += 2 * sizeof (aux_entry_t);
454 	}
455 
456 	aux = bigwad->elfargs;
457 	/*
458 	 * Move args to the user's stack.
459 	 */
460 	if ((error = exec_args(uap, args, idatap, (void **)&aux)) != 0) {
461 		if (error == -1) {
462 			error = ENOEXEC;
463 			goto bad;
464 		}
465 		goto out;
466 	}
467 	/* we're single threaded after this point */
468 
469 	/*
470 	 * If this is an ET_DYN executable (shared object),
471 	 * determine its memory size so that mapelfexec() can load it.
472 	 */
473 	if (ehdrp->e_type == ET_DYN)
474 		len = elfsize(ehdrp, nphdrs, phdrbase, NULL);
475 	else
476 		len = 0;
477 
478 	dtrphdr = NULL;
479 
480 	if ((error = mapelfexec(vp, ehdrp, nphdrs, phdrbase, &uphdr, &dyphdr,
481 	    &stphdr, &dtrphdr, dataphdrp, &bssbase, &brkbase, &voffset, NULL,
482 	    len, execsz, &brksize)) != 0)
483 		goto bad;
484 
485 	if (uphdr != NULL && dyphdr == NULL)
486 		goto bad;
487 
488 	if (dtrphdr != NULL && dtrace_safe_phdr(dtrphdr, args, voffset) != 0) {
489 		uprintf("%s: Bad DTrace phdr in %s\n", exec_file, exec_file);
490 		goto bad;
491 	}
492 
493 	if (dyphdr != NULL) {
494 		size_t		len;
495 		uintptr_t	lddata;
496 		char		*p;
497 		struct vnode	*nvp;
498 
499 		dlnsize = dyphdr->p_filesz;
500 
501 		if (dlnsize > MAXPATHLEN || dlnsize <= 0)
502 			goto bad;
503 
504 		/*
505 		 * Read in "interpreter" pathname.
506 		 */
507 		if ((error = vn_rdwr(UIO_READ, vp, dlnp, dyphdr->p_filesz,
508 		    (offset_t)dyphdr->p_offset, UIO_SYSSPACE, 0, (rlim64_t)0,
509 		    CRED(), &resid)) != 0) {
510 			uprintf("%s: Cannot obtain interpreter pathname\n",
511 			    exec_file);
512 			goto bad;
513 		}
514 
515 		if (resid != 0 || dlnp[dlnsize - 1] != '\0')
516 			goto bad;
517 
518 		/*
519 		 * Search for '$ORIGIN' token in interpreter path.
520 		 * If found, expand it.
521 		 */
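		/*
		 * (Illustrative example: if the executable's pathname is
		 * "/opt/app/bin/prog" and PT_INTERP names
		 * "$ORIGIN/../lib/ld.so.1", the loop below rewrites the
		 * interpreter path as "/opt/app/bin/../lib/ld.so.1".  If the
		 * executable was exec'd by a bare basename, "." is
		 * substituted for $ORIGIN instead.)
		 */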
522 		for (p = dlnp; p = strchr(p, '$'); ) {
523 			uint_t	len, curlen;
524 			char	*_ptr;
525 
526 			if (strncmp(++p, ORIGIN_STR, ORIGIN_STR_SIZE))
527 				continue;
528 
529 			curlen = 0;
530 			len = p - dlnp - 1;
531 			if (len) {
532 				bcopy(dlnp, pathbufp, len);
533 				curlen += len;
534 			}
535 			if (_ptr = strrchr(args->pathname, '/')) {
536 				len = _ptr - args->pathname;
537 				if ((curlen + len) > MAXPATHLEN)
538 					break;
539 
540 				bcopy(args->pathname, &pathbufp[curlen], len);
541 				curlen += len;
542 			} else {
543 				/*
544 				 * The executable is a basename found in the
545 				 * current directory.  So just substitute
546 				 * '.' for ORIGIN.
547 				 */
548 				pathbufp[curlen] = '.';
549 				curlen++;
550 			}
551 			p += ORIGIN_STR_SIZE;
552 			len = strlen(p);
553 
554 			if ((curlen + len) > MAXPATHLEN)
555 				break;
556 			bcopy(p, &pathbufp[curlen], len);
557 			curlen += len;
558 			pathbufp[curlen++] = '\0';
559 			bcopy(pathbufp, dlnp, curlen);
560 		}
561 
562 		/*
563 		 * /usr/lib/ld.so.1 is known to be a symlink to /lib/ld.so.1
564 		 * (and /usr/lib/64/ld.so.1 is a symlink to /lib/64/ld.so.1).
565 		 * Just in case /usr is not mounted, change it now.
566 		 */
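		/*
		 * (Skipping the first four bytes below drops the leading
		 * "/usr", e.g. turning "/usr/lib/ld.so.1" into
		 * "/lib/ld.so.1"; the lookupname() retry that follows undoes
		 * the skip for roots that only provide the /usr copy.)
		 */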
567 		if (strcmp(dlnp, USR_LIB_RTLD) == 0)
568 			dlnp += 4;
569 		error = lookupname(dlnp, UIO_SYSSPACE, FOLLOW, NULLVPP, &nvp);
570 		if (error && dlnp != bigwad->dl_name) {
571 			/* new kernel, old user-level */
572 			error = lookupname(dlnp -= 4, UIO_SYSSPACE, FOLLOW,
573 			    NULLVPP, &nvp);
574 		}
575 		if (error) {
576 			uprintf("%s: Cannot find %s\n", exec_file, dlnp);
577 			goto bad;
578 		}
579 
580 		/*
581 		 * Set up the "aux" vector.
582 		 */
583 		if (uphdr) {
584 			if (ehdrp->e_type == ET_DYN) {
585 				/* don't use the first page */
586 				bigwad->exenv.ex_brkbase = (caddr_t)PAGESIZE;
587 				bigwad->exenv.ex_bssbase = (caddr_t)PAGESIZE;
588 			} else {
589 				bigwad->exenv.ex_bssbase = bssbase;
590 				bigwad->exenv.ex_brkbase = brkbase;
591 			}
592 			bigwad->exenv.ex_brksize = brksize;
593 			bigwad->exenv.ex_magic = elfmagic;
594 			bigwad->exenv.ex_vp = vp;
595 			setexecenv(&bigwad->exenv);
596 
597 			ADDAUX(aux, AT_PHDR, uphdr->p_vaddr + voffset)
598 			ADDAUX(aux, AT_PHENT, ehdrp->e_phentsize)
599 			ADDAUX(aux, AT_PHNUM, nphdrs)
600 			ADDAUX(aux, AT_ENTRY, ehdrp->e_entry + voffset)
601 		} else {
602 			if ((error = execopen(&vp, &fd)) != 0) {
603 				VN_RELE(nvp);
604 				goto bad;
605 			}
606 
607 			ADDAUX(aux, AT_EXECFD, fd)
608 		}
609 
610 		if ((error = execpermissions(nvp, &bigwad->vattr, args)) != 0) {
611 			VN_RELE(nvp);
612 			uprintf("%s: Cannot execute %s\n", exec_file, dlnp);
613 			goto bad;
614 		}
615 
616 		/*
617 		 * Now obtain the ELF header along with the entire program
618 		 * header table contained in "nvp".
619 		 */
620 		kmem_free(phdrbase, phdrsize);
621 		phdrbase = NULL;
622 		if ((error = getelfhead(nvp, CRED(), ehdrp, &nshdrs,
623 		    &shstrndx, &nphdrs)) != 0 ||
624 		    (error = getelfphdr(nvp, CRED(), ehdrp, nphdrs, &phdrbase,
625 		    &phdrsize)) != 0) {
626 			VN_RELE(nvp);
627 			uprintf("%s: Cannot read %s\n", exec_file, dlnp);
628 			goto bad;
629 		}
630 
631 		/*
632 		 * Determine the memory size of the "interpreter's" loadable
633 		 * sections.  This size is then used to obtain the virtual
634 		 * address of a hole, in the user's address space, large
635 		 * enough to map the "interpreter".
636 		 */
637 		if ((len = elfsize(ehdrp, nphdrs, phdrbase, &lddata)) == 0) {
638 			VN_RELE(nvp);
639 			uprintf("%s: Nothing to load in %s\n", exec_file, dlnp);
640 			goto bad;
641 		}
642 
643 		dtrphdr = NULL;
644 
645 		error = mapelfexec(nvp, ehdrp, nphdrs, phdrbase, &junk, &junk,
646 		    &junk, &dtrphdr, NULL, NULL, NULL, &voffset, NULL, len,
647 		    execsz, NULL);
648 		if (error || junk != NULL) {
649 			VN_RELE(nvp);
650 			uprintf("%s: Cannot map %s\n", exec_file, dlnp);
651 			goto bad;
652 		}
653 
654 		/*
655 		 * We use the DTrace program header to initialize the
656 		 * architecture-specific user per-LWP location. The dtrace
657 		 * fasttrap provider requires ready access to per-LWP scratch
658 		 * space. We assume that there is only one such program header
659 		 * in the interpreter.
660 		 */
661 		if (dtrphdr != NULL &&
662 		    dtrace_safe_phdr(dtrphdr, args, voffset) != 0) {
663 			VN_RELE(nvp);
664 			uprintf("%s: Bad DTrace phdr in %s\n", exec_file, dlnp);
665 			goto bad;
666 		}
667 
668 		VN_RELE(nvp);
669 		ADDAUX(aux, AT_SUN_LDDATA, voffset + lddata)
670 	}
671 
672 	if (hasauxv) {
673 		int auxf = AF_SUN_HWCAPVERIFY;
674 		/*
675 		 * Note: AT_SUN_PLATFORM was filled in via exec_args()
676 		 */
677 		ADDAUX(aux, AT_BASE, voffset)
678 		ADDAUX(aux, AT_FLAGS, at_flags)
679 		ADDAUX(aux, AT_PAGESZ, PAGESIZE)
680 		/*
681 		 * Linker flags. (security)
682 		 * p_flag not yet set at this time.
683 		 * We rely on gexec() to provide us with the information.
684 		 * If the application is set-uid but this is not reflected
685 		 * in a mismatch between real/effective uids/gids, then
686 		 * don't treat this as a set-uid exec.  So we care about
687 		 * the EXECSETID_UGIDS flag but not the ...SETID flag.
688 		 */
689 		setid &= ~EXECSETID_SETID;
690 		ADDAUX(aux, AT_SUN_AUXFLAGS,
691 		    setid ? AF_SUN_SETUGID | auxf : auxf);
692 		/*
693 		 * Hardware capability flag word (performance hints)
694 		 * Used for choosing faster library routines.
695 		 * (Potentially different between 32-bit and 64-bit ABIs)
696 		 */
697 #if defined(_LP64)
698 		if (args->to_model == DATAMODEL_NATIVE)
699 			ADDAUX(aux, AT_SUN_HWCAP, auxv_hwcap)
700 		else
701 			ADDAUX(aux, AT_SUN_HWCAP, auxv_hwcap32)
702 #else
703 		ADDAUX(aux, AT_SUN_HWCAP, auxv_hwcap)
704 #endif
705 		if (branded) {
706 			/*
707 			 * Reserve space for the brand-private aux vector entry,
708 			 * and record the user addr of that space.
709 			 */
710 			args->brand_auxp = (auxv32_t *)((char *)args->stackend +
711 			    ((char *)&aux->a_type - (char *)bigwad->elfargs));
712 			ADDAUX(aux, AT_SUN_BRAND_PHDR, 0)
713 		}
714 
715 		ADDAUX(aux, AT_NULL, 0)
716 		postfixsize = (char *)aux - (char *)bigwad->elfargs;
717 		ASSERT(postfixsize == args->auxsize);
718 		ASSERT(postfixsize <= __KERN_NAUXV_IMPL * sizeof (aux_entry_t));
719 	}
720 
721 	/*
722 	 * For the 64-bit kernel, the limit is big enough that rounding it up
723 	 * to a page can overflow the 64-bit limit, so we check for btopr()
724 	 * overflowing here by comparing it with the unrounded limit in pages.
725 	 * If it hasn't overflowed, compare the exec size with the rounded up
726 	 * limit in pages.  Otherwise, just compare with the unrounded limit.
727 	 */
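	/*
	 * (For example, if p_vmem_ctl is effectively unlimited - all ones -
	 * the addition hidden inside btopr() wraps around, leaving
	 * roundlimit smaller than limit, so the unrounded limit is the one
	 * compared against.)
	 */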
728 	limit = btop(p->p_vmem_ctl);
729 	roundlimit = btopr(p->p_vmem_ctl);
730 	if ((roundlimit > limit && *execsz > roundlimit) ||
731 	    (roundlimit < limit && *execsz > limit)) {
732 		mutex_enter(&p->p_lock);
733 		(void) rctl_action(rctlproc_legacy[RLIMIT_VMEM], p->p_rctls, p,
734 		    RCA_SAFE);
735 		mutex_exit(&p->p_lock);
736 		error = ENOMEM;
737 		goto bad;
738 	}
739 
740 	bzero(up->u_auxv, sizeof (up->u_auxv));
741 	if (postfixsize) {
742 		int num_auxv;
743 
744 		/*
745 		 * Copy the aux vector to the user stack.
746 		 */
747 		error = execpoststack(args, bigwad->elfargs, postfixsize);
748 		if (error)
749 			goto bad;
750 
751 		/*
752 		 * Copy auxv to the process's user structure for use by /proc.
753 		 * If this is a branded process, the brand's exec routine will
754 		 * copy its private entries to the user structure later. It
755 		 * relies on the fact that the blank entries are at the end.
756 		 */
757 		num_auxv = postfixsize / sizeof (aux_entry_t);
758 		ASSERT(num_auxv <= sizeof (up->u_auxv) / sizeof (auxv_t));
759 		aux = bigwad->elfargs;
760 		for (i = 0; i < num_auxv; i++) {
761 			up->u_auxv[i].a_type = aux[i].a_type;
762 			up->u_auxv[i].a_un.a_val = (aux_val_t)aux[i].a_un.a_val;
763 		}
764 	}
765 
766 	/*
767 	 * Pass back the starting address so we can set the program counter.
768 	 */
769 	args->entry = (uintptr_t)(ehdrp->e_entry + voffset);
770 
771 	if (!uphdr) {
772 		if (ehdrp->e_type == ET_DYN) {
773 			/*
774 			 * If we are executing a shared library which doesn't
775 			 * have an interpreter (probably ld.so.1) then
776 			 * we don't set the brkbase now.  Instead we
777 			 * delay its setting until the first call
778 			 * via grow.c::brk().  This permits ld.so.1 to
779 			 * initialize brkbase to the tail of the executable it
780 			 * loads (which is where it needs to be).
781 			 */
782 			bigwad->exenv.ex_brkbase = (caddr_t)0;
783 			bigwad->exenv.ex_bssbase = (caddr_t)0;
784 			bigwad->exenv.ex_brksize = 0;
785 		} else {
786 			bigwad->exenv.ex_brkbase = brkbase;
787 			bigwad->exenv.ex_bssbase = bssbase;
788 			bigwad->exenv.ex_brksize = brksize;
789 		}
790 		bigwad->exenv.ex_magic = elfmagic;
791 		bigwad->exenv.ex_vp = vp;
792 		setexecenv(&bigwad->exenv);
793 	}
794 
795 	ASSERT(error == 0);
796 	goto out;
797 
798 bad:
799 	if (fd != -1)		/* did we open the a.out yet */
800 		(void) execclose(fd);
801 
802 	psignal(p, SIGKILL);
803 
804 	if (error == 0)
805 		error = ENOEXEC;
806 out:
807 	if (phdrbase != NULL)
808 		kmem_free(phdrbase, phdrsize);
809 	kmem_free(bigwad, sizeof (struct bigwad));
810 	return (error);
811 }
812 
813 /*
814  * Compute the memory size requirement for the ELF file.
815  */
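/*
 * (Illustrative example, assuming 4K pages: PT_LOAD segments spanning
 * 0x10234 through 0x2f000 yield loaddr 0x10234 and hiaddr 0x2f000, so the
 * size returned is roundup(0x2f000 - 0x10000, PAGESIZE) == 0x1f000.)
 */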
816 static size_t
817 elfsize(Ehdr *ehdrp, int nphdrs, caddr_t phdrbase, uintptr_t *lddata)
818 {
819 	size_t	len;
820 	Phdr	*phdrp = (Phdr *)phdrbase;
821 	int	hsize = ehdrp->e_phentsize;
822 	int	first = 1;
823 	int	dfirst = 1;	/* first data segment */
824 	uintptr_t loaddr = 0;
825 	uintptr_t hiaddr = 0;
826 	uintptr_t lo, hi;
827 	int	i;
828 
829 	for (i = nphdrs; i > 0; i--) {
830 		if (phdrp->p_type == PT_LOAD) {
831 			lo = phdrp->p_vaddr;
832 			hi = lo + phdrp->p_memsz;
833 			if (first) {
834 				loaddr = lo;
835 				hiaddr = hi;
836 				first = 0;
837 			} else {
838 				if (loaddr > lo)
839 					loaddr = lo;
840 				if (hiaddr < hi)
841 					hiaddr = hi;
842 			}
843 
844 			/*
845 			 * Save the address of the first data segment
846 			 * of an object - used for the AT_SUNW_LDDATA
847 			 * aux entry.
848 			 */
849 			if ((lddata != NULL) && dfirst &&
850 			    (phdrp->p_flags & PF_W)) {
851 				*lddata = lo;
852 				dfirst = 0;
853 			}
854 		}
855 		phdrp = (Phdr *)((caddr_t)phdrp + hsize);
856 	}
857 
858 	len = hiaddr - (loaddr & PAGEMASK);
859 	len = roundup(len, PAGESIZE);
860 
861 	return (len);
862 }
863 
864 /*
865  * Read in the ELF header and program header table.
866  * SUSV3 requires:
867  *	ENOEXEC	File format is not recognized
868  *	EINVAL	Format recognized but execution not supported
869  */
870 static int
871 getelfhead(vnode_t *vp, cred_t *credp, Ehdr *ehdr, int *nshdrs, int *shstrndx,
872     int *nphdrs)
873 {
874 	int error;
875 	ssize_t resid;
876 
877 	/*
878 	 * We got here by the first two bytes in ident,
879 	 * now read the entire ELF header.
880 	 */
881 	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)ehdr,
882 	    sizeof (Ehdr), (offset_t)0, UIO_SYSSPACE, 0,
883 	    (rlim64_t)0, credp, &resid)) != 0)
884 		return (error);
885 
886 	/*
887 	 * Since a separate version is compiled for handling 32-bit and
888 	 * 64-bit ELF executables on a 64-bit kernel, the 64-bit version
889 	 * doesn't need to be able to deal with 32-bit ELF files.
890 	 */
891 	if (resid != 0 ||
892 	    ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
893 	    ehdr->e_ident[EI_MAG3] != ELFMAG3)
894 		return (ENOEXEC);
895 
896 	if ((ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) ||
897 #if defined(_ILP32) || defined(_ELF32_COMPAT)
898 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
899 #else
900 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
901 #endif
902 	    !elfheadcheck(ehdr->e_ident[EI_DATA], ehdr->e_machine,
903 	    ehdr->e_flags))
904 		return (EINVAL);
905 
906 	*nshdrs = ehdr->e_shnum;
907 	*shstrndx = ehdr->e_shstrndx;
908 	*nphdrs = ehdr->e_phnum;
909 
910 	/*
911 	 * If e_shnum, e_shstrndx, or e_phnum is its sentinel value, we need
912 	 * to read in the section header at index zero to access the true
913 	 * values for those fields.
914 	 */
915 	if ((*nshdrs == 0 && ehdr->e_shoff != 0) ||
916 	    *shstrndx == SHN_XINDEX || *nphdrs == PN_XNUM) {
917 		Shdr shdr;
918 
919 		if (ehdr->e_shoff == 0)
920 			return (EINVAL);
921 
922 		if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&shdr,
923 		    sizeof (shdr), (offset_t)ehdr->e_shoff, UIO_SYSSPACE, 0,
924 		    (rlim64_t)0, credp, &resid)) != 0)
925 			return (error);
926 
927 		if (*nshdrs == 0)
928 			*nshdrs = shdr.sh_size;
929 		if (*shstrndx == SHN_XINDEX)
930 			*shstrndx = shdr.sh_link;
931 		if (*nphdrs == PN_XNUM && shdr.sh_info != 0)
932 			*nphdrs = shdr.sh_info;
933 	}
934 
935 	return (0);
936 }
937 
938 #ifdef _ELF32_COMPAT
939 extern size_t elf_nphdr_max;
940 #else
941 size_t elf_nphdr_max = 1000;
942 #endif
943 
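/*
 * Read the program header table into a kmem buffer whose size is computed
 * from nphdrs and e_phentsize.  Once the table would exceed elf_nphdr_max
 * entries the allocation is done KM_NOSLEEP, so a corrupt or hostile
 * header count fails with ENOMEM rather than blocking in kmem_alloc().
 */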
944 static int
945 getelfphdr(vnode_t *vp, cred_t *credp, const Ehdr *ehdr, int nphdrs,
946     caddr_t *phbasep, ssize_t *phsizep)
947 {
948 	ssize_t resid, minsize;
949 	int err;
950 
951 	/*
952 	 * Since we're going to be using e_phentsize to iterate down the
953 	 * array of program headers, it must be 8-byte aligned or else
954 	 * we might cause a misaligned access. We use all members through
955 	 * p_flags on 32-bit ELF files and p_memsz on 64-bit ELF files so
956 	 * e_phentsize must be at least large enough to include those
957 	 * members.
958 	 */
959 #if !defined(_LP64) || defined(_ELF32_COMPAT)
960 	minsize = offsetof(Phdr, p_flags) + sizeof (((Phdr *)NULL)->p_flags);
961 #else
962 	minsize = offsetof(Phdr, p_memsz) + sizeof (((Phdr *)NULL)->p_memsz);
963 #endif
964 	if (ehdr->e_phentsize < minsize || (ehdr->e_phentsize & 3))
965 		return (EINVAL);
966 
967 	*phsizep = nphdrs * ehdr->e_phentsize;
968 
969 	if (*phsizep > sizeof (Phdr) * elf_nphdr_max) {
970 		if ((*phbasep = kmem_alloc(*phsizep, KM_NOSLEEP)) == NULL)
971 			return (ENOMEM);
972 	} else {
973 		*phbasep = kmem_alloc(*phsizep, KM_SLEEP);
974 	}
975 
976 	if ((err = vn_rdwr(UIO_READ, vp, *phbasep, *phsizep,
977 	    (offset_t)ehdr->e_phoff, UIO_SYSSPACE, 0, (rlim64_t)0,
978 	    credp, &resid)) != 0) {
979 		kmem_free(*phbasep, *phsizep);
980 		*phbasep = NULL;
981 		return (err);
982 	}
983 
984 	return (0);
985 }
986 
987 #ifdef _ELF32_COMPAT
988 extern size_t elf_nshdr_max;
989 extern size_t elf_shstrtab_max;
990 #else
991 size_t elf_nshdr_max = 10000;
992 size_t elf_shstrtab_max = 100 * 1024;
993 #endif
994 
995 
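/*
 * Read the section header table and the section-name string table from
 * the object on vp, applying the same KM_NOSLEEP treatment as getelfphdr()
 * once the tables exceed elf_nshdr_max entries or elf_shstrtab_max bytes.
 * Both buffers are returned to the caller, who is responsible for freeing
 * them.
 */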
996 static int
997 getelfshdr(vnode_t *vp, cred_t *credp, const Ehdr *ehdr,
998     int nshdrs, int shstrndx, caddr_t *shbasep, ssize_t *shsizep,
999     char **shstrbasep, ssize_t *shstrsizep)
1000 {
1001 	ssize_t resid, minsize;
1002 	int err;
1003 	Shdr *shdr;
1004 
1005 	/*
1006 	 * Since we're going to be using e_shentsize to iterate down the
1007 	 * array of section headers, it must be 8-byte aligned or else
1008 	 * we might cause a misaligned access. We use all members through
1009 	 * sh_entsize (on both 32- and 64-bit ELF files) so e_shentsize
1010 	 * must be at least large enough to include that member. The index
1011 	 * of the string table section must also be valid.
1012 	 */
1013 	minsize = offsetof(Shdr, sh_entsize) + sizeof (shdr->sh_entsize);
1014 	if (ehdr->e_shentsize < minsize || (ehdr->e_shentsize & 3) ||
1015 	    shstrndx >= nshdrs)
1016 		return (EINVAL);
1017 
1018 	*shsizep = nshdrs * ehdr->e_shentsize;
1019 
1020 	if (*shsizep > sizeof (Shdr) * elf_nshdr_max) {
1021 		if ((*shbasep = kmem_alloc(*shsizep, KM_NOSLEEP)) == NULL)
1022 			return (ENOMEM);
1023 	} else {
1024 		*shbasep = kmem_alloc(*shsizep, KM_SLEEP);
1025 	}
1026 
1027 	if ((err = vn_rdwr(UIO_READ, vp, *shbasep, *shsizep,
1028 	    (offset_t)ehdr->e_shoff, UIO_SYSSPACE, 0, (rlim64_t)0,
1029 	    credp, &resid)) != 0) {
1030 		kmem_free(*shbasep, *shsizep);
1031 		return (err);
1032 	}
1033 
1034 	/*
1035 	 * Pull the section string table out of the vnode; fail if the size
1036 	 * is zero.
1037 	 */
1038 	shdr = (Shdr *)(*shbasep + shstrndx * ehdr->e_shentsize);
1039 	if ((*shstrsizep = shdr->sh_size) == 0) {
1040 		kmem_free(*shbasep, *shsizep);
1041 		return (EINVAL);
1042 	}
1043 
1044 	if (*shstrsizep > elf_shstrtab_max) {
1045 		if ((*shstrbasep = kmem_alloc(*shstrsizep,
1046 		    KM_NOSLEEP)) == NULL) {
1047 			kmem_free(*shbasep, *shsizep);
1048 			return (ENOMEM);
1049 		}
1050 	} else {
1051 		*shstrbasep = kmem_alloc(*shstrsizep, KM_SLEEP);
1052 	}
1053 
1054 	if ((err = vn_rdwr(UIO_READ, vp, *shstrbasep, *shstrsizep,
1055 	    (offset_t)shdr->sh_offset, UIO_SYSSPACE, 0, (rlim64_t)0,
1056 	    credp, &resid)) != 0) {
1057 		kmem_free(*shbasep, *shsizep);
1058 		kmem_free(*shstrbasep, *shstrsizep);
1059 		return (err);
1060 	}
1061 
1062 	/*
1063 	 * Make sure the strtab is null-terminated so that we don't
1064 	 * run off the end of the table.
1065 	 */
1066 	(*shstrbasep)[*shstrsizep - 1] = '\0';
1067 
1068 	return (0);
1069 }
1070 
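/*
 * Map the PT_LOAD segments of the object on vp into the current address
 * space and hand back the interesting program headers (PT_PHDR, PT_INTERP,
 * PT_SHLIB, PT_SUNWDTRACE).  For ET_DYN objects a hole is obtained from
 * map_addr() and its base is returned through voffset.  If a PT_INTERP
 * header has been seen but no PT_PHDR, the remaining PT_LOAD segments are
 * skipped; elfexec() then hands the whole file to the interpreter via
 * AT_EXECFD instead.
 */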
1071 static int
1072 mapelfexec(
1073 	vnode_t *vp,
1074 	Ehdr *ehdr,
1075 	int nphdrs,
1076 	caddr_t phdrbase,
1077 	Phdr **uphdr,
1078 	Phdr **dyphdr,
1079 	Phdr **stphdr,
1080 	Phdr **dtphdr,
1081 	Phdr *dataphdrp,
1082 	caddr_t *bssbase,
1083 	caddr_t *brkbase,
1084 	intptr_t *voffset,
1085 	intptr_t *minaddr,
1086 	size_t len,
1087 	long *execsz,
1088 	size_t *brksize)
1089 {
1090 	Phdr *phdr;
1091 	int i, prot, error;
1092 	caddr_t addr;
1093 	size_t zfodsz;
1094 	int ptload = 0;
1095 	int page;
1096 	off_t offset;
1097 	int hsize = ehdr->e_phentsize;
1098 	caddr_t mintmp = (caddr_t)-1;
1099 
1100 	if (ehdr->e_type == ET_DYN) {
1101 		/*
1102 		 * Obtain the virtual address of a hole in the
1103 		 * address space to map the "interpreter".
1104 		 */
1105 		map_addr(&addr, len, (offset_t)0, 1, 0);
1106 		if (addr == NULL)
1107 			return (ENOMEM);
1108 		*voffset = (intptr_t)addr;
1109 	} else {
1110 		*voffset = 0;
1111 	}
1112 	phdr = (Phdr *)phdrbase;
1113 	for (i = nphdrs; i > 0; i--) {
1114 		switch (phdr->p_type) {
1115 		case PT_LOAD:
1116 			if ((*dyphdr != NULL) && (*uphdr == NULL))
1117 				return (0);
1118 
1119 			ptload = 1;
1120 			prot = PROT_USER;
1121 			if (phdr->p_flags & PF_R)
1122 				prot |= PROT_READ;
1123 			if (phdr->p_flags & PF_W)
1124 				prot |= PROT_WRITE;
1125 			if (phdr->p_flags & PF_X)
1126 				prot |= PROT_EXEC;
1127 
1128 			addr = (caddr_t)((uintptr_t)phdr->p_vaddr + *voffset);
1129 
1130 			/*
1131 			 * Keep track of the segment with the lowest starting
1132 			 * address.
1133 			 */
1134 			if (addr < mintmp)
1135 				mintmp = addr;
1136 
1137 			zfodsz = (size_t)phdr->p_memsz - phdr->p_filesz;
1138 
1139 			offset = phdr->p_offset;
1140 			if (((uintptr_t)offset & PAGEOFFSET) ==
1141 			    ((uintptr_t)addr & PAGEOFFSET) &&
1142 			    (!(vp->v_flag & VNOMAP))) {
1143 				page = 1;
1144 			} else {
1145 				page = 0;
1146 			}
1147 
1148 			if (curproc->p_brkpageszc != 0 && phdr == dataphdrp &&
1149 			    (prot & PROT_WRITE)) {
1150 				/*
1151 				 * segvn only uses large pages for segments
1152 				 * that have the requested large page size
1153 				 * aligned base and size. To ensure that the
1154 				 * part of bss that starts at a heap large page
1155 				 * size boundary gets mapped by large pages,
1156 				 * create 2 bss segvn segments, which is
1157 				 * accomplished by calling execmap twice. The
1158 				 * first execmap creates the bss segvn segment
1159 				 * that is below the large page boundary; it
1160 				 * will be mapped with base pages. If the bss
1161 				 * start is already large page aligned, only 1
1162 				 * bss segment is created. The second bss
1163 				 * segment's size is large page size aligned
1164 				 * so that segvn uses large pages for that
1165 				 * segment, and it also makes the heap that
1166 				 * starts right after bss begin at a large
1167 				 * page boundary.
1168 				 */
1169 				uint_t	szc = curproc->p_brkpageszc;
1170 				size_t pgsz = page_get_pagesize(szc);
1171 				caddr_t zaddr = addr + phdr->p_filesz;
1172 				size_t zlen = P2NPHASE((uintptr_t)zaddr, pgsz);
1173 
1174 				ASSERT(pgsz > PAGESIZE);
1175 
1176 				if (error = execmap(vp, addr, phdr->p_filesz,
1177 				    zlen, phdr->p_offset, prot, page, szc))
1178 					goto bad;
1179 				if (zfodsz > zlen) {
1180 					zfodsz -= zlen;
1181 					zaddr += zlen;
1182 					zlen = P2ROUNDUP(zfodsz, pgsz);
1183 					if (error = execmap(vp, zaddr, 0, zlen,
1184 					    phdr->p_offset, prot, page, szc))
1185 						goto bad;
1186 				}
1187 				if (brksize != NULL)
1188 					*brksize = zlen - zfodsz;
1189 			} else {
1190 				if (error = execmap(vp, addr, phdr->p_filesz,
1191 				    zfodsz, phdr->p_offset, prot, page, 0))
1192 					goto bad;
1193 			}
1194 
1195 			if (bssbase != NULL && addr >= *bssbase &&
1196 			    phdr == dataphdrp) {
1197 				*bssbase = addr + phdr->p_filesz;
1198 			}
1199 			if (brkbase != NULL && addr >= *brkbase) {
1200 				*brkbase = addr + phdr->p_memsz;
1201 			}
1202 
1203 			*execsz += btopr(phdr->p_memsz);
1204 			break;
1205 
1206 		case PT_INTERP:
1207 			if (ptload)
1208 				goto bad;
1209 			*dyphdr = phdr;
1210 			break;
1211 
1212 		case PT_SHLIB:
1213 			*stphdr = phdr;
1214 			break;
1215 
1216 		case PT_PHDR:
1217 			if (ptload)
1218 				goto bad;
1219 			*uphdr = phdr;
1220 			break;
1221 
1222 		case PT_NULL:
1223 		case PT_DYNAMIC:
1224 		case PT_NOTE:
1225 			break;
1226 
1227 		case PT_SUNWDTRACE:
1228 			if (dtphdr != NULL)
1229 				*dtphdr = phdr;
1230 			break;
1231 
1232 		default:
1233 			break;
1234 		}
1235 		phdr = (Phdr *)((caddr_t)phdr + hsize);
1236 	}
1237 
1238 	if (minaddr != NULL) {
1239 		ASSERT(mintmp != (caddr_t)-1);
1240 		*minaddr = (intptr_t)mintmp;
1241 	}
1242 
1243 	return (0);
1244 bad:
1245 	if (error == 0)
1246 		error = EINVAL;
1247 	return (error);
1248 }
1249 
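/*
 * Append an ELF note with name "CORE" and the given type and description
 * to the core file at *offsetp, advancing *offsetp past the padded
 * descriptor.
 */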
1250 int
1251 elfnote(vnode_t *vp, offset_t *offsetp, int type, int descsz, void *desc,
1252     rlim64_t rlimit, cred_t *credp)
1253 {
1254 	Note note;
1255 	int error;
1256 
1257 	bzero(&note, sizeof (note));
1258 	bcopy("CORE", note.name, 4);
1259 	note.nhdr.n_type = type;
1260 	/*
1261 	 * The System V ABI states that n_namesz must be the length of the
1262 	 * string that follows the Nhdr structure including the terminating
1263 	 * null. The ABI also specifies that sufficient padding should be
1264 	 * included so that the description that follows the name string
1265 	 * begins on a 4- or 8-byte boundary for 32- and 64-bit binaries
1266 	 * respectively. However, since this change was not made correctly
1267 	 * at the time of the 64-bit port, both 32- and 64-bit binaries'
1268 	 * descriptions are only guaranteed to begin on a 4-byte boundary.
1269 	 */
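	/*
	 * (For example, a 10-byte description is padded to 12 bytes here,
	 * since Word is 4 bytes for both ELF classes - which is also why
	 * 8-byte alignment cannot be assumed by consumers.)
	 */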
1270 	note.nhdr.n_namesz = 5;
1271 	note.nhdr.n_descsz = roundup(descsz, sizeof (Word));
1272 
1273 	if (error = core_write(vp, UIO_SYSSPACE, *offsetp, &note,
1274 	    sizeof (note), rlimit, credp))
1275 		return (error);
1276 
1277 	*offsetp += sizeof (note);
1278 
1279 	if (error = core_write(vp, UIO_SYSSPACE, *offsetp, desc,
1280 	    note.nhdr.n_descsz, rlimit, credp))
1281 		return (error);
1282 
1283 	*offsetp += note.nhdr.n_descsz;
1284 	return (0);
1285 }
1286 
1287 /*
1288  * Copy the section data from one vnode to the section of another vnode.
1289  */
1290 static void
1291 copy_scn(Shdr *src, vnode_t *src_vp, Shdr *dst, vnode_t *dst_vp, Off *doffset,
1292     void *buf, size_t size, cred_t *credp, rlim64_t rlimit)
1293 {
1294 	ssize_t resid;
1295 	size_t len, n = src->sh_size;
1296 	offset_t off = 0;
1297 
1298 	while (n != 0) {
1299 		len = MIN(size, n);
1300 		if (vn_rdwr(UIO_READ, src_vp, buf, len, src->sh_offset + off,
1301 		    UIO_SYSSPACE, 0, (rlim64_t)0, credp, &resid) != 0 ||
1302 		    resid >= len ||
1303 		    core_write(dst_vp, UIO_SYSSPACE, *doffset + off,
1304 		    buf, len - resid, rlimit, credp) != 0) {
1305 			dst->sh_size = 0;
1306 			dst->sh_offset = 0;
1307 			return;
1308 		}
1309 
1310 		ASSERT(n >= len - resid);
1311 
1312 		n -= len - resid;
1313 		off += len - resid;
1314 	}
1315 
1316 	*doffset += src->sh_size;
1317 }
1318 
1319 #ifdef _ELF32_COMPAT
1320 extern size_t elf_datasz_max;
1321 #else
1322 size_t elf_datasz_max = 1 * 1024 * 1024;
1323 #endif
1324 
1325 /*
1326  * This function processes mappings that correspond to load objects to
1327  * examine their respective sections for elfcore(). It's called once with
1328  * v set to NULL to count the number of sections that we're going to need
1329  * and then again with v set to some allocated buffer that we fill in with
1330  * all the section data.
1331  */
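/*
 * (Counting note: index 0 is left for the null/extended-count section
 * header and the final index is reserved for the .shstrtab section, which
 * is why the counting pass reports i + 1 once any sections are found.)
 */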
1332 static int
1333 process_scns(core_content_t content, proc_t *p, cred_t *credp, vnode_t *vp,
1334     Shdr *v, int nv, rlim64_t rlimit, Off *doffsetp, int *nshdrsp)
1335 {
1336 	vnode_t *lastvp = NULL;
1337 	struct seg *seg;
1338 	int i, j;
1339 	void *data = NULL;
1340 	size_t datasz = 0;
1341 	shstrtab_t shstrtab;
1342 	struct as *as = p->p_as;
1343 	int error = 0;
1344 
1345 	if (v != NULL)
1346 		shstrtab_init(&shstrtab);
1347 
1348 	i = 1;
1349 	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
1350 		uint_t prot;
1351 		vnode_t *mvp;
1352 		void *tmp = NULL;
1353 		caddr_t saddr = seg->s_base;
1354 		caddr_t naddr;
1355 		caddr_t eaddr;
1356 		size_t segsize;
1357 
1358 		Ehdr ehdr;
1359 		int nshdrs, shstrndx, nphdrs;
1360 		caddr_t shbase;
1361 		ssize_t shsize;
1362 		char *shstrbase;
1363 		ssize_t shstrsize;
1364 
1365 		Shdr *shdr;
1366 		const char *name;
1367 		size_t sz;
1368 		uintptr_t off;
1369 
1370 		int ctf_ndx = 0;
1371 		int symtab_ndx = 0;
1372 
1373 		/*
1374 		 * Since we're just looking for text segments of load
1375 		 * objects, we only care about the protection bits; we don't
1376 		 * care about the actual size of the segment so we use the
1377 		 * reserved size. If the segment's size is zero, there's
1378 		 * something fishy going on so we ignore this segment.
1379 		 */
1380 		if (seg->s_ops != &segvn_ops ||
1381 		    SEGOP_GETVP(seg, seg->s_base, &mvp) != 0 ||
1382 		    mvp == lastvp || mvp == NULL || mvp->v_type != VREG ||
1383 		    (segsize = pr_getsegsize(seg, 1)) == 0)
1384 			continue;
1385 
1386 		eaddr = saddr + segsize;
1387 		prot = pr_getprot(seg, 1, &tmp, &saddr, &naddr, eaddr);
1388 		pr_getprot_done(&tmp);
1389 
1390 		/*
1391 		 * Skip this segment unless the protection bits look like
1392 		 * what we'd expect for a text segment.
1393 		 */
1394 		if ((prot & (PROT_WRITE | PROT_EXEC)) != PROT_EXEC)
1395 			continue;
1396 
1397 		if (getelfhead(mvp, credp, &ehdr, &nshdrs, &shstrndx,
1398 		    &nphdrs) != 0 ||
1399 		    getelfshdr(mvp, credp, &ehdr, nshdrs, shstrndx,
1400 		    &shbase, &shsize, &shstrbase, &shstrsize) != 0)
1401 			continue;
1402 
1403 		off = ehdr.e_shentsize;
1404 		for (j = 1; j < nshdrs; j++, off += ehdr.e_shentsize) {
1405 			Shdr *symtab = NULL, *strtab;
1406 
1407 			shdr = (Shdr *)(shbase + off);
1408 
1409 			if (shdr->sh_name >= shstrsize)
1410 				continue;
1411 
1412 			name = shstrbase + shdr->sh_name;
1413 
1414 			if (strcmp(name, shstrtab_data[STR_CTF]) == 0) {
1415 				if ((content & CC_CONTENT_CTF) == 0 ||
1416 				    ctf_ndx != 0)
1417 					continue;
1418 
1419 				if (shdr->sh_link > 0 &&
1420 				    shdr->sh_link < nshdrs) {
1421 					symtab = (Shdr *)(shbase +
1422 					    shdr->sh_link * ehdr.e_shentsize);
1423 				}
1424 
1425 				if (v != NULL && i < nv - 1) {
1426 					if (shdr->sh_size > datasz &&
1427 					    shdr->sh_size <= elf_datasz_max) {
1428 						if (data != NULL)
1429 							kmem_free(data, datasz);
1430 
1431 						datasz = shdr->sh_size;
1432 						data = kmem_alloc(datasz,
1433 						    KM_SLEEP);
1434 					}
1435 
1436 					v[i].sh_name = shstrtab_ndx(&shstrtab,
1437 					    STR_CTF);
1438 					v[i].sh_addr = (Addr)(uintptr_t)saddr;
1439 					v[i].sh_type = SHT_PROGBITS;
1440 					v[i].sh_addralign = 4;
1441 					*doffsetp = roundup(*doffsetp,
1442 					    v[i].sh_addralign);
1443 					v[i].sh_offset = *doffsetp;
1444 					v[i].sh_size = shdr->sh_size;
1445 					if (symtab == NULL)  {
1446 						v[i].sh_link = 0;
1447 					} else if (symtab->sh_type ==
1448 					    SHT_SYMTAB &&
1449 					    symtab_ndx != 0) {
1450 						v[i].sh_link =
1451 						    symtab_ndx;
1452 					} else {
1453 						v[i].sh_link = i + 1;
1454 					}
1455 
1456 					copy_scn(shdr, mvp, &v[i], vp,
1457 					    doffsetp, data, datasz, credp,
1458 					    rlimit);
1459 				}
1460 
1461 				ctf_ndx = i++;
1462 
1463 				/*
1464 				 * We've already dumped the symtab.
1465 				 */
1466 				if (symtab != NULL &&
1467 				    symtab->sh_type == SHT_SYMTAB &&
1468 				    symtab_ndx != 0)
1469 					continue;
1470 
1471 			} else if (strcmp(name,
1472 			    shstrtab_data[STR_SYMTAB]) == 0) {
1473 				if ((content & CC_CONTENT_SYMTAB) == 0 ||
1474 				    symtab != 0)
1475 					continue;
1476 
1477 				symtab = shdr;
1478 			}
1479 
1480 			if (symtab != NULL) {
1481 				if ((symtab->sh_type != SHT_DYNSYM &&
1482 				    symtab->sh_type != SHT_SYMTAB) ||
1483 				    symtab->sh_link == 0 ||
1484 				    symtab->sh_link >= nshdrs)
1485 					continue;
1486 
1487 				strtab = (Shdr *)(shbase +
1488 				    symtab->sh_link * ehdr.e_shentsize);
1489 
1490 				if (strtab->sh_type != SHT_STRTAB)
1491 					continue;
1492 
1493 				if (v != NULL && i < nv - 2) {
1494 					sz = MAX(symtab->sh_size,
1495 					    strtab->sh_size);
1496 					if (sz > datasz &&
1497 					    sz <= elf_datasz_max) {
1498 						if (data != NULL)
1499 							kmem_free(data, datasz);
1500 
1501 						datasz = sz;
1502 						data = kmem_alloc(datasz,
1503 						    KM_SLEEP);
1504 					}
1505 
1506 					if (symtab->sh_type == SHT_DYNSYM) {
1507 						v[i].sh_name = shstrtab_ndx(
1508 						    &shstrtab, STR_DYNSYM);
1509 						v[i + 1].sh_name = shstrtab_ndx(
1510 						    &shstrtab, STR_DYNSTR);
1511 					} else {
1512 						v[i].sh_name = shstrtab_ndx(
1513 						    &shstrtab, STR_SYMTAB);
1514 						v[i + 1].sh_name = shstrtab_ndx(
1515 						    &shstrtab, STR_STRTAB);
1516 					}
1517 
1518 					v[i].sh_type = symtab->sh_type;
1519 					v[i].sh_addr = symtab->sh_addr;
1520 					if (ehdr.e_type == ET_DYN ||
1521 					    v[i].sh_addr == 0)
1522 						v[i].sh_addr +=
1523 						    (Addr)(uintptr_t)saddr;
1524 					v[i].sh_addralign =
1525 					    symtab->sh_addralign;
1526 					*doffsetp = roundup(*doffsetp,
1527 					    v[i].sh_addralign);
1528 					v[i].sh_offset = *doffsetp;
1529 					v[i].sh_size = symtab->sh_size;
1530 					v[i].sh_link = i + 1;
1531 					v[i].sh_entsize = symtab->sh_entsize;
1532 					v[i].sh_info = symtab->sh_info;
1533 
1534 					copy_scn(symtab, mvp, &v[i], vp,
1535 					    doffsetp, data, datasz, credp,
1536 					    rlimit);
1537 
1538 					v[i + 1].sh_type = SHT_STRTAB;
1539 					v[i + 1].sh_flags = SHF_STRINGS;
1540 					v[i + 1].sh_addr = symtab->sh_addr;
1541 					if (ehdr.e_type == ET_DYN ||
1542 					    v[i + 1].sh_addr == 0)
1543 						v[i + 1].sh_addr +=
1544 						    (Addr)(uintptr_t)saddr;
1545 					v[i + 1].sh_addralign =
1546 					    strtab->sh_addralign;
1547 					*doffsetp = roundup(*doffsetp,
1548 					    v[i + 1].sh_addralign);
1549 					v[i + 1].sh_offset = *doffsetp;
1550 					v[i + 1].sh_size = strtab->sh_size;
1551 
1552 					copy_scn(strtab, mvp, &v[i + 1], vp,
1553 					    doffsetp, data, datasz, credp,
1554 					    rlimit);
1555 				}
1556 
1557 				if (symtab->sh_type == SHT_SYMTAB)
1558 					symtab_ndx = i;
1559 				i += 2;
1560 			}
1561 		}
1562 
1563 		kmem_free(shstrbase, shstrsize);
1564 		kmem_free(shbase, shsize);
1565 
1566 		lastvp = mvp;
1567 	}
1568 
1569 	if (v == NULL) {
1570 		if (i == 1)
1571 			*nshdrsp = 0;
1572 		else
1573 			*nshdrsp = i + 1;
1574 		goto done;
1575 	}
1576 
1577 	if (i != nv - 1) {
1578 		cmn_err(CE_WARN, "elfcore: core dump failed for "
1579 		    "process %d; address space is changing", p->p_pid);
1580 		error = EIO;
1581 		goto done;
1582 	}
1583 
1584 	v[i].sh_name = shstrtab_ndx(&shstrtab, STR_SHSTRTAB);
1585 	v[i].sh_size = shstrtab_size(&shstrtab);
1586 	v[i].sh_addralign = 1;
1587 	*doffsetp = roundup(*doffsetp, v[i].sh_addralign);
1588 	v[i].sh_offset = *doffsetp;
1589 	v[i].sh_flags = SHF_STRINGS;
1590 	v[i].sh_type = SHT_STRTAB;
1591 
1592 	if (v[i].sh_size > datasz) {
1593 		if (data != NULL)
1594 			kmem_free(data, datasz);
1595 
1596 		datasz = v[i].sh_size;
1597 		data = kmem_alloc(datasz,
1598 		    KM_SLEEP);
1599 	}
1600 
1601 	shstrtab_dump(&shstrtab, data);
1602 
1603 	if ((error = core_write(vp, UIO_SYSSPACE, *doffsetp,
1604 	    data, v[i].sh_size, rlimit, credp)) != 0)
1605 		goto done;
1606 
1607 	*doffsetp += v[i].sh_size;
1608 
1609 done:
1610 	if (data != NULL)
1611 		kmem_free(data, datasz);
1612 
1613 	return (error);
1614 }
1615 
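/*
 * Write an ELF core file for process p to vp: the ELF header, a pair of
 * PT_NOTE program headers (old and new note formats), one PT_LOAD program
 * header for each mapping in the address space, the contents of the
 * mappings selected by the core content settings, and - when CTF or
 * symbol table content is requested - the section headers gathered by
 * process_scns().
 */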
1616 int
1617 elfcore(vnode_t *vp, proc_t *p, cred_t *credp, rlim64_t rlimit, int sig,
1618     core_content_t content)
1619 {
1620 	offset_t poffset, soffset;
1621 	Off doffset;
1622 	int error, i, nphdrs, nshdrs;
1623 	int overflow = 0;
1624 	struct seg *seg;
1625 	struct as *as = p->p_as;
1626 	union {
1627 		Ehdr ehdr;
1628 		Phdr phdr[1];
1629 		Shdr shdr[1];
1630 	} *bigwad;
1631 	size_t bigsize;
1632 	size_t phdrsz, shdrsz;
1633 	Ehdr *ehdr;
1634 	Phdr *v;
1635 	caddr_t brkbase;
1636 	size_t brksize;
1637 	caddr_t stkbase;
1638 	size_t stksize;
1639 	int ntries = 0;
1640 
1641 top:
1642 	/*
1643 	 * Make sure we have everything we need (registers, etc.).
1644 	 * All other lwps have already stopped and are in an orderly state.
1645 	 */
1646 	ASSERT(p == ttoproc(curthread));
1647 	prstop(0, 0);
1648 
1649 	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
1650 	nphdrs = prnsegs(as, 0) + 2;		/* two CORE note sections */
1651 
1652 	/*
1653 	 * Count the number of section headers we're going to need.
1654 	 */
1655 	nshdrs = 0;
1656 	if (content & (CC_CONTENT_CTF | CC_CONTENT_SYMTAB)) {
1657 		(void) process_scns(content, p, credp, NULL, NULL, NULL, 0,
1658 		    NULL, &nshdrs);
1659 	}
1660 	AS_LOCK_EXIT(as, &as->a_lock);
1661 
1662 	ASSERT(nshdrs == 0 || nshdrs > 1);
1663 
1664 	/*
1665  * The core file contents may require zero section headers, but if
1666  * we overflow the 16 bits allotted to the program header count in
1667  * the ELF header, we'll need that section header at index zero.
1668 	 */
1669 	if (nshdrs == 0 && nphdrs >= PN_XNUM)
1670 		nshdrs = 1;
1671 
1672 	phdrsz = nphdrs * sizeof (Phdr);
1673 	shdrsz = nshdrs * sizeof (Shdr);
1674 
1675 	bigsize = MAX(sizeof (*bigwad), MAX(phdrsz, shdrsz));
1676 	bigwad = kmem_alloc(bigsize, KM_SLEEP);
1677 
1678 	ehdr = &bigwad->ehdr;
1679 	bzero(ehdr, sizeof (*ehdr));
1680 
1681 	ehdr->e_ident[EI_MAG0] = ELFMAG0;
1682 	ehdr->e_ident[EI_MAG1] = ELFMAG1;
1683 	ehdr->e_ident[EI_MAG2] = ELFMAG2;
1684 	ehdr->e_ident[EI_MAG3] = ELFMAG3;
1685 	ehdr->e_ident[EI_CLASS] = ELFCLASS;
1686 	ehdr->e_type = ET_CORE;
1687 
1688 #if !defined(_LP64) || defined(_ELF32_COMPAT)
1689 
1690 #if defined(__sparc)
1691 	ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
1692 	ehdr->e_machine = EM_SPARC;
1693 #elif defined(__i386) || defined(__i386_COMPAT)
1694 	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
1695 	ehdr->e_machine = EM_386;
1696 #else
1697 #error "no recognized machine type is defined"
1698 #endif
1699 
1700 #else	/* !defined(_LP64) || defined(_ELF32_COMPAT) */
1701 
1702 #if defined(__sparc)
1703 	ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
1704 	ehdr->e_machine = EM_SPARCV9;
1705 #elif defined(__amd64)
1706 	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
1707 	ehdr->e_machine = EM_AMD64;
1708 #else
1709 #error "no recognized 64-bit machine type is defined"
1710 #endif
1711 
1712 #endif	/* !defined(_LP64) || defined(_ELF32_COMPAT) */
1713 
1714 	/*
1715 	 * If the count of program headers or section headers or the index
1716 	 * of the section string table can't fit in the mere 16 bits
1717 	 * shortsightedly allotted to them in the ELF header, we use the
1718 	 * extended formats and put the real values in the section header
1719 	 * at index 0.
1720 	 */
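	/*
	 * (For example, a process with 70,000 mappings would overflow the
	 * 16-bit e_phnum field; it is set to PN_XNUM and the true program
	 * header count is recorded in section header zero's sh_info, which
	 * is exactly what getelfhead() reads back above.)
	 */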
1721 	ehdr->e_version = EV_CURRENT;
1722 	ehdr->e_ehsize = sizeof (Ehdr);
1723 
1724 	if (nphdrs >= PN_XNUM)
1725 		ehdr->e_phnum = PN_XNUM;
1726 	else
1727 		ehdr->e_phnum = (unsigned short)nphdrs;
1728 
1729 	ehdr->e_phoff = sizeof (Ehdr);
1730 	ehdr->e_phentsize = sizeof (Phdr);
1731 
1732 	if (nshdrs > 0) {
1733 		if (nshdrs >= SHN_LORESERVE)
1734 			ehdr->e_shnum = 0;
1735 		else
1736 			ehdr->e_shnum = (unsigned short)nshdrs;
1737 
1738 		if (nshdrs - 1 >= SHN_LORESERVE)
1739 			ehdr->e_shstrndx = SHN_XINDEX;
1740 		else
1741 			ehdr->e_shstrndx = (unsigned short)(nshdrs - 1);
1742 
1743 		ehdr->e_shoff = ehdr->e_phoff + ehdr->e_phentsize * nphdrs;
1744 		ehdr->e_shentsize = sizeof (Shdr);
1745 	}
1746 
1747 	if (error = core_write(vp, UIO_SYSSPACE, (offset_t)0, ehdr,
1748 	    sizeof (Ehdr), rlimit, credp))
1749 		goto done;
1750 
1751 	poffset = sizeof (Ehdr);
1752 	soffset = sizeof (Ehdr) + phdrsz;
1753 	doffset = sizeof (Ehdr) + phdrsz + shdrsz;
1754 
1755 	v = &bigwad->phdr[0];
1756 	bzero(v, phdrsz);
1757 
1758 	setup_old_note_header(&v[0], p);
1759 	v[0].p_offset = doffset = roundup(doffset, sizeof (Word));
1760 	doffset += v[0].p_filesz;
1761 
1762 	setup_note_header(&v[1], p);
1763 	v[1].p_offset = doffset = roundup(doffset, sizeof (Word));
1764 	doffset += v[1].p_filesz;
1765 
1766 	mutex_enter(&p->p_lock);
1767 
1768 	brkbase = p->p_brkbase;
1769 	brksize = p->p_brksize;
1770 
1771 	stkbase = p->p_usrstack - p->p_stksize;
1772 	stksize = p->p_stksize;
1773 
1774 	mutex_exit(&p->p_lock);
1775 
1776 	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
1777 	i = 2;
1778 	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
1779 		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
1780 		caddr_t saddr, naddr;
1781 		void *tmp = NULL;
1782 		extern struct seg_ops segspt_shmops;
1783 
1784 		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
1785 			uint_t prot;
1786 			size_t size;
1787 			int type;
1788 			vnode_t *mvp;
1789 
1790 			prot = pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
1791 			prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
1792 			if ((size = (size_t)(naddr - saddr)) == 0)
1793 				continue;
1794 			if (i == nphdrs) {
1795 				overflow++;
1796 				continue;
1797 			}
1798 			v[i].p_type = PT_LOAD;
1799 			v[i].p_vaddr = (Addr)(uintptr_t)saddr;
1800 			v[i].p_memsz = size;
1801 			if (prot & PROT_READ)
1802 				v[i].p_flags |= PF_R;
1803 			if (prot & PROT_WRITE)
1804 				v[i].p_flags |= PF_W;
1805 			if (prot & PROT_EXEC)
1806 				v[i].p_flags |= PF_X;
1807 
1808 			/*
1809 			 * Figure out which mappings to include in the core.
1810 			 */
1811 			type = SEGOP_GETTYPE(seg, saddr);
1812 
1813 			if (saddr == stkbase && size == stksize) {
1814 				if (!(content & CC_CONTENT_STACK))
1815 					goto exclude;
1816 
1817 			} else if (saddr == brkbase && size == brksize) {
1818 				if (!(content & CC_CONTENT_HEAP))
1819 					goto exclude;
1820 
1821 			} else if (seg->s_ops == &segspt_shmops) {
1822 				if (type & MAP_NORESERVE) {
1823 					if (!(content & CC_CONTENT_DISM))
1824 						goto exclude;
1825 				} else {
1826 					if (!(content & CC_CONTENT_ISM))
1827 						goto exclude;
1828 				}
1829 
1830 			} else if (seg->s_ops != &segvn_ops) {
1831 				goto exclude;
1832 
1833 			} else if (type & MAP_SHARED) {
1834 				if (shmgetid(p, saddr) != SHMID_NONE) {
1835 					if (!(content & CC_CONTENT_SHM))
1836 						goto exclude;
1837 
1838 				} else if (SEGOP_GETVP(seg, seg->s_base,
1839 				    &mvp) != 0 || mvp == NULL ||
1840 				    mvp->v_type != VREG) {
1841 					if (!(content & CC_CONTENT_SHANON))
1842 						goto exclude;
1843 
1844 				} else {
1845 					if (!(content & CC_CONTENT_SHFILE))
1846 						goto exclude;
1847 				}
1848 
1849 			} else if (SEGOP_GETVP(seg, seg->s_base, &mvp) != 0 ||
1850 			    mvp == NULL || mvp->v_type != VREG) {
1851 				if (!(content & CC_CONTENT_ANON))
1852 					goto exclude;
1853 
1854 			} else if (prot == (PROT_READ | PROT_EXEC)) {
1855 				if (!(content & CC_CONTENT_TEXT))
1856 					goto exclude;
1857 
1858 			} else if (prot == PROT_READ) {
1859 				if (!(content & CC_CONTENT_RODATA))
1860 					goto exclude;
1861 
1862 			} else {
1863 				if (!(content & CC_CONTENT_DATA))
1864 					goto exclude;
1865 			}
1866 
1867 			doffset = roundup(doffset, sizeof (Word));
1868 			v[i].p_offset = doffset;
1869 			v[i].p_filesz = size;
1870 			doffset += size;
1871 exclude:
1872 			i++;
1873 		}
1874 		ASSERT(tmp == NULL);
1875 	}
1876 	AS_LOCK_EXIT(as, &as->a_lock);
1877 
1878 	if (overflow || i != nphdrs) {
1879 		if (ntries++ == 0) {
1880 			kmem_free(bigwad, bigsize);
1881 			goto top;
1882 		}
1883 		cmn_err(CE_WARN, "elfcore: core dump failed for "
1884 		    "process %d; address space is changing", p->p_pid);
1885 		error = EIO;
1886 		goto done;
1887 	}
1888 
1889 	if ((error = core_write(vp, UIO_SYSSPACE, poffset,
1890 	    v, phdrsz, rlimit, credp)) != 0)
1891 		goto done;
1892 
1893 	if ((error = write_old_elfnotes(p, sig, vp, v[0].p_offset, rlimit,
1894 	    credp)) != 0)
1895 		goto done;
1896 
1897 	if ((error = write_elfnotes(p, sig, vp, v[1].p_offset, rlimit,
1898 	    credp, content)) != 0)
1899 		goto done;
1900 
1901 	for (i = 2; i < nphdrs; i++) {
1902 		if (v[i].p_filesz == 0)
1903 			continue;
1904 
1905 		/*
1906 		 * If dumping out this segment fails, rather than failing
1907 		 * the core dump entirely, we reset the size of the mapping
1908 		 * to zero to indicate that the data is absent from the core
1909 		 * file and OR in the PF_SUNW_FAILURE flag to differentiate
1910 		 * this from mappings that were excluded due to the core file
1911 		 * content settings.
1912 		 */
1913 		if ((error = core_seg(p, vp, v[i].p_offset,
1914 		    (caddr_t)(uintptr_t)v[i].p_vaddr, v[i].p_filesz,
1915 		    rlimit, credp)) != 0) {
1916 
1917 			/*
1918 			 * Since the space reserved for the segment is now
1919 			 * unused, we stash the errno in the first four
1920 			 * bytes. This undocumented interface will let us
1921 			 * understand the nature of the failure.
1922 			 */
1923 			(void) core_write(vp, UIO_SYSSPACE, v[i].p_offset,
1924 			    &error, sizeof (error), rlimit, credp);
1925 
1926 			v[i].p_filesz = 0;
1927 			v[i].p_flags |= PF_SUNW_FAILURE;
1928 			if ((error = core_write(vp, UIO_SYSSPACE,
1929 			    poffset + sizeof (v[i]) * i, &v[i], sizeof (v[i]),
1930 			    rlimit, credp)) != 0)
1931 				goto done;
1932 		}
1933 	}
1934 
1935 	if (nshdrs > 0) {
1936 		bzero(&bigwad->shdr[0], shdrsz);
1937 
1938 		if (nshdrs >= SHN_LORESERVE)
1939 			bigwad->shdr[0].sh_size = nshdrs;
1940 
1941 		if (nshdrs - 1 >= SHN_LORESERVE)
1942 			bigwad->shdr[0].sh_link = nshdrs - 1;
1943 
1944 		if (nphdrs >= PN_XNUM)
1945 			bigwad->shdr[0].sh_info = nphdrs;
1946 
1947 		if (nshdrs > 1) {
1948 			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
1949 			if ((error = process_scns(content, p, credp, vp,
1950 			    &bigwad->shdr[0], nshdrs, rlimit, &doffset,
1951 			    NULL)) != 0) {
1952 				AS_LOCK_EXIT(as, &as->a_lock);
1953 				goto done;
1954 			}
1955 			AS_LOCK_EXIT(as, &as->a_lock);
1956 		}
1957 
1958 		if ((error = core_write(vp, UIO_SYSSPACE, soffset,
1959 		    &bigwad->shdr[0], shdrsz, rlimit, credp)) != 0)
1960 			goto done;
1961 	}
1962 
1963 done:
1964 	kmem_free(bigwad, bigsize);
1965 	return (error);
1966 }
1967 
1968 #ifndef	_ELF32_COMPAT
1969 
1970 static struct execsw esw = {
1971 #ifdef	_LP64
1972 	elf64magicstr,
1973 #else	/* _LP64 */
1974 	elf32magicstr,
1975 #endif	/* _LP64 */
1976 	0,
1977 	5,
1978 	elfexec,
1979 	elfcore
1980 };
1981 
1982 static struct modlexec modlexec = {
1983 	&mod_execops, "exec module for elf %I%", &esw
1984 };
1985 
1986 #ifdef	_LP64
1987 extern int elf32exec(vnode_t *vp, execa_t *uap, uarg_t *args,
1988 			intpdata_t *idatap, int level, long *execsz,
1989 			int setid, caddr_t exec_file, cred_t *cred,
1990 			int brand_action);
1991 extern int elf32core(vnode_t *vp, proc_t *p, cred_t *credp,
1992 			rlim64_t rlimit, int sig, core_content_t content);
1993 
1994 static struct execsw esw32 = {
1995 	elf32magicstr,
1996 	0,
1997 	5,
1998 	elf32exec,
1999 	elf32core
2000 };
2001 
2002 static struct modlexec modlexec32 = {
2003 	&mod_execops, "32-bit exec module for elf", &esw32
2004 };
2005 #endif	/* _LP64 */
2006 
2007 static struct modlinkage modlinkage = {
2008 	MODREV_1,
2009 	(void *)&modlexec,
2010 #ifdef	_LP64
2011 	(void *)&modlexec32,
2012 #endif	/* _LP64 */
2013 	NULL
2014 };
2015 
2016 int
2017 _init(void)
2018 {
2019 	return (mod_install(&modlinkage));
2020 }
2021 
2022 int
2023 _fini(void)
2024 {
2025 	return (mod_remove(&modlinkage));
2026 }
2027 
2028 int
2029 _info(struct modinfo *modinfop)
2030 {
2031 	return (mod_info(&modlinkage, modinfop));
2032 }
2033 
2034 #endif	/* !_ELF32_COMPAT */
2035