/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright 2013 OmniTI Computer Consulting, Inc. All rights reserved. */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/var.h>
#include <sys/proc.h>
#include <sys/tuneable.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/vmparam.h>
#include <sys/fcntl.h>
#include <sys/lwpchan_impl.h>
#include <sys/nbmlock.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_dev.h>
#include <vm/seg_vn.h>

int use_brk_lpg = 1;
int use_stk_lpg = 1;

static int brk_lpg(caddr_t nva);
static int grow_lpg(caddr_t sp);

int
brk(caddr_t nva)
{
	int error;
	proc_t *p = curproc;

	/*
	 * Serialize brk operations on an address space.
	 * This also serves as the lock protecting p_brksize
	 * and p_brkpageszc.
	 */
	as_rangelock(p->p_as);
	if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		error = brk_lpg(nva);
	} else {
		error = brk_internal(nva, p->p_brkpageszc);
	}
	as_rangeunlock(p->p_as);
	return ((error != 0 ? set_errno(error) : 0));
}
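
/*
 * Example (illustrative only, not part of this file): userland normally
 * reaches the brk(2) entry point above through libc's brk(3C)/sbrk(3C)
 * wrappers.  A minimal sketch of growing the heap by one page and
 * touching it, assuming a POSIX-style userland:
 *
 *	#include <unistd.h>
 *	#include <string.h>
 *
 *	void
 *	grow_heap_one_page(void)
 *	{
 *		long pg = sysconf(_SC_PAGESIZE);
 *		void *old = sbrk(0);
 *		if (sbrk(pg) != (void *)-1)
 *			memset(old, 0, (size_t)pg);
 *	}
 *
 * The as_rangelock() serialization above is what keeps two threads doing
 * this concurrently from racing on p_brksize.
 */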

/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call brk_internal().
 * Returns 0 on success.
 */
static int
brk_lpg(caddr_t nva)
{
	struct proc *p = curproc;
	size_t pgsz, len;
	caddr_t addr, brkend;
	caddr_t bssbase = p->p_bssbase;
	caddr_t brkbase = p->p_brkbase;
	int oszc, szc;
	int err;

	oszc = p->p_brkpageszc;

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk_internal() will initialize it.
	 */
	if (brkbase == 0) {
		return (brk_internal(nva, oszc));
	}

	len = nva - bssbase;

	pgsz = map_pgsz(MAPPGSZ_HEAP, p, bssbase, len, 0);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 */
	if (szc <= oszc) {
		err = brk_internal(nva, oszc);
		/* If failed, back off to base page size. */
		if (err != 0 && oszc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	err = brk_internal(nva, szc);
	/* If using szc failed, map with base page size and return. */
	if (err != 0) {
		if (szc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	/*
	 * Round up brk base to a large page boundary and remap
	 * anything in the segment already faulted in beyond that
	 * point.
	 */
	addr = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase, pgsz);
	brkend = brkbase + p->p_brksize;
	len = brkend - addr;
	/* Check that len is not negative. Update page size code for heap. */
	if (addr >= p->p_bssbase && brkend > addr && IS_P2ALIGNED(len, pgsz)) {
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
		p->p_brkpageszc = szc;
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}
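
/*
 * Worked example of the remap window above (numbers are illustrative):
 * with a 4M large page (pgsz == 0x400000), p_bssbase == 0x08052000 and
 * brkend == 0x08c00000:
 *
 *	addr = P2ROUNDUP(0x08052000, 0x400000) = 0x08400000
 *	len  = 0x08c00000 - 0x08400000         = 0x00800000
 *
 * len is a multiple of pgsz and addr lies in [bssbase, brkend), so
 * as_setpagesize() upgrades two large pages' worth of existing heap;
 * the unaligned head below 0x08400000 stays at its old page size.
 */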

/*
 * Returns 0 on success.
 */
int
brk_internal(caddr_t nva, uint_t brkszc)
{
	caddr_t ova;			/* current break address */
	size_t size;
	int	error;
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t pgsz;
	uint_t szc;
	rctl_qty_t as_rctl;

	/*
	 * extend heap to brkszc alignment but use current p->p_brkpageszc
	 * for the newly created segment. This allows the new extension
	 * segment to be concatenated successfully with the existing brk
	 * segment.
	 */
	if ((szc = brkszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
	} else {
		pgsz = PAGESIZE;
	}

	mutex_enter(&p->p_lock);
	as_rctl = rctl_enforced_value(rctlproc_legacy[RLIMIT_DATA],
	    p->p_rctls, p);
	mutex_exit(&p->p_lock);

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk() will initialize it.
	 */
	if (p->p_brkbase == 0)
		p->p_brkbase = nva;

	/*
	 * Before multiple page size support existed, p_brksize was the
	 * exact user-requested heap size, not rounded to the pagesize.
	 * If pgsz is greater than PAGESIZE, compute the heap size as the
	 * real new heap size by rounding it up to pgsz.  This is useful
	 * because we may want to know where the heap ends without knowing
	 * the heap pagesize (e.g. some old code), and if the heap pagesize
	 * changes we can update p_brkpageszc without adding a new mapping
	 * yet still know from p_brksize where the heap really ends.  The
	 * user-requested heap end is stored in a libc variable.
	 */
	if (pgsz > PAGESIZE) {
		caddr_t tnva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
		size = tnva - p->p_brkbase;
		if (tnva < p->p_brkbase || (size > p->p_brksize &&
		    size > (size_t)as_rctl)) {
			szc = 0;
			pgsz = PAGESIZE;
			size = nva - p->p_brkbase;
		}
	} else {
		size = nva - p->p_brkbase;
	}

	/*
	 * Use PAGESIZE to round up ova because we want the real value of
	 * the current heap end even if p_brkpageszc has changed since
	 * p_brksize was last computed.
	 */
	nva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
	ova = (caddr_t)P2ROUNDUP((uintptr_t)(p->p_brkbase + p->p_brksize),
	    PAGESIZE);

	if ((nva < p->p_brkbase) || (size > p->p_brksize &&
	    size > as_rctl)) {
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_DATA], p->p_rctls, p,
		    RCA_SAFE);
		mutex_exit(&p->p_lock);
		return (ENOMEM);
	}

	if (nva > ova) {
		struct segvn_crargs crargs =
		    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

		if (!(p->p_datprot & PROT_EXEC)) {
			crargs.prot &= ~PROT_EXEC;
		}

		/*
		 * Add new zfod mapping to extend UNIX data segment.
		 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies
		 * via map_pgszcvec(). Use AS_MAP_HEAP to get intermediate
		 * page sizes if ova is not aligned to szc's pgsz.
		 */
		if (szc > 0) {
			caddr_t rbss;

			rbss = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase,
			    pgsz);
			if (IS_P2ALIGNED(p->p_bssbase, pgsz) || ova > rbss) {
				crargs.szc = p->p_brkpageszc ? p->p_brkpageszc :
				    AS_MAP_NO_LPOOB;
			} else if (ova == rbss) {
				crargs.szc = szc;
			} else {
				crargs.szc = AS_MAP_HEAP;
			}
		} else {
			crargs.szc = AS_MAP_NO_LPOOB;
		}
		crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_UP;
		error = as_map(as, ova, (size_t)(nva - ova), segvn_create,
		    &crargs);
		if (error) {
			return (error);
		}

	} else if (nva < ova) {
		/*
		 * Release mapping to shrink UNIX data segment.
		 */
		(void) as_unmap(as, nva, (size_t)(ova - nva));
	}
	p->p_brksize = size;
	return (0);
}
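
/*
 * Example (illustrative, userland): the RLIMIT_DATA check above is what
 * turns an oversized break request into ENOMEM.  A sketch, assuming the
 * usual resource-limit interfaces:
 *
 *	#include <sys/resource.h>
 *	#include <unistd.h>
 *	#include <errno.h>
 *	#include <assert.h>
 *
 *	struct rlimit rl = { 1UL << 20, 1UL << 20 };
 *	(void) setrlimit(RLIMIT_DATA, &rl);
 *	if (sbrk(16 << 20) == (void *)-1)
 *		assert(errno == ENOMEM);
 *
 * The rctl_action() call above also delivers any configured resource
 * control action before the failure is returned.
 */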

/*
 * Grow the stack to include sp.  Return 1 if successful, 0 otherwise.
 * This routine assumes that the stack grows downward.
 */
int
grow(caddr_t sp)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t oldsize = p->p_stksize;
	size_t newsize;
	int err;

	/*
	 * Serialize grow operations on an address space.
	 * This also serves as the lock protecting p_stksize
	 * and p_stkpageszc.
	 */
	as_rangelock(as);
	if (use_stk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		err = grow_lpg(sp);
	} else {
		err = grow_internal(sp, p->p_stkpageszc);
	}
	as_rangeunlock(as);

	if (err == 0 && (newsize = p->p_stksize) > oldsize) {
		ASSERT(IS_P2ALIGNED(oldsize, PAGESIZE));
		ASSERT(IS_P2ALIGNED(newsize, PAGESIZE));
		/*
		 * Set up translations so the process doesn't have to fault in
		 * the stack pages we just gave it.
		 */
		(void) as_fault(as->a_hat, as, p->p_usrstack - newsize,
		    newsize - oldsize, F_INVAL, S_WRITE);
	}
	return ((err == 0 ? 1 : 0));
}
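
/*
 * Example (illustrative): nothing in userland calls grow() directly; the
 * pagefault path invokes it when an access lands below the current stack
 * segment.  A large local frame is enough to trigger it:
 *
 *	void
 *	touch_deep_frame(void)
 *	{
 *		volatile char buf[64 * 1024];
 *
 *		buf[0] = 1;
 *	}
 *
 * The first touch of buf faults, the kernel calls grow(), and the stack
 * silently extends, provided the new size stays within RLIMIT_STACK
 * (checked as p_stk_ctl in grow_internal() below).
 */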

/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call grow_internal().
 * Returns 0 on success.
 */
static int
grow_lpg(caddr_t sp)
{
	struct proc *p = curproc;
	size_t pgsz;
	size_t len, newsize;
	caddr_t addr, saddr;
	caddr_t growend;
	int oszc, szc;
	int err;

	newsize = p->p_usrstack - sp;

	oszc = p->p_stkpageszc;
	pgsz = map_pgsz(MAPPGSZ_STK, p, sp, newsize, 0);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 * This shouldn't happen as the stack never shrinks.
	 */
	if (szc <= oszc) {
		err = grow_internal(sp, oszc);
		/* failed, fall back to base page size */
		if (err != 0 && oszc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * We've grown sufficiently to switch to a new page size.
	 * So we are going to remap the whole segment with the new page size.
	 */
	err = grow_internal(sp, szc);
	/* The grow with szc failed, so fall back to base page size. */
	if (err != 0) {
		if (szc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * Round up stack pointer to a large page boundary and remap
	 * any pgsz pages in the segment already faulted in beyond that
	 * point.
	 */
	saddr = p->p_usrstack - p->p_stksize;
	addr = (caddr_t)P2ROUNDUP((uintptr_t)saddr, pgsz);
	growend = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack, pgsz);
	len = growend - addr;
	/* Check that len is not negative. Update page size code for stack. */
	if (addr >= saddr && growend > addr && IS_P2ALIGNED(len, pgsz)) {
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
		p->p_stkpageszc = szc;
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}

/*
 * This routine assumes that the stack grows downward.
 * Returns 0 on success, errno on failure.
 */
int
grow_internal(caddr_t sp, uint_t growszc)
{
	struct proc *p = curproc;
	size_t newsize;
	size_t oldsize;
	int    error;
	size_t pgsz;
	uint_t szc;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

	ASSERT(sp < p->p_usrstack);
	sp = (caddr_t)P2ALIGN((uintptr_t)sp, PAGESIZE);

	/*
	 * Grow to growszc alignment but use the current p->p_stkpageszc
	 * for the segvn_crargs szc passed to segvn_create.  When memcntl
	 * is used to increase the szc, this allows the new extension
	 * segment to be concatenated successfully with the existing stack
	 * segment.
	 */
	if ((szc = growszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
		newsize = p->p_usrstack - (caddr_t)P2ALIGN((uintptr_t)sp, pgsz);
		if (newsize > (size_t)p->p_stk_ctl) {
			szc = 0;
			pgsz = PAGESIZE;
			newsize = p->p_usrstack - sp;
		}
	} else {
		pgsz = PAGESIZE;
		newsize = p->p_usrstack - sp;
	}

	if (newsize > (size_t)p->p_stk_ctl) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p,
		    RCA_UNSAFE_ALL);

		return (ENOMEM);
	}

	oldsize = p->p_stksize;
	ASSERT(P2PHASE(oldsize, PAGESIZE) == 0);

	if (newsize <= oldsize) {	/* prevent the stack from shrinking */
		return (0);
	}

	if (!(p->p_stkprot & PROT_EXEC)) {
		crargs.prot &= ~PROT_EXEC;
	}
	/*
	 * Extend the stack with the proposed new growszc, which differs
	 * from p_stkpageszc only on a memcntl call that increases the
	 * stack pagesize.  AS_MAP_NO_LPOOB means use 0, and don't reapply
	 * OOB policies via map_pgszcvec().  Use AS_MAP_STACK to get
	 * intermediate page sizes if not aligned to szc's pgsz.
	 */
	if (szc > 0) {
		caddr_t oldsp = p->p_usrstack - oldsize;
		caddr_t austk = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack,
		    pgsz);

		if (IS_P2ALIGNED(p->p_usrstack, pgsz) || oldsp < austk) {
			crargs.szc = p->p_stkpageszc ? p->p_stkpageszc :
			    AS_MAP_NO_LPOOB;
		} else if (oldsp == austk) {
			crargs.szc = szc;
		} else {
			crargs.szc = AS_MAP_STACK;
		}
	} else {
		crargs.szc = AS_MAP_NO_LPOOB;
	}
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;

	if ((error = as_map(p->p_as, p->p_usrstack - newsize, newsize - oldsize,
	    segvn_create, &crargs)) != 0) {
		if (error == EAGAIN) {
			cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
			    "for pid %d (%s)", p->p_pid, PTOU(p)->u_comm);
		}
		return (error);
	}
	p->p_stksize = newsize;
	return (0);
}

/*
 * Find address for user to map.
 * If MAP_FIXED is not specified, we can pick any address we want, but we will
 * first try the value in *addrp if it is non-NULL.  This implements a way to
 * try to get a preferred address.
 */
int
choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
    int vacalign, uint_t flags)
{
	caddr_t basep = (caddr_t)(uintptr_t)((uintptr_t)*addrp & PAGEMASK);
	size_t lenp = len;

	ASSERT(AS_ISCLAIMGAP(as));	/* searches should be serialized */
	if (flags & MAP_FIXED) {
		(void) as_unmap(as, *addrp, len);
		return (0);
	} else if (basep != NULL && ((flags & MAP_ALIGN) == 0) &&
	    !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
		/* User supplied address was available */
		*addrp = basep;
	} else {
		/*
		 * No user supplied address or the address supplied was not
		 * available.
		 */
		map_addr(addrp, len, off, vacalign, flags);
	}
	if (*addrp == NULL)
		return (ENOMEM);
	return (0);
}
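
/*
 * Example (illustrative): the preferred-address behavior above is what
 * mmap(2) callers see when they pass a hint without MAP_FIXED; the hint
 * is honored when that range is free, otherwise another address is
 * chosen:
 *
 *	#include <sys/mman.h>
 *
 *	void *hint = (void *)0x60000000;
 *	void *p = mmap(hint, 8192, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *
 * p may or may not equal hint; only MAP_FIXED (which, per the as_unmap()
 * call above, evicts whatever is already mapped there) guarantees the
 * requested address.
 */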


/*
 * Used for MAP_ANON - fast way to get anonymous pages
 */
static int
zmap(struct as *as, caddr_t *addrp, size_t len, uint_t uprot, int flags,
    offset_t pos)
{
	struct segvn_crargs vn_a;
	int error;

	if ((PROT_ALL & uprot) != uprot)
		return (EACCES);

	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address.  First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}
	/*
	 * No need to worry about vac alignment for anonymous
	 * pages since this is a "clone" object that doesn't
	 * yet exist.
	 */
	error = choose_addr(as, addrp, len, pos, ADDR_NOVACALIGN, flags);
	if (error != 0) {
		return (error);
	}

	/*
	 * Use the seg_vn segment driver; passing in the NULL amp
	 * gives the desired "cloning" effect.
	 */
	vn_a.vp = NULL;
	vn_a.offset = 0;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = uprot;
	vn_a.maxprot = PROT_ALL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.cred = CRED();
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	return (as_map(as, *addrp, len, segvn_create, &vn_a));
}
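
/*
 * Example (illustrative): the fast path above is what services a plain
 * anonymous mapping request from userland:
 *
 *	#include <sys/mman.h>
 *
 *	size_t len = 1 << 20;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *	if (p != MAP_FAILED)
 *		(void) munmap(p, len);
 *
 * With a NULL vnode and NULL amp, segvn_create() supplies
 * zero-fill-on-demand pages with no backing object.
 */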

static int
smmap_common(caddr_t *addrp, size_t len,
    int prot, int flags, struct file *fp, offset_t pos)
{
	struct vnode *vp;
	struct as *as = curproc->p_as;
	uint_t uprot, maxprot, type;
	int error;
	int in_crit = 0;

	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
	    _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
	    MAP_TEXT | MAP_INITDATA)) != 0) {
		/* | MAP_RENAME */	/* not implemented, let user know */
		return (EINVAL);
	}

	if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
		return (EINVAL);
	}

	if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
		return (EINVAL);
	}

#if defined(__sparc)
	/*
	 * See if this is an "old mmap call".  If so, remember this
	 * fact and convert the flags value given to mmap to indicate
	 * the specified address in the system call must be used.
	 * _MAP_NEW is set by all new uses of mmap.
	 */
	if ((flags & _MAP_NEW) == 0)
		flags |= MAP_FIXED;
#endif
	flags &= ~_MAP_NEW;

	type = flags & MAP_TYPE;
	if (type != MAP_PRIVATE && type != MAP_SHARED)
		return (EINVAL);


	if (flags & MAP_ALIGN) {

		if (flags & MAP_FIXED)
			return (EINVAL);

		/* alignment needs to be a power of 2 >= page size */
		if (((uintptr_t)*addrp < PAGESIZE && (uintptr_t)*addrp != 0) ||
		    !ISP2((uintptr_t)*addrp))
			return (EINVAL);
	}
	/*
	 * Check for bad lengths and file position.
	 * We let the VOP_MAP routine check for negative lengths
	 * since on some vnode types this might be appropriate.
	 */
	if (len == 0 || (pos & (u_offset_t)PAGEOFFSET) != 0)
		return (EINVAL);

	maxprot = PROT_ALL;		/* start out allowing all accesses */
	uprot = prot | PROT_USER;

	if (fp == NULL) {
		ASSERT(flags & MAP_ANON);
		/* discard lwpchan mappings, like munmap() */
		if ((flags & MAP_FIXED) && curproc->p_lcp != NULL)
			lwpchan_delete_mapping(curproc, *addrp, *addrp + len);
		as_rangelock(as);
		error = zmap(as, addrp, len, uprot, flags, pos);
		as_rangeunlock(as);
		/*
		 * Tell machine specific code that lwp has mapped shared memory
		 */
		if (error == 0 && (flags & MAP_SHARED)) {
			/* EMPTY */
			LWP_MMODEL_SHARED_AS(*addrp, len);
		}
		return (error);
	} else if ((flags & MAP_ANON) != 0)
		return (EINVAL);

	vp = fp->f_vnode;

	/* Can't execute code from "noexec" mounted filesystem. */
	if ((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0)
		maxprot &= ~PROT_EXEC;

	/*
	 * These checks were added as part of large file support.
	 *
	 * Return ENXIO if the initial position is negative; return EOVERFLOW
	 * if (offset + len) would overflow the maximum allowed offset for the
	 * type of file descriptor being used.
	 */
	if (vp->v_type == VREG) {
		if (pos < 0)
			return (ENXIO);
		if ((offset_t)len > (OFFSET_MAX(fp) - pos))
			return (EOVERFLOW);
	}

	if (type == MAP_SHARED && (fp->f_flag & FWRITE) == 0) {
		/* no write access allowed */
		maxprot &= ~PROT_WRITE;
	}

	/*
	 * XXX - Do we also adjust maxprot based on protections
	 * of the vnode?  E.g. if no execute permission is given
	 * on the vnode for the current user, maxprot probably
	 * should disallow PROT_EXEC also?  This is different
	 * from the write access as this would be a per vnode
	 * test as opposed to a per fd test for writability.
	 */

	/*
	 * Verify that the specified protections are not greater than
	 * the maximum allowable protections.  Also test to make sure
	 * that the file descriptor allows for read access, since
	 * "write only" mappings are hard to do: normally we read from
	 * the file before the page can be written.
	 */
	if (((maxprot & uprot) != uprot) || (fp->f_flag & FREAD) == 0)
		return (EACCES);

	/*
	 * If the user specified an address, do some simple checks here
	 */
	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address.  First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}

	if ((prot & (PROT_READ | PROT_WRITE | PROT_EXEC)) &&
	    nbl_need_check(vp)) {
		int svmand;
		nbl_op_t nop;

		nbl_start_crit(vp, RW_READER);
		in_crit = 1;
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (error != 0)
			goto done;
		if ((prot & PROT_WRITE) && (type == MAP_SHARED)) {
			if (prot & (PROT_READ | PROT_EXEC)) {
				nop = NBL_READWRITE;
			} else {
				nop = NBL_WRITE;
			}
		} else {
			nop = NBL_READ;
		}
		if (nbl_conflict(vp, nop, 0, LONG_MAX, svmand, NULL)) {
			error = EACCES;
			goto done;
		}
	}

	/* discard lwpchan mappings, like munmap() */
	if ((flags & MAP_FIXED) && curproc->p_lcp != NULL)
		lwpchan_delete_mapping(curproc, *addrp, *addrp + len);

	/*
	 * Ok, now let the vnode map routine do its thing to set things up.
	 */
	error = VOP_MAP(vp, pos, as,
	    addrp, len, uprot, maxprot, flags, fp->f_cred, NULL);

	if (error == 0) {
		/*
		 * Tell machine specific code that lwp has mapped shared memory
		 */
		if (flags & MAP_SHARED) {
			/* EMPTY */
			LWP_MMODEL_SHARED_AS(*addrp, len);
		}
		if (vp->v_type == VREG &&
		    (flags & (MAP_TEXT | MAP_INITDATA)) != 0) {
			/*
			 * Mark this as an executable vnode
			 */
			mutex_enter(&vp->v_lock);
			vp->v_flag |= VVMEXEC;
			mutex_exit(&vp->v_lock);
		}
	}

done:
	if (in_crit)
		nbl_end_crit(vp);
	return (error);
}

#ifdef _LP64
/*
 * LP64 mmap(2) system call: 64-bit offset, 64-bit address.
 *
 * The "large file" mmap routine mmap64(2) is also mapped to this routine
 * by the 64-bit version of libc.
 *
 * Eventually, this should be the only version, and have smmap_common()
 * folded back into it again.  Some day.
 */
caddr_t
smmap64(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos)
{
	struct file *fp;
	int error;

	if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&addr, len, prot, flags,
		    NULL, (offset_t)pos);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&addr, len, prot, flags,
		    fp, (offset_t)pos);
		releasef(fd);
	} else
		error = EBADF;

	return (error ? (caddr_t)(uintptr_t)set_errno(error) : addr);
}
#endif	/* _LP64 */

#if defined(_SYSCALL32_IMPL) || defined(_ILP32)

/*
 * ILP32 mmap(2) system call: 32-bit offset, 32-bit address.
 */
caddr_t
smmap32(caddr32_t addr, size32_t len, int prot, int flags, int fd, off32_t pos)
{
	struct file *fp;
	int error;
	caddr_t a = (caddr_t)(uintptr_t)addr;

	if (flags & _MAP_LOW32)
		error = EINVAL;
	else if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&a, (size_t)len, prot,
		    flags | _MAP_LOW32, NULL, (offset_t)pos);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&a, (size_t)len, prot,
		    flags | _MAP_LOW32, fp, (offset_t)pos);
		releasef(fd);
	} else
		error = EBADF;

	ASSERT(error != 0 || (uintptr_t)(a + len) < (uintptr_t)UINT32_MAX);

	return (error ? (caddr_t)(uintptr_t)set_errno(error) : a);
}

/*
 * ILP32 mmap64(2) system call: 64-bit offset, 32-bit address.
 *
 * Now things really get ugly because we can't use the C-style
 * calling convention for more than 6 args, and 64-bit parameter
 * passing on 32-bit systems is less than clean.
 */

struct mmaplf32a {
	caddr_t addr;
	size_t len;
#ifdef _LP64
	/*
	 * 32-bit contents, 64-bit cells
	 */
	uint64_t prot;
	uint64_t flags;
	uint64_t fd;
	uint64_t offhi;
	uint64_t offlo;
#else
	/*
	 * 32-bit contents, 32-bit cells
	 */
	uint32_t prot;
	uint32_t flags;
	uint32_t fd;
	uint32_t offhi;
	uint32_t offlo;
#endif
};
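
/*
 * Example (illustrative): how a 64-bit file offset is split into the two
 * 32-bit cells above and reassembled by smmaplf32() below.  For
 * off == 0x0000000123456000:
 *
 *	uint64_t off = 0x0000000123456000ULL;
 *	uint32_t offhi = (uint32_t)(off >> 32);
 *	uint32_t offlo = (uint32_t)off;
 *
 * gives offhi == 0x1 and offlo == 0x23456000, and
 * ((u_offset_t)offhi << 32) | offlo recovers the original offset.  The
 * two cells trade places in the little-endian case because of how the
 * 64-bit argument is laid out on the 32-bit user stack.
 */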

int
smmaplf32(struct mmaplf32a *uap, rval_t *rvp)
{
	struct file *fp;
	int error;
	caddr_t a = uap->addr;
	int flags = (int)uap->flags;
	int fd = (int)uap->fd;
#ifdef _BIG_ENDIAN
	offset_t off = ((u_offset_t)uap->offhi << 32) | (u_offset_t)uap->offlo;
#else
	offset_t off = ((u_offset_t)uap->offlo << 32) | (u_offset_t)uap->offhi;
#endif

	if (flags & _MAP_LOW32)
		error = EINVAL;
	else if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&a, uap->len, (int)uap->prot,
		    flags | _MAP_LOW32, NULL, off);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&a, uap->len, (int)uap->prot,
		    flags | _MAP_LOW32, fp, off);
		releasef(fd);
	} else
		error = EBADF;

	if (error == 0)
		rvp->r_val1 = (uintptr_t)a;
	return (error);
}

#endif	/* _SYSCALL32_IMPL || _ILP32 */

int
munmap(caddr_t addr, size_t len)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;

	if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(EINVAL));

	/*
	 * Discard lwpchan mappings.
	 */
	if (p->p_lcp != NULL)
		lwpchan_delete_mapping(p, addr, addr + len);
	if (as_unmap(as, addr, len) != 0)
		return (set_errno(EINVAL));

	return (0);
}
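
/*
 * Example (illustrative): the alignment test above rejects unaligned
 * requests before the address space is touched:
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *	int r = munmap(p + 1, 8192);
 *
 * r is -1 with errno set to EINVAL because p + 1 is not page aligned,
 * while munmap(p, 8192) would succeed.
 */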

int
mprotect(caddr_t addr, size_t len, int prot)
{
	struct as *as = curproc->p_as;
	uint_t uprot = prot | PROT_USER;
	int error;

	if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
		return (set_errno(EINVAL));

	switch (valid_usr_range(addr, len, prot, as, as->a_userlimit)) {
	case RANGE_OKAY:
		break;
	case RANGE_BADPROT:
		return (set_errno(ENOTSUP));
	case RANGE_BADADDR:
	default:
		return (set_errno(ENOMEM));
	}

	error = as_setprot(as, addr, len, uprot);
	if (error)
		return (set_errno(error));
	return (0);
}
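
/*
 * Example (illustrative): a common use of the entry point above is to
 * turn the tail of an anonymous region into a guard page:
 *
 *	#include <sys/mman.h>
 *
 *	size_t pg = 4096;
 *	char *p = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *	if (p != MAP_FAILED)
 *		(void) mprotect(p + 3 * pg, pg, PROT_NONE);
 *
 * Any later store into the last page then faults with SIGSEGV instead of
 * silently overrunning the buffer.  (4096 is assumed here; a real caller
 * would use sysconf(_SC_PAGESIZE).)
 */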

#define	MC_CACHE	128			/* internal result buffer */
#define	MC_QUANTUM	(MC_CACHE * PAGESIZE)	/* addresses covered in loop */

int
mincore(caddr_t addr, size_t len, char *vecp)
{
	struct as *as = curproc->p_as;
	caddr_t ea;			/* end address of loop */
	size_t rl;			/* inner result length */
	char vec[MC_CACHE];		/* local vector cache */
	int error;
	model_t model;
	long	llen;

	model = get_udatamodel();
	/*
	 * Validate form of address parameters.
	 */
	if (model == DATAMODEL_NATIVE) {
		llen = (long)len;
	} else {
		llen = (int32_t)(size32_t)len;
	}
	if (((uintptr_t)addr & PAGEOFFSET) != 0 || llen <= 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(ENOMEM));

	/*
	 * Loop over subranges of interval [addr : addr + len), recovering
	 * results internally and then copying them out to caller.  Subrange
	 * is based on the size of MC_CACHE, defined above.
	 */
	for (ea = addr + len; addr < ea; addr += MC_QUANTUM) {
		error = as_incore(as, addr,
		    (size_t)MIN(MC_QUANTUM, ea - addr), vec, &rl);
		if (rl != 0) {
			rl = (rl + PAGESIZE - 1) / PAGESIZE;
			if (copyout(vec, vecp, rl) != 0)
				return (set_errno(EFAULT));
			vecp += rl;
		}
		if (error != 0)
			return (set_errno(ENOMEM));
	}
	return (0);
}
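
/*
 * Example (illustrative): querying residency through the entry point
 * above.  One result byte is produced per page, copied out in
 * MC_CACHE-sized batches by the loop:
 *
 *	#include <sys/types.h>
 *	#include <sys/mman.h>
 *
 *	size_t pg = 4096;
 *	char vec[4];
 *	char *p = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *
 *	p[0] = 1;
 *	int resident = (mincore((caddr_t)p, 4 * pg, vec) == 0 &&
 *	    (vec[0] & 1) != 0);
 *
 * resident is nonzero because the touched first page is in core.  (As
 * above, 4096 stands in for sysconf(_SC_PAGESIZE).)
 */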
1024