/* xref: /freebsd/sys/vm/vm_mmap.c (revision aa0a1e58f0189b0fde359a8bda032887e72057fa) */
/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct vnode *, vm_ooffset_t *, vm_object_t *);
static int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct cdev *, vm_ooffset_t *, vm_object_t *);
static int vm_mmap_shm(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct shmfd *, vm_ooffset_t, vm_object_t *);

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 */
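
/*
 * Illustrative userland sketch (not part of this file): mapping a file
 * at a non-page-aligned offset.  Per the comment above, the kernel maps
 * from trunc_page(pos) and returns a pointer adjusted up by the page
 * offset.  The file path is hypothetical and the file is assumed to be
 * longer than 100 bytes.
 */
#if 0	/* example only; build as a standalone program */
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tmp/example.dat", O_RDONLY);	/* hypothetical file */
	off_t pos = 100;				/* not page aligned */
	char *p;

	if (fd == -1)
		return (1);
	p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, pos);
	if (p == MAP_FAILED)
		return (1);
	printf("%c\n", p[0]);		/* byte 100 of the file */
	munmap(p, 4096);
	close(fd);
	return (0);
}
#endif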
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_in pkm;
#endif
	struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	objtype_t handle_type;
	int flags, error;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	fp = NULL;

	/* Make sure mapping fits into numeric range, etc. */
	if ((uap->len == 0 && !SV_CURPROC_FLAG(SV_AOUT) &&
	     curproc->p_osrel >= P_OSREL_MAP_ANON) ||
	    ((flags & MAP_ANON) && (uap->fd != -1 || pos != 0)))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		PROC_LOCK(td->td_proc);
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td->td_proc, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td->td_proc, RLIMIT_DATA));
		PROC_UNLOCK(td->td_proc);
	}
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		handle_type = OBJT_DEFAULT;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping file, get fp for validation and
		 * don't let the descriptor disappear on us if we block.
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type == DTYPE_SHM) {
			handle = fp->f_data;
			handle_type = OBJT_SWAP;
			maxprot = VM_PROT_NONE;

			/* FREAD should always be set. */
			if (fp->f_flag & FREAD)
				maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
			if (fp->f_flag & FWRITE)
				maxprot |= VM_PROT_WRITE;
			goto map;
		}
		if (fp->f_type != DTYPE_VNODE) {
			error = ENODEV;
			goto done;
		}
#if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \
    defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4)
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_NOSYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
#endif
		vp = fp->f_vnode;
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination? What if
		 * proc does a setuid?
		 */
		if (vp->v_mount != NULL && vp->v_mount->mnt_flag & MNT_NOEXEC)
			maxprot = VM_PROT_NONE;
		else
			maxprot = VM_PROT_EXECUTE;
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
			error = EACCES;
			goto done;
		}
		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.
		 */
		if ((flags & MAP_SHARED) != 0) {
			if ((fp->f_flag & FWRITE) != 0) {
				maxprot |= VM_PROT_WRITE;
			} else if ((prot & PROT_WRITE) != 0) {
				error = EACCES;
				goto done;
			}
		} else if (vp->v_type != VCHR || (fp->f_flag & FWRITE) != 0) {
			maxprot |= VM_PROT_WRITE;
		}
		handle = (void *)vp;
		handle_type = OBJT_VNODE;
	}
map:
	td->td_fpop = fp;
	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
	    flags, handle_type, handle, pos);
	td->td_fpop = NULL;
#ifdef HWPMC_HOOKS
	/* inform hwpmc(4) if an executable is being mapped */
	if (error == 0 && handle_type == OBJT_VNODE &&
	    (prot & PROT_EXEC)) {
		pkm.pm_file = handle;
		pkm.pm_address = (uintptr_t) addr;
		PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm);
	}
#endif
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}

int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{
	struct mmap_args oargs;

	oargs.addr = uap->addr;
	oargs.len = uap->len;
	oargs.prot = uap->prot;
	oargs.flags = uap->flags;
	oargs.fd = uap->fd;
	oargs.pos = uap->pos;
	return (mmap(td, &oargs));
}

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif				/* COMPAT_43 */


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	size_t len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	default:
		return (EINVAL);
	}
}
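
/*
 * Illustrative userland sketch: flushing a writable shared mapping with
 * msync().  Note that MS_ASYNC together with MS_INVALIDATE is rejected
 * with EINVAL by the check above.  The path is hypothetical and the
 * file is assumed to be at least one page long.
 */
#if 0	/* example only; build as a standalone program */
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tmp/example.dat", O_RDWR);	/* hypothetical file */
	char *p;

	if (fd == -1)
		return (1);
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	memset(p, 'x', 4096);
	/* Write the dirty pages back to the file before continuing. */
	if (msync(p, 4096, MS_SYNC) == -1)
		return (1);
	munmap(p, 4096);
	close(fd);
	return (0);
}
#endif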

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
#endif
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	if (size == 0)
		return (EINVAL);

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
#ifdef HWPMC_HOOKS
	/*
	 * Inform hwpmc if the address range being unmapped contains
	 * an executable region.
	 */
	pkm.pm_address = (uintptr_t) NULL;
	if (vm_map_lookup_entry(map, addr, &entry)) {
		for (;
		     entry != &map->header && entry->start < addr + size;
		     entry = entry->next) {
			if (vm_map_check_protection(map, entry->start,
				entry->end, VM_PROT_EXECUTE) == TRUE) {
				pkm.pm_address = (uintptr_t) addr;
				pkm.pm_size = (size_t) size;
				break;
			}
		}
	}
#endif
	vm_map_delete(map, addr, addr + size);

#ifdef HWPMC_HOOKS
	/* downgrade the lock to prevent a LOR with the pmc-sx lock */
	vm_map_lock_downgrade(map);
	if (pkm.pm_address != (uintptr_t) NULL)
		PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
	vm_map_unlock_read(map);
#else
	vm_map_unlock(map);
#endif
	/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_RESOURCE_SHORTAGE:
		return (ENOMEM);
	}
	return (EINVAL);
}
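
/*
 * Illustrative userland sketch: using mprotect() to revoke write access
 * to an anonymous region; the vm_map_protect() call above performs the
 * actual permission change.
 */
#if 0	/* example only; build as a standalone program */
#include <sys/mman.h>

int
main(void)
{
	char *p;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return (1);
	p[0] = 1;
	/* After this call a store to p[0] would deliver SIGSEGV. */
	if (mprotect(p, 4096, PROT_READ) == -1)
		return (1);
	munmap(p, 4096);
	return (0);
}
#endif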

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
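
/*
 * Illustrative userland sketch: minherit() with INHERIT_SHARE makes a
 * private anonymous region shared with children created afterwards, so
 * a child's store is visible to the parent.
 */
#if 0	/* example only; build as a standalone program */
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	int *p;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return (1);
	if (minherit(p, 4096, INHERIT_SHARE) == -1)
		return (1);
	if (fork() == 0) {
		p[0] = 42;		/* visible to the parent */
		_exit(0);
	}
	wait(NULL);
	return (p[0] == 42 ? 0 : 1);
}
#endif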

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;
	vm_map_t map;
	struct proc *p;
	int error;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		error = priv_check(td, PRIV_VM_MADV_PROTECT);
		if (error == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			p->p_flag |= P_PROTECTED;
			PROC_UNLOCK(p);
		}
		return (error);
	}
	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}
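
/*
 * Illustrative userland sketch: advising the VM system about the
 * expected access pattern of a region; the advice is forwarded to
 * vm_map_madvise() above.
 */
#if 0	/* example only; build as a standalone program */
#include <sys/mman.h>

int
main(void)
{
	size_t len = 1 << 20;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return (1);
	/* Hint that the region will be read sequentially. */
	if (madvise(p, len, MADV_SEQUENTIAL) == -1)
		return (1);
	/* ... stream through p ... */
	munmap(p, len);
	return (0);
}
#endif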

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_paddr_t locked_pa;
	vm_page_t m;
	vm_pindex_t pindex;
	int mincoreinfo;
	unsigned int timestamp;
	boolean_t locked;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (ENOMEM);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * check for contiguity
		 */
		if (current->end < end &&
		    (current->next == &map->header ||
		     current->next->start > current->end)) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
			current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			object = NULL;
			locked_pa = 0;
		retry:
			m = NULL;
			mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
			if (locked_pa != 0) {
				/*
				 * The page is mapped by this process but not
				 * both accessed and modified.  It is also
				 * managed.  Acquire the object lock so that
				 * other mappings might be examined.
				 */
				m = PHYS_TO_VM_PAGE(locked_pa);
				if (m->object != object) {
					if (object != NULL)
						VM_OBJECT_UNLOCK(object);
					object = m->object;
					locked = VM_OBJECT_TRYLOCK(object);
					vm_page_unlock(m);
					if (!locked) {
						VM_OBJECT_LOCK(object);
						vm_page_lock(m);
						goto retry;
					}
				} else
					vm_page_unlock(m);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("mincore: page %p is mapped but invalid",
				    m));
			} else if (mincoreinfo == 0) {
				/*
				 * The page is not mapped by this process.  If
				 * the object implements managed pages, then
				 * determine if the page is resident so that
				 * the mappings might be examined.
				 */
				if (current->object.vm_object != object) {
					if (object != NULL)
						VM_OBJECT_UNLOCK(object);
					object = current->object.vm_object;
					VM_OBJECT_LOCK(object);
				}
				if (object->type == OBJT_DEFAULT ||
				    object->type == OBJT_SWAP ||
				    object->type == OBJT_VNODE) {
					pindex = OFF_TO_IDX(current->offset +
					    (addr - current->start));
					m = vm_page_lookup(object, pindex);
					if (m != NULL && m->valid == 0)
						m = NULL;
					if (m != NULL)
						mincoreinfo = MINCORE_INCORE;
				}
			}
			if (m != NULL) {
				/* Examine other mappings to the page. */
				if (m->dirty == 0 && pmap_is_modified(m))
					vm_page_dirty(m);
				if (m->dirty != 0)
					mincoreinfo |= MINCORE_MODIFIED_OTHER;
				/*
				 * The first test for PG_REFERENCED is an
				 * optimization.  The second test is
				 * required because a concurrent pmap
				 * operation could clear the last reference
				 * and set PG_REFERENCED before the call to
				 * pmap_is_referenced().
				 */
				if ((m->flags & PG_REFERENCED) != 0 ||
				    pmap_is_referenced(m) ||
				    (m->flags & PG_REFERENCED) != 0)
					mincoreinfo |= MINCORE_REFERENCED_OTHER;
			}
			if (object != NULL)
				VM_OBJECT_UNLOCK(object);

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}
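
/*
 * Illustrative userland sketch: querying residency with mincore().
 * Only the first page is touched, so only it should come back with
 * MINCORE_INCORE set.
 */
#if 0	/* example only; build as a standalone program */
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 4 * getpagesize();
	char vec[4];
	char *p;
	int i;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return (1);
	p[0] = 1;			/* fault in the first page only */
	if (mincore(p, len, vec) == -1)
		return (1);
	for (i = 0; i < 4; i++)
		printf("page %d: %s\n", i,
		    (vec[i] & MINCORE_INCORE) ? "resident" : "not resident");
	munmap(p, len);
	return (0);
}
#endif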

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	struct proc *proc;
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	int error;

	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	proc = td->td_proc;
	PROC_LOCK(proc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
	    lim_cur(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
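
/*
 * Illustrative userland sketch: wiring a buffer that holds sensitive
 * data so it can never be paged out to swap.  On this kernel mlock()
 * requires privilege and is limited by RLIMIT_MEMLOCK and
 * vm_page_max_wired, per the checks above.
 */
#if 0	/* example only; build as a standalone program */
#include <sys/mman.h>
#include <string.h>

int
main(void)
{
	static char key[4096];		/* page-sized secret buffer */

	if (mlock(key, sizeof(key)) == -1)
		return (1);
	/* ... use key ... */
	memset(key, 0, sizeof(key));
	munlock(key, sizeof(key));
	return (0);
}
#endif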

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int	how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = 0;

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

#if 0
	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	PROC_LOCK(td->td_proc);
	if (map->size > lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(td->td_proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(td->td_proc);
#else
	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall(). vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
	}

	return (error);
}
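
/*
 * Illustrative userland sketch: wiring everything currently mapped and
 * asking that future mappings be wired as well; the kernel records the
 * latter by setting MAP_WIREFUTURE on the vm_map, as above.
 */
#if 0	/* example only; build as a standalone program */
#include <sys/mman.h>

int
main(void)
{
	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
		return (1);
	/* ... latency-sensitive work; no page faults to disk ... */
	munlockall();
	return (0);
}
#endif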

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr, end, last, start;
	vm_size_t size;
	int error;

	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on vnodes.
 */
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp)
{
	struct vattr va;
	vm_object_t obj;
	vm_offset_t foff;
	struct mount *mp;
	struct ucred *cred;
	int error, flags;
	int vfslocked;

	mp = vp->v_mount;
	cred = td->td_ucred;
	vfslocked = VFS_LOCK_GIANT(mp);
	if ((error = vget(vp, LK_SHARED, td)) != 0) {
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}
	foff = *foffp;
	flags = *flagsp;
	obj = vp->v_object;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object
		 */
		if (obj == NULL) {
			error = EINVAL;
			goto done;
		}
		if (obj->handle != vp) {
			vput(vp);
			vp = (struct vnode*)obj->handle;
			vget(vp, LK_SHARED, td);
		}
	} else if (vp->v_type == VCHR) {
		error = vm_mmap_cdev(td, objsize, prot, maxprotp, flagsp,
		    vp->v_rdev, foffp, objp);
		if (error == 0)
			goto mark_atime;
		goto done;
	} else {
		error = EINVAL;
		goto done;
	}
	if ((error = VOP_GETATTR(vp, &va, cred)))
		goto done;
#ifdef MAC
	error = mac_vnode_check_mmap(cred, vp, prot, flags);
	if (error != 0)
		goto done;
#endif
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & PROT_WRITE) {
				error = EPERM;
				goto done;
			}
			*maxprotp &= ~VM_PROT_WRITE;
		}
	}
	/*
	 * If it is a regular file without any references
	 * we do not need to sync it.
	 * Adjust object size to be the size of actual file.
	 */
	objsize = round_page(va.va_size);
	if (va.va_nlink == 0)
		flags |= MAP_NOSYNC;
	obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, td->td_ucred);
	if (obj == NULL) {
		error = ENOMEM;
		goto done;
	}
	*objp = obj;
	*flagsp = flags;

mark_atime:
	vfs_mark_atime(vp, cred);

done:
	vput(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * vm_mmap_cdev()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on cdevs.
 */
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct cdev *cdev, vm_ooffset_t *foff, vm_object_t *objp)
{
	vm_object_t obj;
	struct cdevsw *dsw;
	int error, flags, ref;

	flags = *flagsp;

	dsw = dev_refthread(cdev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	if (dsw->d_flags & D_MMAP_ANON) {
		dev_relthread(cdev, ref);
		*maxprotp = VM_PROT_ALL;
		*flagsp |= MAP_ANON;
		return (0);
	}
	/*
	 * cdevs do not provide private mappings of any kind.
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & PROT_WRITE) != 0) {
		dev_relthread(cdev, ref);
		return (EACCES);
	}
	if (flags & (MAP_PRIVATE|MAP_COPY)) {
		dev_relthread(cdev, ref);
		return (EINVAL);
	}
	/*
	 * Force device mappings to be shared.
	 */
	flags |= MAP_SHARED;
#ifdef MAC_XXX
	error = mac_cdev_check_mmap(td->td_ucred, cdev, prot);
	if (error != 0) {
		dev_relthread(cdev, ref);
		return (error);
	}
#endif
	/*
	 * First, try d_mmap_single().  If that is not implemented
	 * (returns ENODEV), fall back to using the device pager.
	 * Note that d_mmap_single() must return a reference to the
	 * object (it needs to bump the reference count of the object
	 * it returns somehow).
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
	dev_relthread(cdev, ref);
	if (error != ENODEV)
		return (error);
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
	    td->td_ucred);
	if (obj == NULL)
		return (EINVAL);
	*objp = obj;
	*flagsp = flags;
	return (0);
}
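
/*
 * Illustrative userland sketch: a device whose driver sets D_MMAP_ANON,
 * such as /dev/zero, is rewritten above into an anonymous mapping, so
 * MAP_PRIVATE is permitted and the memory reads back as zeroes.
 */
#if 0	/* example only; build as a standalone program */
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	char *p;

	if (fd == -1)
		return (1);
	/* Behaves like MAP_ANON: private, zero-filled memory. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	p[0] = 1;
	munmap(p, 4096);
	close(fd);
	return (0);
}
#endif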

/*
 * vm_mmap_shm()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on shm file descriptors.
 */
int
vm_mmap_shm(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct shmfd *shmfd, vm_ooffset_t foff, vm_object_t *objp)
{
	int error;

	if ((*flagsp & MAP_SHARED) != 0 &&
	    (*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & PROT_WRITE) != 0)
		return (EACCES);
#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, *flagsp);
	if (error != 0)
		return (error);
#endif
	error = shm_mmap(shmfd, objsize, foff, objp);
	if (error)
		return (error);
	return (0);
}
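
/*
 * Illustrative userland sketch: a POSIX shared memory object created
 * with shm_open(2).  The descriptor is DTYPE_SHM, so the mmap() syscall
 * above takes the OBJT_SWAP path and ends up here in vm_mmap_shm().
 */
#if 0	/* example only; build as a standalone program */
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd;
	char *p;

	fd = shm_open(SHM_ANON, O_RDWR, 0600);
	if (fd == -1)
		return (1);
	if (ftruncate(fd, 4096) == -1)
		return (1);
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	p[0] = 1;
	munmap(p, 4096);
	close(fd);
	return (0);
}
#endif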

/*
 * vm_mmap()
 *
 * MPSAFE
 *
 * Internal version of mmap.  Currently used by mmap, exec, and sys5
 * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	objtype_t handle_type, void *handle,
	vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object = NULL;
	int rv = KERN_SUCCESS;
	int docow, error;
	struct thread *td = curthread;

	if (size == 0)
		return (0);

	size = round_page(size);

	PROC_LOCK(td->td_proc);
	if (td->td_proc->p_vmspace->vm_map.size + size >
	    lim_cur(td->td_proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(td->td_proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
	}
	/*
	 * Lookup/allocate object.
	 */
	switch (handle_type) {
	case OBJT_DEVICE:
		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object);
		break;
	case OBJT_VNODE:
		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object);
		break;
	case OBJT_SWAP:
		error = vm_mmap_shm(td, size, prot, &maxprot, &flags,
		    handle, foff, &object);
		break;
	case OBJT_DEFAULT:
		if (handle == NULL) {
			error = 0;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = EINVAL;
		break;
	}
	if (error)
		return (error);
	if (flags & MAP_ANON) {
		object = NULL;
		docow = 0;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else if (flags & MAP_PREFAULT_READ)
		docow = MAP_PREFAULT;
	else
		docow = MAP_PREFAULT_PARTIAL;

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

	if (flags & MAP_STACK)
		rv = vm_map_stack(map, *addr, size, prot, maxprot,
		    docow | MAP_STACK_GROWS_DOWN);
	else if (fitit)
		rv = vm_map_find(map, object, foff, addr, size,
		    object != NULL && object->type == OBJT_DEVICE ?
		    VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, prot, maxprot, docow);
	else
		rv = vm_map_fixed(map, object, foff, *addr, size,
				 prot, maxprot, docow);

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference. Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
	} else if (flags & MAP_SHARED) {
		/*
		 * Shared memory is also shared with children.
		 */
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * If the process has requested that all future mappings
	 * be wired, then heed this.
	 */
	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
		vm_map_wire(map, *addr, *addr + size,
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}