xref: /freebsd/sys/vm/vm_mmap.c (revision 642870485c089b57000fe538d3485e272b038d59)
/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_hwpmc_hooks.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

int old_mlock = 0;
SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
    "Do not apply RLIMIT_MEMLOCK on mlockall");
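
/*
 * For illustration: the knob above is exposed as vm.old_mlock and,
 * being CTLFLAG_RWTUN, may be set as a loader tunable or at run time,
 * e.g. "sysctl vm.old_mlock=1".
 */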

#ifdef MAP_32BIT
#define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

int
sys_sbrk(struct thread *td, struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

int
sys_sstk(struct thread *td, struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

int
ogetpagesize(struct thread *td, struct getpagesize_args *uap)
{

	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 */

/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 */
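
/*
 * Userspace sketch (for illustration, not part of this file): with
 * MAP_FIXED, the hint and the file offset must share the same page
 * offset, so with a 4K page size a request such as
 *
 *	mmap(base + 0x234, len, PROT_READ, MAP_FIXED | MAP_SHARED, fd,
 *	    0x1234);
 *
 * is acceptable when base is page-aligned (both values are 0x234
 * modulo PAGE_SIZE), whereas a page-aligned address combined with the
 * same offset fails with EINVAL in kern_mmap() below.
 */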
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

int
sys_mmap(struct thread *td, struct mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}

int
kern_mmap(struct thread *td, uintptr_t addr0, size_t size, int prot, int flags,
    int fd, off_t pos)
{
	struct vmspace *vms;
	struct file *fp;
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_prot_t cap_maxprot;
	int align, error;
	cap_rights_t rights;

	vms = td->td_proc->p_vmspace;
	fp = NULL;
	AUDIT_ARG_FD(fd);
	addr = addr0;

	/*
	 * Ignore old flags that used to be defined but did not do anything.
	 */
	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);

	/*
	 * Enforce the constraints.
	 * Mapping of length 0 is only allowed for old binaries.
	 * Anonymous mapping shall specify -1 as file descriptor and
	 * zero position for new code. Be nice to ancient a.out
	 * binaries and correct pos for anonymous mapping, since old
	 * ld.so sometimes issues anonymous map requests with non-zero
	 * pos.
	 */
	if (!SV_CURPROC_FLAG(SV_AOUT)) {
		if ((size == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
			return (EINVAL);
	} else {
		if ((flags & MAP_ANON) != 0)
			pos = 0;
	}

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}
	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
	    MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
	    MAP_PREFAULT_READ |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);
	if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
		return (EINVAL);
	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
		return (EINVAL);
	if (prot != PROT_NONE &&
	    (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
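
	/*
	 * Worked example: with a 4K page size, pos = 0x12345 and
	 * size = 0x100 give pageoff = 0x345, pos becomes 0x12000, and
	 * size is rounded up to round_page(0x100 + 0x345) = 0x1000,
	 * i.e. the single page covering the requested range.
	 */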

	/* Ensure alignment is at least a page and fits in a pointer. */
	align = flags & MAP_ALIGNMENT_MASK;
	if (align != 0 && align != MAP_ALIGNED_SUPER &&
	    (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
	    align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
		return (EINVAL);
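
	/*
	 * For illustration: MAP_ALIGNED(n) in <sys/mman.h> encodes a
	 * request for 2^n byte alignment, so a userspace caller could
	 * ask for a 2MB-aligned anonymous region with
	 *
	 *	mmap(NULL, sz, PROT_READ | PROT_WRITE,
	 *	    MAP_ANON | MAP_ALIGNED(21), -1, 0);
	 *
	 * The check above rejects shifts below PAGE_SHIFT or at least
	 * as large as the width of a pointer.
	 */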

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
#ifdef MAP_32BIT
		if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
			return (EINVAL);
	} else if (flags & MAP_32BIT) {
		/*
		 * For MAP_32BIT, override the hint if it is too high and
		 * do not bother moving the mapping past the heap (since
		 * the heap is usually above 2GB).
		 */
		if (addr + size > MAP_32BIT_MAX_ADDR)
			addr = 0;
#endif
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA));
	}
	if (size == 0) {
		/*
		 * Return success without mapping anything for old
		 * binaries that request a page-aligned mapping of
		 * length 0.  For modern binaries, this function
		 * returns an error earlier.
		 */
		error = 0;
	} else if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 *
		 * This relies on VM_PROT_* matching PROT_*.
		 */
		error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
		    VM_PROT_ALL, flags, NULL, pos, FALSE, td);
	} else {
		/*
		 * Mapping file, get fp for validation and don't let the
		 * descriptor disappear on us if we block. Check capability
		 * rights, but also return the maximum rights to be combined
		 * with maxprot later.
		 */
		cap_rights_init(&rights, CAP_MMAP);
		if (prot & PROT_READ)
			cap_rights_set(&rights, CAP_MMAP_R);
		if ((flags & MAP_SHARED) != 0) {
			if (prot & PROT_WRITE)
				cap_rights_set(&rights, CAP_MMAP_W);
		}
		if (prot & PROT_EXEC)
			cap_rights_set(&rights, CAP_MMAP_X);
		error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
		if (error != 0)
			goto done;
		if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
		    td->td_proc->p_osrel >= P_OSREL_MAP_FSTRICT) {
			error = EINVAL;
			goto done;
		}

		/* This relies on VM_PROT_* matching PROT_*. */
		error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
		    cap_maxprot, flags, pos, td);
	}

	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}
#endif

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(struct thread *td, struct ommap_args *uap)
{
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};
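
	/*
	 * The table above decodes the historic 4.3BSD protection
	 * encoding, in which bit 0 requests execute, bit 1 write, and
	 * bit 2 read; for example, an old prot value of 5 (binary 101)
	 * maps to PROT_EXEC | PROT_READ.
	 */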
	int flags, prot;

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	prot = cvtbsdprot[uap->prot & 0x7];
#ifdef COMPAT_FREEBSD32
#if defined(__amd64__)
	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
	    prot != 0)
		prot |= PROT_EXEC;
#endif
#endif
	flags = 0;
	if (uap->flags & OMAP_ANON)
		flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		flags |= MAP_SHARED;
	else
		flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		flags |= MAP_FIXED;
	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
	    uap->fd, uap->pos));
}
#endif				/* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	size_t len;
	int flags;
};
#endif
int
sys_msync(struct thread *td, struct msync_args *uap)
{

	return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
}

int
kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
{
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;
	int rv;

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}
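
/*
 * Userspace sketch (for illustration, not part of this file): MS_ASYNC
 * may not be combined with MS_INVALIDATE, as checked above, but a
 * synchronous flush that also invalidates cached pages is one call:
 *
 *	if (msync(p, len, MS_SYNC | MS_INVALIDATE) == -1)
 *		err(1, "msync");
 */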

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
int
sys_munmap(struct thread *td, struct munmap_args *uap)
{

	return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
	bool pmc_handled;
#endif
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;

	if (size == 0)
		return (EINVAL);

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
#ifdef HWPMC_HOOKS
	pmc_handled = false;
	if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
		pmc_handled = true;
		/*
		 * Inform hwpmc if the address range being unmapped contains
		 * an executable region.
		 */
		pkm.pm_address = (uintptr_t) NULL;
		if (vm_map_lookup_entry(map, addr, &entry)) {
			for (;
			    entry != &map->header && entry->start < addr + size;
			    entry = entry->next) {
				if (vm_map_check_protection(map, entry->start,
					entry->end, VM_PROT_EXECUTE) == TRUE) {
					pkm.pm_address = (uintptr_t) addr;
					pkm.pm_size = (size_t) size;
					break;
				}
			}
		}
	}
#endif
	vm_map_delete(map, addr, addr + size);

#ifdef HWPMC_HOOKS
	if (__predict_false(pmc_handled)) {
		/* downgrade the lock to prevent a LOR with the pmc-sx lock */
		vm_map_lock_downgrade(map);
		if (pkm.pm_address != (uintptr_t) NULL)
			PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
		vm_map_unlock_read(map);
	} else
#endif
		vm_map_unlock(map);

	/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
int
sys_mprotect(struct thread *td, struct mprotect_args *uap)
{

	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot));
}

int
kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
{
	vm_offset_t addr;
	vm_size_t pageoff;

	addr = addr0;
	prot = (prot & VM_PROT_ALL);
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_RESOURCE_SHORTAGE:
		return (ENOMEM);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
int
sys_minherit(struct thread *td, struct minherit_args *uap)
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

int
sys_madvise(struct thread *td, struct madvise_args *uap)
{

	return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
}

int
kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
{
	vm_map_t map;
	vm_offset_t addr, end, start;
	int flags;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (behav == MADV_PROTECT) {
		flags = PPROT_SET;
		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
		    PROC_SPROTECT, &flags));
	}

	/*
	 * Check for illegal behavior
	 */
	if (behav < 0 || behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	addr = addr0;
	if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
		return (EINVAL);
	if ((addr + len) < addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page(addr);
	end = round_page(addr + len);

	if (vm_map_madvise(map, start, end, behav))
		return (EINVAL);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

int
sys_mincore(struct thread *td, struct mincore_args *uap)
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_paddr_t locked_pa;
	vm_page_t m;
	vm_pindex_t pindex;
	int mincoreinfo;
	unsigned int timestamp;
	boolean_t locked;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (ENOMEM);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * check for contiguity
		 */
		if (current->end < end &&
		    (current->next == &map->header ||
		     current->next->start > current->end)) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
			current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check the pmap first; it is likely faster and
			 * can also tell whether we are the one
			 * referencing or modifying the page.
			 */
			object = NULL;
			locked_pa = 0;
		retry:
			m = NULL;
			mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
			if (locked_pa != 0) {
				/*
				 * The page is mapped by this process but not
				 * both accessed and modified.  It is also
				 * managed.  Acquire the object lock so that
				 * other mappings might be examined.
				 */
				m = PHYS_TO_VM_PAGE(locked_pa);
				if (m->object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = m->object;
					locked = VM_OBJECT_TRYWLOCK(object);
					vm_page_unlock(m);
					if (!locked) {
						VM_OBJECT_WLOCK(object);
						vm_page_lock(m);
						goto retry;
					}
				} else
					vm_page_unlock(m);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("mincore: page %p is mapped but invalid",
				    m));
			} else if (mincoreinfo == 0) {
				/*
				 * The page is not mapped by this process.  If
				 * the object implements managed pages, then
				 * determine if the page is resident so that
				 * the mappings might be examined.
				 */
				if (current->object.vm_object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = current->object.vm_object;
					VM_OBJECT_WLOCK(object);
				}
				if (object->type == OBJT_DEFAULT ||
				    object->type == OBJT_SWAP ||
				    object->type == OBJT_VNODE) {
					pindex = OFF_TO_IDX(current->offset +
					    (addr - current->start));
					m = vm_page_lookup(object, pindex);
					if (m != NULL && m->valid == 0)
						m = NULL;
					if (m != NULL)
						mincoreinfo = MINCORE_INCORE;
				}
			}
			if (m != NULL) {
				/* Examine other mappings to the page. */
				if (m->dirty == 0 && pmap_is_modified(m))
					vm_page_dirty(m);
				if (m->dirty != 0)
					mincoreinfo |= MINCORE_MODIFIED_OTHER;
				/*
				 * The first test for PGA_REFERENCED is an
				 * optimization.  The second test is
				 * required because a concurrent pmap
				 * operation could clear the last reference
				 * and set PGA_REFERENCED before the call to
				 * pmap_is_referenced().
				 */
				if ((m->aflags & PGA_REFERENCED) != 0 ||
				    pmap_is_referenced(m) ||
				    (m->aflags & PGA_REFERENCED) != 0)
					mincoreinfo |= MINCORE_REFERENCED_OTHER;
			}
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				++lastvecindex;
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed due to the subyte, the
			 * previous output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		++lastvecindex;
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
	}

	/*
	 * If the map has changed due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}
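
/*
 * Userspace sketch (for illustration, not part of this file): the
 * vector passed to mincore(2) needs one byte per page of the queried
 * range:
 *
 *	char vec[16];
 *	if (mincore(p, 16 * PAGE_SIZE, vec) == 0 &&
 *	    (vec[0] & MINCORE_INCORE) != 0)
 *		printf("first page is resident\n");
 */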

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
int
sys_mlock(struct thread *td, struct mlock_args *uap)
{

	return (kern_mlock(td->td_proc, td->td_ucred,
	    __DECONST(uintptr_t, uap->addr), uap->len));
}

int
kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
{
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	vm_map_t map;
	unsigned long nsize;
	int error;

	error = priv_check_cred(cred, PRIV_VM_MLOCK, 0);
	if (error)
		return (error);
	addr = addr0;
	size = len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	map = &proc->p_vmspace->vm_map;
	PROC_LOCK(proc);
	nsize = ptoa(npages + pmap_wired_count(map->pmap));
	if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
	if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(proc);
		error = racct_set(proc, RACCT_MEMLOCK, nsize);
		PROC_UNLOCK(proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif
	error = vm_map_wire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(proc);
		racct_set(proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(proc);
	}
#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int	how;
};
#endif

int
sys_mlockall(struct thread *td, struct mlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	if (!old_mlock && uap->how & MCL_CURRENT) {
		PROC_LOCK(td->td_proc);
		if (map->size > lim_cur(td, RLIMIT_MEMLOCK)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		PROC_UNLOCK(td->td_proc);
	}
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
		PROC_UNLOCK(td->td_proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall(). vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
	}
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}
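
/*
 * Userspace sketch (for illustration, not part of this file): a
 * process that must avoid page faults on its mappings would typically
 * call
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		err(1, "mlockall");
 *
 * subject to PRIV_VM_MLOCK and, unless vm.old_mlock is set, to the
 * RLIMIT_MEMLOCK checks above.
 */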

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

int
sys_munlockall(struct thread *td, struct munlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
#ifdef RACCT
	if (racct_enable && error == KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK, 0);
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
int
sys_munlock(struct thread *td, struct munlock_args *uap)
{

	return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
{
	vm_offset_t addr, end, last, start;
#ifdef RACCT
	vm_map_t map;
#endif
	int error;

	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);
	addr = addr0;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error == KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		map = &td->td_proc->p_vmspace->vm_map;
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * Helper function for vm_mmap.  Perform the sanity checks specific to
 * mmap operations on vnodes.
 */
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
    boolean_t *writecounted)
{
	struct vattr va;
	vm_object_t obj;
	vm_offset_t foff;
	struct ucred *cred;
	int error, flags, locktype;

	cred = td->td_ucred;
	if ((*maxprotp & VM_PROT_WRITE) && (*flagsp & MAP_SHARED))
		locktype = LK_EXCLUSIVE;
	else
		locktype = LK_SHARED;
	if ((error = vget(vp, locktype, td)) != 0)
		return (error);
	AUDIT_ARG_VNODE1(vp);
	foff = *foffp;
	flags = *flagsp;
	obj = vp->v_object;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object
		 */
		if (obj == NULL) {
			error = EINVAL;
			goto done;
		}
		if (obj->type == OBJT_VNODE && obj->handle != vp) {
			vput(vp);
			vp = (struct vnode *)obj->handle;
			/*
			 * Bypass filesystems obey the mpsafety of the
			 * underlying fs.  Tmpfs never bypasses.
			 */
			error = vget(vp, locktype, td);
			if (error != 0)
				return (error);
		}
		if (locktype == LK_EXCLUSIVE) {
			*writecounted = TRUE;
			vnode_pager_update_writecount(obj, 0, objsize);
		}
	} else {
		error = EINVAL;
		goto done;
	}
	if ((error = VOP_GETATTR(vp, &va, cred)))
		goto done;
#ifdef MAC
	/* This relies on VM_PROT_* matching PROT_*. */
	error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
	if (error != 0)
		goto done;
#endif
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & VM_PROT_WRITE) {
				error = EPERM;
				goto done;
			}
			*maxprotp &= ~VM_PROT_WRITE;
		}
	}
	/*
	 * If it is a regular file without any references, we do not
	 * need to sync it.
	 * Adjust the object size to be the size of the actual file.
	 */
	objsize = round_page(va.va_size);
	if (va.va_nlink == 0)
		flags |= MAP_NOSYNC;
	if (obj->type == OBJT_VNODE) {
		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
		    cred);
		if (obj == NULL) {
			error = ENOMEM;
			goto done;
		}
	} else {
		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
		    ("wrong object type"));
		VM_OBJECT_WLOCK(obj);
		vm_object_reference_locked(obj);
#if VM_NRESERVLEVEL > 0
		vm_object_color(obj, 0);
#endif
		VM_OBJECT_WUNLOCK(obj);
	}
	*objp = obj;
	*flagsp = flags;

	vfs_mark_atime(vp, cred);

done:
	if (error != 0 && *writecounted) {
		*writecounted = FALSE;
		vnode_pager_update_writecount(obj, objsize, 0);
	}
	vput(vp);
	return (error);
}

/*
 * vm_mmap_cdev()
 *
 * Helper function for vm_mmap.  Perform the sanity checks specific to
 * mmap operations on cdevs.
 */
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
    vm_ooffset_t *foff, vm_object_t *objp)
{
	vm_object_t obj;
	int error, flags;

	flags = *flagsp;

	if (dsw->d_flags & D_MMAP_ANON) {
		*objp = NULL;
		*foff = 0;
		*maxprotp = VM_PROT_ALL;
		*flagsp |= MAP_ANON;
		return (0);
	}
	/*
	 * cdevs do not provide private mappings of any kind.
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if (flags & (MAP_PRIVATE|MAP_COPY))
		return (EINVAL);
	/*
	 * Force device mappings to be shared.
	 */
	flags |= MAP_SHARED;
#ifdef MAC_XXX
	error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
	if (error != 0)
		return (error);
#endif
	/*
	 * First, try d_mmap_single().  If that is not implemented
	 * (returns ENODEV), fall back to using the device pager.
	 * Note that d_mmap_single() must return a reference to the
	 * object (it needs to bump the reference count of the object
	 * it returns somehow).
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
	if (error != ENODEV)
		return (error);
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
	    td->td_ucred);
	if (obj == NULL)
		return (EINVAL);
	*objp = obj;
	*flagsp = flags;
	return (0);
}
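
/*
 * Driver-side sketch (a hypothetical example, not a driver in this
 * tree): a d_mmap_single implementation that declines the request,
 * which makes vm_mmap_cdev() above fall back to the device pager:
 *
 *	static int
 *	example_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
 *	    vm_size_t size, vm_object_t *object, int nprot)
 *	{
 *		return (ENODEV);
 *	}
 */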

/*
 * vm_mmap()
 *
 * Internal version of mmap used by exec, sys5 shared memory, and
 * various device drivers.  Handle is either a vnode pointer, a
 * character device, or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	objtype_t handle_type, void *handle,
	vm_ooffset_t foff)
{
	vm_object_t object;
	struct thread *td = curthread;
	int error;
	boolean_t writecounted;

	if (size == 0)
		return (EINVAL);

	size = round_page(size);
	object = NULL;
	writecounted = FALSE;

	/*
	 * Lookup/allocate object.
	 */
	switch (handle_type) {
	case OBJT_DEVICE: {
		struct cdevsw *dsw;
		struct cdev *cdev;
		int ref;

		cdev = handle;
		dsw = dev_refthread(cdev, &ref);
		if (dsw == NULL)
			return (ENXIO);
		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
		    dsw, &foff, &object);
		dev_relthread(cdev, ref);
		break;
	}
	case OBJT_VNODE:
		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object, &writecounted);
		break;
	case OBJT_DEFAULT:
		if (handle == NULL) {
			error = 0;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = EINVAL;
		break;
	}
	if (error)
		return (error);

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, writecounted, td);
	if (error != 0 && object != NULL) {
		/*
		 * If this mapping was accounted for in the vnode's
		 * writecount, then undo that now.
		 */
		if (writecounted)
			vnode_pager_release_writecount(object, 0, size);
		vm_object_deallocate(object);
	}
	return (error);
}
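
/*
 * For illustration, an in-kernel caller mapping a vnode through the
 * function above might look like (a sketch with hypothetical locals):
 *
 *	error = vm_mmap(&vms->vm_map, &addr, size, VM_PROT_READ,
 *	    VM_PROT_ALL, MAP_SHARED, OBJT_VNODE, vp, 0);
 */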

/*
 * Internal version of mmap that maps a specific VM object into a
 * map.  Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
 */
int
vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
    boolean_t writecounted, struct thread *td)
{
	boolean_t fitit;
	int docow, error, findspace, rv;

	if (map == &td->td_proc->p_vmspace->vm_map) {
		PROC_LOCK(td->td_proc);
		if (map->size + size > lim_cur_proc(td->td_proc, RLIMIT_VMEM)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		if (!old_mlock && map->flags & MAP_WIREFUTURE) {
			if (ptoa(pmap_wired_count(map->pmap)) + size >
			    lim_cur_proc(td->td_proc, RLIMIT_MEMLOCK)) {
				racct_set_force(td->td_proc, RACCT_VMEM,
				    map->size);
				PROC_UNLOCK(td->td_proc);
				return (ENOMEM);
			}
			error = racct_set(td->td_proc, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)) + size);
			if (error != 0) {
				racct_set_force(td->td_proc, RACCT_VMEM,
				    map->size);
				PROC_UNLOCK(td->td_proc);
				return (error);
			}
		}
		PROC_UNLOCK(td->td_proc);
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The mmap() system call already enforces this by subtracting
	 * the page offset from the file offset, but checking here
	 * catches errors in device drivers (e.g. d_mmap_single()
	 * callbacks) and other internal mapping requests (such as in
	 * exec).
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
	}

	if (flags & MAP_ANON) {
		if (object != NULL || foff != 0)
			return (EINVAL);
		docow = 0;
	} else if (flags & MAP_PREFAULT_READ)
		docow = MAP_PREFAULT;
	else
		docow = MAP_PREFAULT_PARTIAL;

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;
	/* Shared memory is also shared with children. */
	if (flags & MAP_SHARED)
		docow |= MAP_INHERIT_SHARE;
	if (writecounted)
		docow |= MAP_VN_WRITECOUNT;
	if (flags & MAP_STACK) {
		if (object != NULL)
			return (EINVAL);
		docow |= MAP_STACK_GROWS_DOWN;
	}
	if ((flags & MAP_EXCL) != 0)
		docow |= MAP_CHECK_EXCL;

	if (fitit) {
		if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
			findspace = VMFS_SUPER_SPACE;
		else if ((flags & MAP_ALIGNMENT_MASK) != 0)
			findspace = VMFS_ALIGNED_SPACE(flags >>
			    MAP_ALIGNMENT_SHIFT);
		else
			findspace = VMFS_OPTIMAL_SPACE;
		rv = vm_map_find(map, object, foff, addr, size,
#ifdef MAP_32BIT
		    flags & MAP_32BIT ? MAP_32BIT_MAX_ADDR :
#endif
		    0, findspace, prot, maxprot, docow);
	} else {
		rv = vm_map_fixed(map, object, foff, *addr, size,
		    prot, maxprot, docow);
	}

	if (rv == KERN_SUCCESS) {
		/*
		 * If the process has requested that all future mappings
		 * be wired, then heed this.
		 */
		if (map->flags & MAP_WIREFUTURE) {
			vm_map_wire(map, *addr, *addr + size,
			    VM_MAP_WIRE_USER | ((flags & MAP_STACK) ?
			    VM_MAP_WIRE_HOLESOK : VM_MAP_WIRE_NOHOLES));
		}
	}
	return (vm_mmap_to_errno(rv));
}

/*
 * Translate a Mach VM return code to zero on success or the appropriate errno
 * on failure.
 */
int
vm_mmap_to_errno(int rv)
{

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}