xref: /freebsd/sys/vm/vm_mmap.c (revision b3e7694832e81d7a904a10f525f8797b753bf0d3)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
37  *
38  *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
39  */
40 
41 /*
42  * Mapped file (mmap) interface to VM
43  */
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include "opt_hwpmc_hooks.h"
49 #include "opt_vm.h"
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/capsicum.h>
54 #include <sys/kernel.h>
55 #include <sys/lock.h>
56 #include <sys/mutex.h>
57 #include <sys/sysproto.h>
58 #include <sys/elf.h>
59 #include <sys/filedesc.h>
60 #include <sys/priv.h>
61 #include <sys/proc.h>
62 #include <sys/procctl.h>
63 #include <sys/racct.h>
64 #include <sys/resource.h>
65 #include <sys/resourcevar.h>
66 #include <sys/rwlock.h>
67 #include <sys/sysctl.h>
68 #include <sys/vnode.h>
69 #include <sys/fcntl.h>
70 #include <sys/file.h>
71 #include <sys/mman.h>
72 #include <sys/mount.h>
73 #include <sys/conf.h>
74 #include <sys/stat.h>
75 #include <sys/syscallsubr.h>
76 #include <sys/sysent.h>
77 #include <sys/vmmeter.h>
78 #if defined(__amd64__) || defined(__i386__) /* for i386_read_exec */
79 #include <machine/md_var.h>
80 #endif
81 
82 #include <security/audit/audit.h>
83 #include <security/mac/mac_framework.h>
84 
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <vm/pmap.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_pager.h>
92 #include <vm/vm_pageout.h>
93 #include <vm/vm_extern.h>
95 #include <vm/vnode_pager.h>
96 
97 #ifdef HWPMC_HOOKS
98 #include <sys/pmckern.h>
99 #endif
100 
101 int old_mlock = 0;
102 SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
103     "Do not apply RLIMIT_MEMLOCK on mlockall");
104 static int mincore_mapped = 1;
105 SYSCTL_INT(_vm, OID_AUTO, mincore_mapped, CTLFLAG_RWTUN, &mincore_mapped, 0,
106     "mincore reports mappings, not residency");
107 static int imply_prot_max = 0;
108 SYSCTL_INT(_vm, OID_AUTO, imply_prot_max, CTLFLAG_RWTUN, &imply_prot_max, 0,
109     "Imply maximum page protections in mmap() when none are specified");
110 
111 _Static_assert(MAXPAGESIZES <= 4, "MINCORE_SUPER too narrow");
112 
113 #ifndef _SYS_SYSPROTO_H_
114 struct sbrk_args {
115 	int incr;
116 };
117 #endif
118 
119 int
120 sys_sbrk(struct thread *td, struct sbrk_args *uap)
121 {
122 	/* Not yet implemented */
123 	return (EOPNOTSUPP);
124 }
125 
126 #ifndef _SYS_SYSPROTO_H_
127 struct sstk_args {
128 	int incr;
129 };
130 #endif
131 
132 int
133 sys_sstk(struct thread *td, struct sstk_args *uap)
134 {
135 	/* Not yet implemented */
136 	return (EOPNOTSUPP);
137 }
138 
139 #if defined(COMPAT_43)
140 int
141 ogetpagesize(struct thread *td, struct ogetpagesize_args *uap)
142 {
143 
144 	td->td_retval[0] = PAGE_SIZE;
145 	return (0);
146 }
147 #endif				/* COMPAT_43 */
148 
149 /*
150  * Memory Map (mmap) system call.  Note that the file offset
151  * and address are allowed to be NOT page aligned, though if
152  * the MAP_FIXED flag is set, both must have the same remainder
153  * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
154  * page-aligned, the actual mapping starts at trunc_page(addr)
155  * and the return value is adjusted up by the page offset.
156  *
157  * Generally speaking, only character devices which are themselves
158  * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
159  * there would be no cache coherency between a descriptor and a VM mapping
160  * both to the same character device.
161  */
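
/*
 * Illustrative userland sketch (names and values below are placeholders,
 * not part of this file): since the offset need not be page aligned when
 * MAP_FIXED is not given, a request such as
 *
 *	fd = open("/tmp/example", O_RDONLY);
 *	p = mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 0x1234);
 *
 * maps the file starting at trunc_page(0x1234) and returns a pointer
 * advanced by the page offset (0x234 with 4 KiB pages), so *p is the
 * byte at file offset 0x1234.
 */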
162 #ifndef _SYS_SYSPROTO_H_
163 struct mmap_args {
164 	void *addr;
165 	size_t len;
166 	int prot;
167 	int flags;
168 	int fd;
169 	long pad;
170 	off_t pos;
171 };
172 #endif
173 
174 int
175 sys_mmap(struct thread *td, struct mmap_args *uap)
176 {
177 
178 	return (kern_mmap(td, &(struct mmap_req){
179 		.mr_hint = (uintptr_t)uap->addr,
180 		.mr_len = uap->len,
181 		.mr_prot = uap->prot,
182 		.mr_flags = uap->flags,
183 		.mr_fd = uap->fd,
184 		.mr_pos = uap->pos,
185 	    }));
186 }
187 
188 int
189 kern_mmap_maxprot(struct proc *p, int prot)
190 {
191 
192 	if ((p->p_flag2 & P2_PROTMAX_DISABLE) != 0 ||
193 	    (p->p_fctl0 & NT_FREEBSD_FCTL_PROTMAX_DISABLE) != 0)
194 		return (_PROT_ALL);
195 	if (((p->p_flag2 & P2_PROTMAX_ENABLE) != 0 || imply_prot_max) &&
196 	    prot != PROT_NONE)
197 		 return (prot);
198 	return (_PROT_ALL);
199 }
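
/*
 * Illustrative sketch (placeholder names, not part of this file): a
 * caller can bound the maximum protections explicitly by encoding them
 * with PROT_MAX(), e.g.
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_MAX(PROT_READ | PROT_WRITE),
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *
 * which maps the region readable now while allowing a later mprotect()
 * to add at most PROT_WRITE, never PROT_EXEC.  When no PROT_MAX() bits
 * are supplied, the maximum defaults to the value computed above.
 */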
200 
201 int
202 kern_mmap(struct thread *td, const struct mmap_req *mrp)
203 {
204 	struct vmspace *vms;
205 	struct file *fp;
206 	struct proc *p;
207 	off_t pos;
208 	vm_offset_t addr, orig_addr;
209 	vm_size_t len, pageoff, size;
210 	vm_prot_t cap_maxprot;
211 	int align, error, fd, flags, max_prot, prot;
212 	cap_rights_t rights;
213 	mmap_check_fp_fn check_fp_fn;
214 
215 	orig_addr = addr = mrp->mr_hint;
216 	len = mrp->mr_len;
217 	prot = mrp->mr_prot;
218 	flags = mrp->mr_flags;
219 	fd = mrp->mr_fd;
220 	pos = mrp->mr_pos;
221 	check_fp_fn = mrp->mr_check_fp_fn;
222 
223 	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
224 		return (EINVAL);
225 	max_prot = PROT_MAX_EXTRACT(prot);
226 	prot = PROT_EXTRACT(prot);
227 	if (max_prot != 0 && (max_prot & prot) != prot)
228 		return (ENOTSUP);
229 
230 	p = td->td_proc;
231 
232 	/*
233 	 * Always honor PROT_MAX if set.  If not, default to all
234 	 * permissions unless we're implying maximum permissions.
235 	 */
236 	if (max_prot == 0)
237 		max_prot = kern_mmap_maxprot(p, prot);
238 
239 	vms = p->p_vmspace;
240 	fp = NULL;
241 	AUDIT_ARG_FD(fd);
242 
243 	/*
244 	 * Ignore old flags that used to be defined but did not do anything.
245 	 */
246 	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);
247 
248 	/*
249 	 * Enforce the constraints.
250 	 * Mapping of length 0 is only allowed for old binaries.
251  * Anonymous mapping shall specify -1 as the file descriptor and
252 	 * zero position for new code. Be nice to ancient a.out
253 	 * binaries and correct pos for anonymous mapping, since old
254 	 * ld.so sometimes issues anonymous map requests with non-zero
255 	 * pos.
256 	 */
257 	if (!SV_CURPROC_FLAG(SV_AOUT)) {
258 		if ((len == 0 && p->p_osrel >= P_OSREL_MAP_ANON) ||
259 		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
260 			return (EINVAL);
261 	} else {
262 		if ((flags & MAP_ANON) != 0)
263 			pos = 0;
264 	}
265 
266 	if (flags & MAP_STACK) {
267 		if ((fd != -1) ||
268 		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
269 			return (EINVAL);
270 		flags |= MAP_ANON;
271 		pos = 0;
272 	}
273 	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
274 	    MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
275 	    MAP_PREFAULT_READ | MAP_GUARD | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
276 		return (EINVAL);
277 	if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
278 		return (EINVAL);
279 	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
280 		return (EINVAL);
281 	if (prot != PROT_NONE &&
282 	    (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
283 		return (EINVAL);
284 	if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
285 	    pos != 0 || (flags & ~(MAP_FIXED | MAP_GUARD | MAP_EXCL |
286 	    MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0))
287 		return (EINVAL);
288 
289 	/*
290 	 * Align the file position to a page boundary,
291 	 * and save its page offset component.
292 	 */
293 	pageoff = (pos & PAGE_MASK);
294 	pos -= pageoff;
295 
296 	/* Compute size from len by rounding (on both ends). */
297 	size = len + pageoff;			/* low end... */
298 	size = round_page(size);		/* hi end */
299 	/* Check for rounding up to zero. */
300 	if (len > size)
301 		return (ENOMEM);
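	/*
	 * Worked example (illustrative, assuming 4 KiB pages): pos =
	 * 0x12345 gives pageoff = 0x345 and pos = 0x12000; len = 0x1000
	 * then yields size = round_page(0x1345) = 0x2000.  The check
	 * above catches len values so large that the addition or the
	 * rounding overflows and produces a value smaller than len.
	 */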
302 
303 	/* Ensure alignment is at least a page and fits in a pointer. */
304 	align = flags & MAP_ALIGNMENT_MASK;
305 	if (align != 0 && align != MAP_ALIGNED_SUPER &&
306 	    (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
307 	    align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
308 		return (EINVAL);
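	/*
	 * For reference (illustrative): userland encodes an explicit
	 * alignment request as MAP_ALIGNED(n), asking for a 2^n byte
	 * boundary (e.g. MAP_ALIGNED(21) for 2 MiB), while
	 * MAP_ALIGNED_SUPER leaves the superpage size to the kernel.
	 * The test above rejects exponents smaller than a page or too
	 * large to fit in a pointer.
	 */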
309 
310 	/*
311 	 * Check for illegal addresses.  Watch out for address wrap... Note
312 	 * that VM_*_ADDRESS are not constants due to casts (argh).
313 	 */
314 	if (flags & MAP_FIXED) {
315 		/*
316 		 * The specified address must have the same remainder
317 		 * as the file offset taken modulo PAGE_SIZE, so it
318 		 * should be aligned after adjustment by pageoff.
319 		 */
320 		addr -= pageoff;
321 		if (addr & PAGE_MASK)
322 			return (EINVAL);
323 
324 		/* Address range must be all in user VM space. */
325 		if (!vm_map_range_valid(&vms->vm_map, addr, addr + size))
326 			return (EINVAL);
327 		if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
328 			return (EINVAL);
329 	} else if (flags & MAP_32BIT) {
330 		/*
331 		 * For MAP_32BIT, override the hint if it is too high and
332 		 * do not bother moving the mapping past the heap (since
333 		 * the heap is usually above 2GB).
334 		 */
335 		if (addr + size > MAP_32BIT_MAX_ADDR)
336 			addr = 0;
337 	} else {
338 		/*
339 		 * XXX for non-fixed mappings where no hint is provided or
340 		 * the hint would fall in the potential heap space,
341 		 * place it after the end of the largest possible heap.
342 		 *
343 		 * For anonymous mappings within the address space of the
344 		 * calling process, the absence of a hint is handled at a
345 		 * lower level in order to implement different clustering
346 		 * strategies for ASLR.
347 		 */
348 		if (((flags & MAP_ANON) == 0 && addr == 0) ||
349 		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
350 		    addr < round_page((vm_offset_t)vms->vm_daddr +
351 		    lim_max(td, RLIMIT_DATA))))
352 			addr = round_page((vm_offset_t)vms->vm_daddr +
353 			    lim_max(td, RLIMIT_DATA));
354 	}
355 	if (len == 0) {
356 		/*
357 		 * Return success without mapping anything for old
358 		 * binaries that request a page-aligned mapping of
359 		 * length 0.  For modern binaries, this function
360 		 * returns an error earlier.
361 		 */
362 		error = 0;
363 	} else if ((flags & MAP_GUARD) != 0) {
364 		error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
365 		    VM_PROT_NONE, flags, NULL, pos, FALSE, td);
366 	} else if ((flags & MAP_ANON) != 0) {
367 		/*
368 		 * Mapping blank space is trivial.
369 		 *
370 		 * This relies on VM_PROT_* matching PROT_*.
371 		 */
372 		error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
373 		    max_prot, flags, NULL, pos, FALSE, td);
374 	} else {
375 		/*
376 		 * Mapping file, get fp for validation and don't let the
377 		 * descriptor disappear on us if we block. Check capability
378 		 * rights, but also return the maximum rights to be combined
379 		 * with maxprot later.
380 		 */
381 		cap_rights_init_one(&rights, CAP_MMAP);
382 		if (prot & PROT_READ)
383 			cap_rights_set_one(&rights, CAP_MMAP_R);
384 		if ((flags & MAP_SHARED) != 0) {
385 			if (prot & PROT_WRITE)
386 				cap_rights_set_one(&rights, CAP_MMAP_W);
387 		}
388 		if (prot & PROT_EXEC)
389 			cap_rights_set_one(&rights, CAP_MMAP_X);
390 		error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
391 		if (error != 0)
392 			goto done;
393 		if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
394 		    p->p_osrel >= P_OSREL_MAP_FSTRICT) {
395 			error = EINVAL;
396 			goto done;
397 		}
398 		if (check_fp_fn != NULL) {
399 			error = check_fp_fn(fp, prot, max_prot & cap_maxprot,
400 			    flags);
401 			if (error != 0)
402 				goto done;
403 		}
404 		if (fp->f_ops == &shm_ops && shm_largepage(fp->f_data))
405 			addr = orig_addr;
406 		/* This relies on VM_PROT_* matching PROT_*. */
407 		error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
408 		    max_prot & cap_maxprot, flags, pos, td);
409 	}
410 
411 	if (error == 0)
412 		td->td_retval[0] = addr + pageoff;
413 done:
414 	if (fp)
415 		fdrop(fp, td);
416 
417 	return (error);
418 }
419 
420 #if defined(COMPAT_FREEBSD6)
421 int
422 freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
423 {
424 	return (kern_mmap(td, &(struct mmap_req){
425 		.mr_hint = (uintptr_t)uap->addr,
426 		.mr_len = uap->len,
427 		.mr_prot = uap->prot,
428 		.mr_flags = uap->flags,
429 		.mr_fd = uap->fd,
430 		.mr_pos = uap->pos,
431 	    }));
432 }
433 #endif
434 
435 #ifdef COMPAT_43
436 #ifndef _SYS_SYSPROTO_H_
437 struct ommap_args {
438 	caddr_t addr;
439 	int len;
440 	int prot;
441 	int flags;
442 	int fd;
443 	long pos;
444 };
445 #endif
446 int
447 ommap(struct thread *td, struct ommap_args *uap)
448 {
449 	return (kern_ommap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
450 	    uap->flags, uap->fd, uap->pos));
451 }
452 
453 int
454 kern_ommap(struct thread *td, uintptr_t hint, int len, int oprot,
455     int oflags, int fd, long pos)
456 {
457 	static const char cvtbsdprot[8] = {
458 		0,
459 		PROT_EXEC,
460 		PROT_WRITE,
461 		PROT_EXEC | PROT_WRITE,
462 		PROT_READ,
463 		PROT_EXEC | PROT_READ,
464 		PROT_WRITE | PROT_READ,
465 		PROT_EXEC | PROT_WRITE | PROT_READ,
466 	};
467 	int flags, prot;
468 
469 	if (len < 0)
470 		return (EINVAL);
471 
472 #define	OMAP_ANON	0x0002
473 #define	OMAP_COPY	0x0020
474 #define	OMAP_SHARED	0x0010
475 #define	OMAP_FIXED	0x0100
476 
477 	prot = cvtbsdprot[oprot & 0x7];
478 #if (defined(COMPAT_FREEBSD32) && defined(__amd64__)) || defined(__i386__)
479 	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
480 	    prot != 0)
481 		prot |= PROT_EXEC;
482 #endif
483 	flags = 0;
484 	if (oflags & OMAP_ANON)
485 		flags |= MAP_ANON;
486 	if (oflags & OMAP_COPY)
487 		flags |= MAP_COPY;
488 	if (oflags & OMAP_SHARED)
489 		flags |= MAP_SHARED;
490 	else
491 		flags |= MAP_PRIVATE;
492 	if (oflags & OMAP_FIXED)
493 		flags |= MAP_FIXED;
494 	return (kern_mmap(td, &(struct mmap_req){
495 		.mr_hint = hint,
496 		.mr_len = len,
497 		.mr_prot = prot,
498 		.mr_flags = flags,
499 		.mr_fd = fd,
500 		.mr_pos = pos,
501 	    }));
502 }
503 #endif				/* COMPAT_43 */
504 
505 #ifndef _SYS_SYSPROTO_H_
506 struct msync_args {
507 	void *addr;
508 	size_t len;
509 	int flags;
510 };
511 #endif
512 int
513 sys_msync(struct thread *td, struct msync_args *uap)
514 {
515 
516 	return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
517 }
518 
519 int
520 kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
521 {
522 	vm_offset_t addr;
523 	vm_size_t pageoff;
524 	vm_map_t map;
525 	int rv;
526 
527 	addr = addr0;
528 	pageoff = (addr & PAGE_MASK);
529 	addr -= pageoff;
530 	size += pageoff;
531 	size = (vm_size_t) round_page(size);
532 	if (addr + size < addr)
533 		return (EINVAL);
534 
535 	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
536 		return (EINVAL);
537 
538 	map = &td->td_proc->p_vmspace->vm_map;
539 
540 	/*
541 	 * Clean the pages and interpret the return value.
542 	 */
543 	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
544 	    (flags & MS_INVALIDATE) != 0);
545 	switch (rv) {
546 	case KERN_SUCCESS:
547 		return (0);
548 	case KERN_INVALID_ADDRESS:
549 		return (ENOMEM);
550 	case KERN_INVALID_ARGUMENT:
551 		return (EBUSY);
552 	case KERN_FAILURE:
553 		return (EIO);
554 	default:
555 		return (EINVAL);
556 	}
557 }
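
/*
 * Illustrative sketch (placeholder names, not part of this file):
 *
 *	if (msync(p, len, MS_SYNC) == -1)
 *		warn("msync");
 *
 * MS_SYNC waits for the write-back to complete, MS_ASYNC only starts
 * it, and MS_INVALIDATE additionally discards cached copies; MS_ASYNC
 * and MS_INVALIDATE may not be combined.  The EBUSY translation above
 * typically indicates wired pages that cannot be invalidated.
 */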
558 
559 #ifndef _SYS_SYSPROTO_H_
560 struct munmap_args {
561 	void *addr;
562 	size_t len;
563 };
564 #endif
565 int
566 sys_munmap(struct thread *td, struct munmap_args *uap)
567 {
568 
569 	return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
570 }
571 
572 int
573 kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
574 {
575 #ifdef HWPMC_HOOKS
576 	struct pmckern_map_out pkm;
577 	vm_map_entry_t entry;
578 	bool pmc_handled;
579 #endif
580 	vm_offset_t addr, end;
581 	vm_size_t pageoff;
582 	vm_map_t map;
583 	int rv;
584 
585 	if (size == 0)
586 		return (EINVAL);
587 
588 	addr = addr0;
589 	pageoff = (addr & PAGE_MASK);
590 	addr -= pageoff;
591 	size += pageoff;
592 	size = (vm_size_t) round_page(size);
593 	end = addr + size;
594 	map = &td->td_proc->p_vmspace->vm_map;
595 	if (!vm_map_range_valid(map, addr, end))
596 		return (EINVAL);
597 
598 	vm_map_lock(map);
599 #ifdef HWPMC_HOOKS
600 	pmc_handled = false;
601 	if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
602 		pmc_handled = true;
603 		/*
604 		 * Inform hwpmc if the address range being unmapped contains
605 		 * an executable region.
606 		 */
607 		pkm.pm_address = (uintptr_t) NULL;
608 		if (vm_map_lookup_entry(map, addr, &entry)) {
609 			for (; entry->start < end;
610 			    entry = vm_map_entry_succ(entry)) {
611 				if (vm_map_check_protection(map, entry->start,
612 					entry->end, VM_PROT_EXECUTE) == TRUE) {
613 					pkm.pm_address = (uintptr_t) addr;
614 					pkm.pm_size = (size_t) size;
615 					break;
616 				}
617 			}
618 		}
619 	}
620 #endif
621 	rv = vm_map_delete(map, addr, end);
622 
623 #ifdef HWPMC_HOOKS
624 	if (rv == KERN_SUCCESS && __predict_false(pmc_handled)) {
625 		/* downgrade the lock to prevent a LOR with the pmc-sx lock */
626 		vm_map_lock_downgrade(map);
627 		if (pkm.pm_address != (uintptr_t) NULL)
628 			PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
629 		vm_map_unlock_read(map);
630 	} else
631 #endif
632 		vm_map_unlock(map);
633 
634 	return (vm_mmap_to_errno(rv));
635 }
636 
637 #ifndef _SYS_SYSPROTO_H_
638 struct mprotect_args {
639 	const void *addr;
640 	size_t len;
641 	int prot;
642 };
643 #endif
644 int
645 sys_mprotect(struct thread *td, struct mprotect_args *uap)
646 {
647 
648 	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len,
649 	    uap->prot, 0));
650 }
651 
652 int
653 kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot,
654     int flags)
655 {
656 	vm_offset_t addr;
657 	vm_size_t pageoff;
658 	int vm_error, max_prot;
659 
660 	addr = addr0;
661 	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
662 		return (EINVAL);
663 	max_prot = PROT_MAX_EXTRACT(prot);
664 	prot = PROT_EXTRACT(prot);
665 	pageoff = (addr & PAGE_MASK);
666 	addr -= pageoff;
667 	size += pageoff;
668 	size = (vm_size_t) round_page(size);
669 #ifdef COMPAT_FREEBSD32
670 	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
671 		if (((addr + size) & 0xffffffff) < addr)
672 			return (EINVAL);
673 	} else
674 #endif
675 	if (addr + size < addr)
676 		return (EINVAL);
677 
678 	flags |= VM_MAP_PROTECT_SET_PROT;
679 	if (max_prot != 0)
680 		flags |= VM_MAP_PROTECT_SET_MAXPROT;
681 	vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
682 	    addr, addr + size, prot, max_prot, flags);
683 
684 	switch (vm_error) {
685 	case KERN_SUCCESS:
686 		return (0);
687 	case KERN_PROTECTION_FAILURE:
688 		return (EACCES);
689 	case KERN_RESOURCE_SHORTAGE:
690 		return (ENOMEM);
691 	case KERN_OUT_OF_BOUNDS:
692 		return (ENOTSUP);
693 	}
694 	return (EINVAL);
695 }
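
/*
 * Illustrative sketch (placeholder names, not part of this file): like
 * mmap(), mprotect() accepts PROT_MAX() bits, so
 *
 *	mprotect(p, len, PROT_READ | PROT_MAX(PROT_READ));
 *
 * both drops write/execute permission and lowers the maximum
 * protection, making a later attempt to restore PROT_WRITE fail with
 * EACCES.
 */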
696 
697 #ifndef _SYS_SYSPROTO_H_
698 struct minherit_args {
699 	void *addr;
700 	size_t len;
701 	int inherit;
702 };
703 #endif
704 int
705 sys_minherit(struct thread *td, struct minherit_args *uap)
706 {
707 
708 	return (kern_minherit(td, (uintptr_t)uap->addr, uap->len,
709 	    uap->inherit));
710 }
711 
712 int
713 kern_minherit(struct thread *td, uintptr_t addr0, size_t len, int inherit0)
714 {
715 	vm_offset_t addr;
716 	vm_size_t size, pageoff;
717 	vm_inherit_t inherit;
718 
719 	addr = (vm_offset_t)addr0;
720 	size = len;
721 	inherit = inherit0;
722 
723 	pageoff = (addr & PAGE_MASK);
724 	addr -= pageoff;
725 	size += pageoff;
726 	size = (vm_size_t) round_page(size);
727 	if (addr + size < addr)
728 		return (EINVAL);
729 
730 	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
731 	    addr + size, inherit)) {
732 	case KERN_SUCCESS:
733 		return (0);
734 	case KERN_PROTECTION_FAILURE:
735 		return (EACCES);
736 	}
737 	return (EINVAL);
738 }
739 
740 #ifndef _SYS_SYSPROTO_H_
741 struct madvise_args {
742 	void *addr;
743 	size_t len;
744 	int behav;
745 };
746 #endif
747 
748 int
749 sys_madvise(struct thread *td, struct madvise_args *uap)
750 {
751 
752 	return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
753 }
754 
755 int
756 kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
757 {
758 	vm_map_t map;
759 	vm_offset_t addr, end, start;
760 	int flags;
761 
762 	/*
763 	 * Check for our special case, advising the swap pager we are
764 	 * "immortal."
765 	 */
766 	if (behav == MADV_PROTECT) {
767 		flags = PPROT_SET;
768 		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
769 		    PROC_SPROTECT, &flags));
770 	}
771 
772 	/*
773 	 * Check for illegal addresses.  Watch out for address wrap... Note
774 	 * that VM_*_ADDRESS are not constants due to casts (argh).
775 	 */
776 	map = &td->td_proc->p_vmspace->vm_map;
777 	addr = addr0;
778 	if (!vm_map_range_valid(map, addr, addr + len))
779 		return (EINVAL);
780 
781 	/*
782 	 * Since this routine is only advisory, we default to conservative
783 	 * behavior.
784 	 */
785 	start = trunc_page(addr);
786 	end = round_page(addr + len);
787 
788 	/*
789 	 * vm_map_madvise() checks for illegal values of behav.
790 	 */
791 	return (vm_map_madvise(map, start, end, behav));
792 }
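
/*
 * Illustrative sketch (placeholder names, not part of this file):
 * advice is only a hint, e.g. madvise(p, len, MADV_WILLNEED) to ask
 * for soon-to-be-used pages to be faulted in, or madvise(p, len,
 * MADV_FREE) to allow their contents to be discarded.  MADV_PROTECT
 * is the exception handled above: it does not touch the mapping at
 * all, but marks the process as exempt from being killed when the
 * system runs out of swap, via PROC_SPROTECT.
 */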
793 
794 #ifndef _SYS_SYSPROTO_H_
795 struct mincore_args {
796 	const void *addr;
797 	size_t len;
798 	char *vec;
799 };
800 #endif
801 
802 int
803 sys_mincore(struct thread *td, struct mincore_args *uap)
804 {
805 
806 	return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
807 }
808 
809 int
810 kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
811 {
812 	pmap_t pmap;
813 	vm_map_t map;
814 	vm_map_entry_t current, entry;
815 	vm_object_t object;
816 	vm_offset_t addr, cend, end, first_addr;
817 	vm_paddr_t pa;
818 	vm_page_t m;
819 	vm_pindex_t pindex;
820 	int error, lastvecindex, mincoreinfo, vecindex;
821 	unsigned int timestamp;
822 
823 	/*
824 	 * Make sure that the addresses presented are valid for user
825 	 * mode.
826 	 */
827 	first_addr = addr = trunc_page(addr0);
828 	end = round_page(addr0 + len);
829 	map = &td->td_proc->p_vmspace->vm_map;
830 	if (end > vm_map_max(map) || end < addr)
831 		return (ENOMEM);
832 
833 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
834 
835 	vm_map_lock_read(map);
836 RestartScan:
837 	timestamp = map->timestamp;
838 
839 	if (!vm_map_lookup_entry(map, addr, &entry)) {
840 		vm_map_unlock_read(map);
841 		return (ENOMEM);
842 	}
843 
844 	/*
845 	 * Do this on a map entry basis so that if the pages are not
846 	 * in the current process's address space, we can easily look
847 	 * up the pages elsewhere.
848 	 */
849 	lastvecindex = -1;
850 	while (entry->start < end) {
851 		/*
852 		 * check for contiguity
853 		 */
854 		current = entry;
855 		entry = vm_map_entry_succ(current);
856 		if (current->end < end &&
857 		    entry->start > current->end) {
858 			vm_map_unlock_read(map);
859 			return (ENOMEM);
860 		}
861 
862 		/*
863 		 * ignore submaps (for now) or null objects
864 		 */
865 		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
866 		    current->object.vm_object == NULL)
867 			continue;
868 
869 		/*
870 		 * limit this scan to the current map entry and the
871 		 * limits for the mincore call
872 		 */
873 		if (addr < current->start)
874 			addr = current->start;
875 		cend = current->end;
876 		if (cend > end)
877 			cend = end;
878 
879 		for (; addr < cend; addr += PAGE_SIZE) {
880 			/*
881 			 * Check the pmap first; it is likely faster and
882 			 * can also tell us whether we are the one
883 			 * referencing or modifying the page.
884 			 */
885 			m = NULL;
886 			object = NULL;
887 retry:
888 			pa = 0;
889 			mincoreinfo = pmap_mincore(pmap, addr, &pa);
890 			if (mincore_mapped) {
891 				/*
892 				 * We only care about this pmap's
893 				 * mapping of the page, if any.
894 				 */
895 				;
896 			} else if (pa != 0) {
897 				/*
898 				 * The page is mapped by this process but not
899 				 * both accessed and modified.  It is also
900 				 * managed.  Acquire the object lock so that
901 				 * other mappings might be examined.  The page's
902 				 * identity may change at any point before its
903 				 * object lock is acquired, so re-validate if
904 				 * necessary.
905 				 */
906 				m = PHYS_TO_VM_PAGE(pa);
907 				while (object == NULL || m->object != object) {
908 					if (object != NULL)
909 						VM_OBJECT_WUNLOCK(object);
910 					object = atomic_load_ptr(&m->object);
911 					if (object == NULL)
912 						goto retry;
913 					VM_OBJECT_WLOCK(object);
914 				}
915 				if (pa != pmap_extract(pmap, addr))
916 					goto retry;
917 				KASSERT(vm_page_all_valid(m),
918 				    ("mincore: page %p is mapped but invalid",
919 				    m));
920 			} else if (mincoreinfo == 0) {
921 				/*
922 				 * The page is not mapped by this process.  If
923 				 * the object implements managed pages, then
924 				 * determine if the page is resident so that
925 				 * the mappings might be examined.
926 				 */
927 				if (current->object.vm_object != object) {
928 					if (object != NULL)
929 						VM_OBJECT_WUNLOCK(object);
930 					object = current->object.vm_object;
931 					VM_OBJECT_WLOCK(object);
932 				}
933 				if ((object->flags & OBJ_SWAP) != 0 ||
934 				    object->type == OBJT_VNODE) {
935 					pindex = OFF_TO_IDX(current->offset +
936 					    (addr - current->start));
937 					m = vm_page_lookup(object, pindex);
938 					if (m != NULL && vm_page_none_valid(m))
939 						m = NULL;
940 					if (m != NULL)
941 						mincoreinfo = MINCORE_INCORE;
942 				}
943 			}
944 			if (m != NULL) {
945 				VM_OBJECT_ASSERT_WLOCKED(m->object);
946 
947 				/* Examine other mappings of the page. */
948 				if (m->dirty == 0 && pmap_is_modified(m))
949 					vm_page_dirty(m);
950 				if (m->dirty != 0)
951 					mincoreinfo |= MINCORE_MODIFIED_OTHER;
952 
953 				/*
954 				 * The first test for PGA_REFERENCED is an
955 				 * optimization.  The second test is
956 				 * required because a concurrent pmap
957 				 * operation could clear the last reference
958 				 * and set PGA_REFERENCED before the call to
959 				 * pmap_is_referenced().
960 				 */
961 				if ((m->a.flags & PGA_REFERENCED) != 0 ||
962 				    pmap_is_referenced(m) ||
963 				    (m->a.flags & PGA_REFERENCED) != 0)
964 					mincoreinfo |= MINCORE_REFERENCED_OTHER;
965 			}
966 			if (object != NULL)
967 				VM_OBJECT_WUNLOCK(object);
968 
969 			/*
970 			 * subyte may page fault.  In case it needs to modify
971 			 * the map, we release the lock.
972 			 */
973 			vm_map_unlock_read(map);
974 
975 			/*
976 			 * calculate index into user supplied byte vector
977 			 */
978 			vecindex = atop(addr - first_addr);
979 
980 			/*
981 			 * If we have skipped map entries, we need to make sure that
982 			 * the byte vector is zeroed for those skipped entries.
983 			 */
984 			while ((lastvecindex + 1) < vecindex) {
985 				++lastvecindex;
986 				error = subyte(vec + lastvecindex, 0);
987 				if (error) {
988 					error = EFAULT;
989 					goto done2;
990 				}
991 			}
992 
993 			/*
994 			 * Pass the page information to the user
995 			 */
996 			error = subyte(vec + vecindex, mincoreinfo);
997 			if (error) {
998 				error = EFAULT;
999 				goto done2;
1000 			}
1001 
1002 			/*
1003 			 * If the map has changed, due to the subyte, the previous
1004 			 * output may be invalid.
1005 			 */
1006 			vm_map_lock_read(map);
1007 			if (timestamp != map->timestamp)
1008 				goto RestartScan;
1009 
1010 			lastvecindex = vecindex;
1011 		}
1012 	}
1013 
1014 	/*
1015 	 * subyte may page fault.  In case it needs to modify
1016 	 * the map, we release the lock.
1017 	 */
1018 	vm_map_unlock_read(map);
1019 
1020 	/*
1021 	 * Zero the last entries in the byte vector.
1022 	 */
1023 	vecindex = atop(end - first_addr);
1024 	while ((lastvecindex + 1) < vecindex) {
1025 		++lastvecindex;
1026 		error = subyte(vec + lastvecindex, 0);
1027 		if (error) {
1028 			error = EFAULT;
1029 			goto done2;
1030 		}
1031 	}
1032 
1033 	/*
1034 	 * If the map has changed, due to the subyte, the previous
1035 	 * output may be invalid.
1036 	 */
1037 	vm_map_lock_read(map);
1038 	if (timestamp != map->timestamp)
1039 		goto RestartScan;
1040 	vm_map_unlock_read(map);
1041 done2:
1042 	return (error);
1043 }
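
/*
 * Illustrative sketch (placeholder names, not part of this file): the
 * caller supplies one byte per page, e.g. with len a multiple of the
 * page size
 *
 *	char *vec = malloc(len / getpagesize());
 *	mincore(p, len, vec);
 *
 * and then tests bits such as MINCORE_INCORE or MINCORE_MODIFIED in
 * each byte.  With vm.mincore_mapped set (the default above), the
 * result describes this process's mappings rather than residency.
 */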
1044 
1045 #ifndef _SYS_SYSPROTO_H_
1046 struct mlock_args {
1047 	const void *addr;
1048 	size_t len;
1049 };
1050 #endif
1051 int
1052 sys_mlock(struct thread *td, struct mlock_args *uap)
1053 {
1054 
1055 	return (kern_mlock(td->td_proc, td->td_ucred,
1056 	    __DECONST(uintptr_t, uap->addr), uap->len));
1057 }
1058 
1059 int
1060 kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
1061 {
1062 	vm_offset_t addr, end, last, start;
1063 	vm_size_t npages, size;
1064 	vm_map_t map;
1065 	unsigned long nsize;
1066 	int error;
1067 
1068 	error = priv_check_cred(cred, PRIV_VM_MLOCK);
1069 	if (error)
1070 		return (error);
1071 	addr = addr0;
1072 	size = len;
1073 	last = addr + size;
1074 	start = trunc_page(addr);
1075 	end = round_page(last);
1076 	if (last < addr || end < addr)
1077 		return (EINVAL);
1078 	npages = atop(end - start);
1079 	if (npages > vm_page_max_user_wired)
1080 		return (ENOMEM);
1081 	map = &proc->p_vmspace->vm_map;
1082 	PROC_LOCK(proc);
1083 	nsize = ptoa(npages + pmap_wired_count(map->pmap));
1084 	if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
1085 		PROC_UNLOCK(proc);
1086 		return (ENOMEM);
1087 	}
1088 	PROC_UNLOCK(proc);
1089 #ifdef RACCT
1090 	if (racct_enable) {
1091 		PROC_LOCK(proc);
1092 		error = racct_set(proc, RACCT_MEMLOCK, nsize);
1093 		PROC_UNLOCK(proc);
1094 		if (error != 0)
1095 			return (ENOMEM);
1096 	}
1097 #endif
1098 	error = vm_map_wire(map, start, end,
1099 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1100 #ifdef RACCT
1101 	if (racct_enable && error != KERN_SUCCESS) {
1102 		PROC_LOCK(proc);
1103 		racct_set(proc, RACCT_MEMLOCK,
1104 		    ptoa(pmap_wired_count(map->pmap)));
1105 		PROC_UNLOCK(proc);
1106 	}
1107 #endif
1108 	switch (error) {
1109 	case KERN_SUCCESS:
1110 		return (0);
1111 	case KERN_INVALID_ARGUMENT:
1112 		return (EINVAL);
1113 	default:
1114 		return (ENOMEM);
1115 	}
1116 }
1117 
1118 #ifndef _SYS_SYSPROTO_H_
1119 struct mlockall_args {
1120 	int	how;
1121 };
1122 #endif
1123 
1124 int
1125 sys_mlockall(struct thread *td, struct mlockall_args *uap)
1126 {
1127 	vm_map_t map;
1128 	int error;
1129 
1130 	map = &td->td_proc->p_vmspace->vm_map;
1131 	error = priv_check(td, PRIV_VM_MLOCK);
1132 	if (error)
1133 		return (error);
1134 
1135 	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
1136 		return (EINVAL);
1137 
1138 	/*
1139 	 * If wiring all pages in the process would cause it to exceed
1140 	 * a hard resource limit, return ENOMEM.
1141 	 */
1142 	if (!old_mlock && uap->how & MCL_CURRENT) {
1143 		if (map->size > lim_cur(td, RLIMIT_MEMLOCK))
1144 			return (ENOMEM);
1145 	}
1146 #ifdef RACCT
1147 	if (racct_enable) {
1148 		PROC_LOCK(td->td_proc);
1149 		error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
1150 		PROC_UNLOCK(td->td_proc);
1151 		if (error != 0)
1152 			return (ENOMEM);
1153 	}
1154 #endif
1155 
1156 	if (uap->how & MCL_FUTURE) {
1157 		vm_map_lock(map);
1158 		vm_map_modflags(map, MAP_WIREFUTURE, 0);
1159 		vm_map_unlock(map);
1160 		error = 0;
1161 	}
1162 
1163 	if (uap->how & MCL_CURRENT) {
1164 		/*
1165 		 * P1003.1-2001 mandates that all currently mapped pages
1166 		 * will be memory resident and locked (wired) upon return
1167 		 * from mlockall(). vm_map_wire() will wire pages by
1168 		 * calling vm_fault_wire() for each page in the region.
1169 		 */
1170 		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
1171 		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1172 		if (error == KERN_SUCCESS)
1173 			error = 0;
1174 		else if (error == KERN_RESOURCE_SHORTAGE)
1175 			error = ENOMEM;
1176 		else
1177 			error = EAGAIN;
1178 	}
1179 #ifdef RACCT
1180 	if (racct_enable && error != KERN_SUCCESS) {
1181 		PROC_LOCK(td->td_proc);
1182 		racct_set(td->td_proc, RACCT_MEMLOCK,
1183 		    ptoa(pmap_wired_count(map->pmap)));
1184 		PROC_UNLOCK(td->td_proc);
1185 	}
1186 #endif
1187 
1188 	return (error);
1189 }
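
/*
 * Illustrative sketch (not part of this file):
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);
 *
 * wires every currently mapped page and sets MAP_WIREFUTURE so that
 * later mappings are wired as they are created; the vm.old_mlock knob
 * above controls whether RLIMIT_MEMLOCK is enforced for the
 * MCL_CURRENT case.
 */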
1190 
1191 #ifndef _SYS_SYSPROTO_H_
1192 struct munlockall_args {
1193 	register_t dummy;
1194 };
1195 #endif
1196 
1197 int
1198 sys_munlockall(struct thread *td, struct munlockall_args *uap)
1199 {
1200 	vm_map_t map;
1201 	int error;
1202 
1203 	map = &td->td_proc->p_vmspace->vm_map;
1204 	error = priv_check(td, PRIV_VM_MUNLOCK);
1205 	if (error)
1206 		return (error);
1207 
1208 	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
1209 	vm_map_lock(map);
1210 	vm_map_modflags(map, 0, MAP_WIREFUTURE);
1211 	vm_map_unlock(map);
1212 
1213 	/* Forcibly unwire all pages. */
1214 	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
1215 	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1216 #ifdef RACCT
1217 	if (racct_enable && error == KERN_SUCCESS) {
1218 		PROC_LOCK(td->td_proc);
1219 		racct_set(td->td_proc, RACCT_MEMLOCK, 0);
1220 		PROC_UNLOCK(td->td_proc);
1221 	}
1222 #endif
1223 
1224 	return (error);
1225 }
1226 
1227 #ifndef _SYS_SYSPROTO_H_
1228 struct munlock_args {
1229 	const void *addr;
1230 	size_t len;
1231 };
1232 #endif
1233 int
1234 sys_munlock(struct thread *td, struct munlock_args *uap)
1235 {
1236 
1237 	return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
1238 }
1239 
1240 int
1241 kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
1242 {
1243 	vm_offset_t addr, end, last, start;
1244 #ifdef RACCT
1245 	vm_map_t map;
1246 #endif
1247 	int error;
1248 
1249 	error = priv_check(td, PRIV_VM_MUNLOCK);
1250 	if (error)
1251 		return (error);
1252 	addr = addr0;
1253 	last = addr + size;
1254 	start = trunc_page(addr);
1255 	end = round_page(last);
1256 	if (last < addr || end < addr)
1257 		return (EINVAL);
1258 	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
1259 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1260 #ifdef RACCT
1261 	if (racct_enable && error == KERN_SUCCESS) {
1262 		PROC_LOCK(td->td_proc);
1263 		map = &td->td_proc->p_vmspace->vm_map;
1264 		racct_set(td->td_proc, RACCT_MEMLOCK,
1265 		    ptoa(pmap_wired_count(map->pmap)));
1266 		PROC_UNLOCK(td->td_proc);
1267 	}
1268 #endif
1269 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
1270 }
1271 
1272 /*
1273  * vm_mmap_vnode()
1274  *
1275  * Helper function for vm_mmap.  Perform sanity check specific for mmap
1276  * operations on vnodes.
1277  */
1278 int
1279 vm_mmap_vnode(struct thread *td, vm_size_t objsize,
1280     vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
1281     struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
1282     boolean_t *writecounted)
1283 {
1284 	struct vattr va;
1285 	vm_object_t obj;
1286 	vm_ooffset_t foff;
1287 	struct ucred *cred;
1288 	int error, flags;
1289 	bool writex;
1290 
1291 	cred = td->td_ucred;
1292 	writex = (*maxprotp & VM_PROT_WRITE) != 0 &&
1293 	    (*flagsp & MAP_SHARED) != 0;
1294 	if ((error = vget(vp, LK_SHARED)) != 0)
1295 		return (error);
1296 	AUDIT_ARG_VNODE1(vp);
1297 	foff = *foffp;
1298 	flags = *flagsp;
1299 	obj = vp->v_object;
1300 	if (vp->v_type == VREG) {
1301 		/*
1302 		 * Get the proper underlying object
1303 		 */
1304 		if (obj == NULL) {
1305 			error = EINVAL;
1306 			goto done;
1307 		}
1308 		if (obj->type == OBJT_VNODE && obj->handle != vp) {
1309 			vput(vp);
1310 			vp = (struct vnode *)obj->handle;
1311 			/*
1312 			 * Bypass filesystems obey the mpsafety of the
1313 			 * underlying fs.  Tmpfs never bypasses.
1314 			 */
1315 			error = vget(vp, LK_SHARED);
1316 			if (error != 0)
1317 				return (error);
1318 		}
1319 		if (writex) {
1320 			*writecounted = TRUE;
1321 			vm_pager_update_writecount(obj, 0, objsize);
1322 		}
1323 	} else {
1324 		error = EINVAL;
1325 		goto done;
1326 	}
1327 	if ((error = VOP_GETATTR(vp, &va, cred)))
1328 		goto done;
1329 #ifdef MAC
1330 	/* This relies on VM_PROT_* matching PROT_*. */
1331 	error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
1332 	if (error != 0)
1333 		goto done;
1334 #endif
1335 	if ((flags & MAP_SHARED) != 0) {
1336 		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
1337 			if (prot & VM_PROT_WRITE) {
1338 				error = EPERM;
1339 				goto done;
1340 			}
1341 			*maxprotp &= ~VM_PROT_WRITE;
1342 		}
1343 	}
1344 	/*
1345 	 * If it is a regular file with no remaining links,
1346 	 * we do not need to sync it.
1347 	 * Adjust the object size to be the size of the actual file.
1348 	 */
1349 	objsize = round_page(va.va_size);
1350 	if (va.va_nlink == 0)
1351 		flags |= MAP_NOSYNC;
1352 	if (obj->type == OBJT_VNODE) {
1353 		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
1354 		    cred);
1355 		if (obj == NULL) {
1356 			error = ENOMEM;
1357 			goto done;
1358 		}
1359 	} else {
1360 		KASSERT((obj->flags & OBJ_SWAP) != 0, ("wrong object type"));
1361 		vm_object_reference(obj);
1362 #if VM_NRESERVLEVEL > 0
1363 		if ((obj->flags & OBJ_COLORED) == 0) {
1364 			VM_OBJECT_WLOCK(obj);
1365 			vm_object_color(obj, 0);
1366 			VM_OBJECT_WUNLOCK(obj);
1367 		}
1368 #endif
1369 	}
1370 	*objp = obj;
1371 	*flagsp = flags;
1372 
1373 	VOP_MMAPPED(vp);
1374 
1375 done:
1376 	if (error != 0 && *writecounted) {
1377 		*writecounted = FALSE;
1378 		vm_pager_update_writecount(obj, objsize, 0);
1379 	}
1380 	vput(vp);
1381 	return (error);
1382 }
1383 
1384 /*
1385  * vm_mmap_cdev()
1386  *
1387  * Helper function for vm_mmap.  Performs sanity checks specific to mmap
1388  * operations on cdevs.
1389  */
1390 int
1391 vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
1392     vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
1393     vm_ooffset_t *foff, vm_object_t *objp)
1394 {
1395 	vm_object_t obj;
1396 	int error, flags;
1397 
1398 	flags = *flagsp;
1399 
1400 	if (dsw->d_flags & D_MMAP_ANON) {
1401 		*objp = NULL;
1402 		*foff = 0;
1403 		*maxprotp = VM_PROT_ALL;
1404 		*flagsp |= MAP_ANON;
1405 		return (0);
1406 	}
1407 	/*
1408 	 * cdevs do not provide private mappings of any kind.
1409 	 */
1410 	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
1411 	    (prot & VM_PROT_WRITE) != 0)
1412 		return (EACCES);
1413 	if (flags & (MAP_PRIVATE|MAP_COPY))
1414 		return (EINVAL);
1415 	/*
1416 	 * Force device mappings to be shared.
1417 	 */
1418 	flags |= MAP_SHARED;
1419 #ifdef MAC_XXX
1420 	error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
1421 	if (error != 0)
1422 		return (error);
1423 #endif
1424 	/*
1425 	 * First, try d_mmap_single().  If that is not implemented
1426 	 * (returns ENODEV), fall back to using the device pager.
1427 	 * Note that d_mmap_single() must return a reference to the
1428 	 * object (it needs to bump the reference count of the object
1429 	 * it returns somehow).
1430 	 *
1431 	 * XXX assumes VM_PROT_* == PROT_*
1432 	 */
1433 	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
1434 	if (error != ENODEV)
1435 		return (error);
1436 	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
1437 	    td->td_ucred);
1438 	if (obj == NULL)
1439 		return (EINVAL);
1440 	*objp = obj;
1441 	*flagsp = flags;
1442 	return (0);
1443 }
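
/*
 * For reference (illustrative): /dev/zero is the classic D_MMAP_ANON
 * device, so mapping it is converted above into an ordinary anonymous
 * mapping with no backing object.
 */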
1444 
1445 int
1446 vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1447 	vm_prot_t maxprot, int flags,
1448 	objtype_t handle_type, void *handle,
1449 	vm_ooffset_t foff)
1450 {
1451 	vm_object_t object;
1452 	struct thread *td = curthread;
1453 	int error;
1454 	boolean_t writecounted;
1455 
1456 	if (size == 0)
1457 		return (EINVAL);
1458 
1459 	size = round_page(size);
1460 	object = NULL;
1461 	writecounted = FALSE;
1462 
1463 	switch (handle_type) {
1464 	case OBJT_DEVICE: {
1465 		struct cdevsw *dsw;
1466 		struct cdev *cdev;
1467 		int ref;
1468 
1469 		cdev = handle;
1470 		dsw = dev_refthread(cdev, &ref);
1471 		if (dsw == NULL)
1472 			return (ENXIO);
1473 		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
1474 		    dsw, &foff, &object);
1475 		dev_relthread(cdev, ref);
1476 		break;
1477 	}
1478 	case OBJT_VNODE:
1479 		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
1480 		    handle, &foff, &object, &writecounted);
1481 		break;
1482 	default:
1483 		error = EINVAL;
1484 		break;
1485 	}
1486 	if (error)
1487 		return (error);
1488 
1489 	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
1490 	    foff, writecounted, td);
1491 	if (error != 0 && object != NULL) {
1492 		/*
1493 		 * If this mapping was accounted for in the vnode's
1494 		 * writecount, then undo that now.
1495 		 */
1496 		if (writecounted)
1497 			vm_pager_release_writecount(object, 0, size);
1498 		vm_object_deallocate(object);
1499 	}
1500 	return (error);
1501 }
1502 
1503 int
1504 kern_mmap_racct_check(struct thread *td, vm_map_t map, vm_size_t size)
1505 {
1506 	int error;
1507 
1508 	RACCT_PROC_LOCK(td->td_proc);
1509 	if (map->size + size > lim_cur(td, RLIMIT_VMEM)) {
1510 		RACCT_PROC_UNLOCK(td->td_proc);
1511 		return (ENOMEM);
1512 	}
1513 	if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
1514 		RACCT_PROC_UNLOCK(td->td_proc);
1515 		return (ENOMEM);
1516 	}
1517 	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
1518 		if (ptoa(pmap_wired_count(map->pmap)) + size >
1519 		    lim_cur(td, RLIMIT_MEMLOCK)) {
1520 			racct_set_force(td->td_proc, RACCT_VMEM, map->size);
1521 			RACCT_PROC_UNLOCK(td->td_proc);
1522 			return (ENOMEM);
1523 		}
1524 		error = racct_set(td->td_proc, RACCT_MEMLOCK,
1525 		    ptoa(pmap_wired_count(map->pmap)) + size);
1526 		if (error != 0) {
1527 			racct_set_force(td->td_proc, RACCT_VMEM, map->size);
1528 			RACCT_PROC_UNLOCK(td->td_proc);
1529 			return (error);
1530 		}
1531 	}
1532 	RACCT_PROC_UNLOCK(td->td_proc);
1533 	return (0);
1534 }
1535 
1536 /*
1537  * Internal version of mmap that maps a specific VM object into a
1538  * map.  Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
1539  */
1540 int
1541 vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1542     vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
1543     boolean_t writecounted, struct thread *td)
1544 {
1545 	vm_offset_t default_addr, max_addr;
1546 	int docow, error, findspace, rv;
1547 	bool curmap, fitit;
1548 
1549 	curmap = map == &td->td_proc->p_vmspace->vm_map;
1550 	if (curmap) {
1551 		error = kern_mmap_racct_check(td, map, size);
1552 		if (error != 0)
1553 			return (error);
1554 	}
1555 
1556 	/*
1557 	 * We currently can only deal with page aligned file offsets.
1558 	 * The mmap() system call already enforces this by subtracting
1559 	 * the page offset from the file offset, but checking here
1560 	 * catches errors in device drivers (e.g. d_mmap_single()
1561 	 * callbacks) and other internal mapping requests (such as in
1562 	 * exec).
1563 	 */
1564 	if (foff & PAGE_MASK)
1565 		return (EINVAL);
1566 
1567 	if ((flags & MAP_FIXED) == 0) {
1568 		fitit = true;
1569 		*addr = round_page(*addr);
1570 	} else {
1571 		if (*addr != trunc_page(*addr))
1572 			return (EINVAL);
1573 		fitit = false;
1574 	}
1575 
1576 	if (flags & MAP_ANON) {
1577 		if (object != NULL || foff != 0)
1578 			return (EINVAL);
1579 		docow = 0;
1580 	} else if (flags & MAP_PREFAULT_READ)
1581 		docow = MAP_PREFAULT;
1582 	else
1583 		docow = MAP_PREFAULT_PARTIAL;
1584 
1585 	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
1586 		docow |= MAP_COPY_ON_WRITE;
1587 	if (flags & MAP_NOSYNC)
1588 		docow |= MAP_DISABLE_SYNCER;
1589 	if (flags & MAP_NOCORE)
1590 		docow |= MAP_DISABLE_COREDUMP;
1591 	/* Shared memory is also shared with children. */
1592 	if (flags & MAP_SHARED)
1593 		docow |= MAP_INHERIT_SHARE;
1594 	if (writecounted)
1595 		docow |= MAP_WRITECOUNT;
1596 	if (flags & MAP_STACK) {
1597 		if (object != NULL)
1598 			return (EINVAL);
1599 		docow |= MAP_STACK_GROWS_DOWN;
1600 	}
1601 	if ((flags & MAP_EXCL) != 0)
1602 		docow |= MAP_CHECK_EXCL;
1603 	if ((flags & MAP_GUARD) != 0)
1604 		docow |= MAP_CREATE_GUARD;
1605 
1606 	if (fitit) {
1607 		if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
1608 			findspace = VMFS_SUPER_SPACE;
1609 		else if ((flags & MAP_ALIGNMENT_MASK) != 0)
1610 			findspace = VMFS_ALIGNED_SPACE(flags >>
1611 			    MAP_ALIGNMENT_SHIFT);
1612 		else
1613 			findspace = VMFS_OPTIMAL_SPACE;
1614 		max_addr = 0;
1615 		if ((flags & MAP_32BIT) != 0)
1616 			max_addr = MAP_32BIT_MAX_ADDR;
1617 		if (curmap) {
1618 			default_addr =
1619 			    round_page((vm_offset_t)td->td_proc->p_vmspace->
1620 			    vm_daddr + lim_max(td, RLIMIT_DATA));
1621 			if ((flags & MAP_32BIT) != 0)
1622 				default_addr = 0;
1623 			rv = vm_map_find_min(map, object, foff, addr, size,
1624 			    default_addr, max_addr, findspace, prot, maxprot,
1625 			    docow);
1626 		} else {
1627 			rv = vm_map_find(map, object, foff, addr, size,
1628 			    max_addr, findspace, prot, maxprot, docow);
1629 		}
1630 	} else {
1631 		rv = vm_map_fixed(map, object, foff, *addr, size,
1632 		    prot, maxprot, docow);
1633 	}
1634 
1635 	if (rv == KERN_SUCCESS) {
1636 		/*
1637 		 * If the process has requested that all future mappings
1638 		 * be wired, then heed this.
1639 		 */
1640 		if ((map->flags & MAP_WIREFUTURE) != 0) {
1641 			vm_map_lock(map);
1642 			if ((map->flags & MAP_WIREFUTURE) != 0)
1643 				(void)vm_map_wire_locked(map, *addr,
1644 				    *addr + size, VM_MAP_WIRE_USER |
1645 				    ((flags & MAP_STACK) ? VM_MAP_WIRE_HOLESOK :
1646 				    VM_MAP_WIRE_NOHOLES));
1647 			vm_map_unlock(map);
1648 		}
1649 	}
1650 	return (vm_mmap_to_errno(rv));
1651 }
1652 
1653 /*
1654  * Translate a Mach VM return code to zero on success or the appropriate errno
1655  * on failure.
1656  */
1657 int
1658 vm_mmap_to_errno(int rv)
1659 {
1660 
1661 	switch (rv) {
1662 	case KERN_SUCCESS:
1663 		return (0);
1664 	case KERN_INVALID_ADDRESS:
1665 	case KERN_NO_SPACE:
1666 		return (ENOMEM);
1667 	case KERN_PROTECTION_FAILURE:
1668 		return (EACCES);
1669 	default:
1670 		return (EINVAL);
1671 	}
1672 }
1673