xref: /freebsd/sys/vm/vm_mmap.c (revision 2008043f386721d58158e37e0d7e50df8095942d)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
37  *
38  *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
39  */
40 
41 /*
42  * Mapped file (mmap) interface to VM
43  */
44 
45 #include <sys/cdefs.h>
46 #include "opt_hwpmc_hooks.h"
47 #include "opt_vm.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/capsicum.h>
52 #include <sys/kernel.h>
53 #include <sys/lock.h>
54 #include <sys/mutex.h>
55 #include <sys/sysproto.h>
56 #include <sys/elf.h>
57 #include <sys/filedesc.h>
58 #include <sys/priv.h>
59 #include <sys/proc.h>
60 #include <sys/procctl.h>
61 #include <sys/racct.h>
62 #include <sys/resource.h>
63 #include <sys/resourcevar.h>
64 #include <sys/rwlock.h>
65 #include <sys/sysctl.h>
66 #include <sys/vnode.h>
67 #include <sys/fcntl.h>
68 #include <sys/file.h>
69 #include <sys/mman.h>
70 #include <sys/mount.h>
71 #include <sys/conf.h>
72 #include <sys/stat.h>
73 #include <sys/syscallsubr.h>
74 #include <sys/sysent.h>
75 #include <sys/vmmeter.h>
76 #if defined(__amd64__) || defined(__i386__) /* for i386_read_exec */
77 #include <machine/md_var.h>
78 #endif
79 
80 #include <security/audit/audit.h>
81 #include <security/mac/mac_framework.h>
82 
83 #include <vm/vm.h>
84 #include <vm/vm_param.h>
85 #include <vm/pmap.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pager.h>
90 #include <vm/vm_pageout.h>
91 #include <vm/vm_extern.h>
93 #include <vm/vnode_pager.h>
94 
95 #ifdef HWPMC_HOOKS
96 #include <sys/pmckern.h>
97 #endif
98 
99 int old_mlock = 0;
100 SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
101     "Do not apply RLIMIT_MEMLOCK on mlockall");
102 static int mincore_mapped = 1;
103 SYSCTL_INT(_vm, OID_AUTO, mincore_mapped, CTLFLAG_RWTUN, &mincore_mapped, 0,
104     "mincore reports mappings, not residency");
105 static int imply_prot_max = 0;
106 SYSCTL_INT(_vm, OID_AUTO, imply_prot_max, CTLFLAG_RWTUN, &imply_prot_max, 0,
107     "Imply maximum page protections in mmap() when none are specified");
108 
109 _Static_assert(MAXPAGESIZES <= 4, "MINCORE_SUPER too narrow");
110 
111 #ifndef _SYS_SYSPROTO_H_
112 struct sbrk_args {
113 	int incr;
114 };
115 #endif
116 
117 int
118 sys_sbrk(struct thread *td, struct sbrk_args *uap)
119 {
120 	/* Not yet implemented */
121 	return (EOPNOTSUPP);
122 }
123 
124 #ifndef _SYS_SYSPROTO_H_
125 struct sstk_args {
126 	int incr;
127 };
128 #endif
129 
130 int
131 sys_sstk(struct thread *td, struct sstk_args *uap)
132 {
133 	/* Not yet implemented */
134 	return (EOPNOTSUPP);
135 }
136 
137 #if defined(COMPAT_43)
138 int
139 ogetpagesize(struct thread *td, struct ogetpagesize_args *uap)
140 {
141 
142 	td->td_retval[0] = PAGE_SIZE;
143 	return (0);
144 }
145 #endif				/* COMPAT_43 */
146 
147 /*
148  * Memory Map (mmap) system call.  Note that the file offset
149  * and address are allowed to be NOT page aligned, though if
150  * the MAP_FIXED flag is set, both must have the same remainder
151  * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
152  * page-aligned, the actual mapping starts at trunc_page(addr)
153  * and the return value is adjusted up by the page offset.
154  *
155  * Generally speaking, only character devices which are themselves
156  * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
157  * there would be no cache coherency between a descriptor and a VM mapping
158  * both to the same character device.
159  */
160 #ifndef _SYS_SYSPROTO_H_
161 struct mmap_args {
162 	void *addr;
163 	size_t len;
164 	int prot;
165 	int flags;
166 	int fd;
167 	long pad;
168 	off_t pos;
169 };
170 #endif
171 
172 int
173 sys_mmap(struct thread *td, struct mmap_args *uap)
174 {
175 
176 	return (kern_mmap(td, &(struct mmap_req){
177 		.mr_hint = (uintptr_t)uap->addr,
178 		.mr_len = uap->len,
179 		.mr_prot = uap->prot,
180 		.mr_flags = uap->flags,
181 		.mr_fd = uap->fd,
182 		.mr_pos = uap->pos,
183 	    }));
184 }
185 
186 int
187 kern_mmap_maxprot(struct proc *p, int prot)
188 {
189 
190 	if ((p->p_flag2 & P2_PROTMAX_DISABLE) != 0 ||
191 	    (p->p_fctl0 & NT_FREEBSD_FCTL_PROTMAX_DISABLE) != 0)
192 		return (_PROT_ALL);
193 	if (((p->p_flag2 & P2_PROTMAX_ENABLE) != 0 || imply_prot_max) &&
194 	    prot != PROT_NONE)
195 		 return (prot);
196 	return (_PROT_ALL);
197 }
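/*
 * Illustrative userland sketch (not part of this kernel source): the
 * PROT_MAX() encoding that kern_mmap_maxprot() falls back on when the
 * caller does not supply an explicit maximum.  Variable names are
 * hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	// Request a read-only mapping whose protection may later be
 *	// raised to read/write, but never to executable.
 *	void *p = mmap(NULL, len,
 *	    PROT_READ | PROT_MAX(PROT_READ | PROT_WRITE),
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 */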
198 
199 int
200 kern_mmap(struct thread *td, const struct mmap_req *mrp)
201 {
202 	struct vmspace *vms;
203 	struct file *fp;
204 	struct proc *p;
205 	off_t pos;
206 	vm_offset_t addr, orig_addr;
207 	vm_size_t len, pageoff, size;
208 	vm_prot_t cap_maxprot;
209 	int align, error, fd, flags, max_prot, prot;
210 	cap_rights_t rights;
211 	mmap_check_fp_fn check_fp_fn;
212 
213 	orig_addr = addr = mrp->mr_hint;
214 	len = mrp->mr_len;
215 	prot = mrp->mr_prot;
216 	flags = mrp->mr_flags;
217 	fd = mrp->mr_fd;
218 	pos = mrp->mr_pos;
219 	check_fp_fn = mrp->mr_check_fp_fn;
220 
221 	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
222 		return (EINVAL);
223 	max_prot = PROT_MAX_EXTRACT(prot);
224 	prot = PROT_EXTRACT(prot);
225 	if (max_prot != 0 && (max_prot & prot) != prot)
226 		return (ENOTSUP);
227 
228 	p = td->td_proc;
229 
230 	/*
231 	 * Always honor PROT_MAX if set.  If not, default to all
232 	 * permissions unless we're implying maximum permissions.
233 	 */
234 	if (max_prot == 0)
235 		max_prot = kern_mmap_maxprot(p, prot);
236 
237 	vms = p->p_vmspace;
238 	fp = NULL;
239 	AUDIT_ARG_FD(fd);
240 
241 	/*
242 	 * Ignore old flags that used to be defined but did not do anything.
243 	 */
244 	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);
245 
246 	/*
247 	 * Enforce the constraints.
248 	 * Mapping of length 0 is only allowed for old binaries.
249 	 * An anonymous mapping shall specify -1 as the file descriptor and
250 	 * a zero position for new code.  Be nice to ancient a.out
251 	 * binaries and correct pos for anonymous mappings, since old
252 	 * ld.so sometimes issues anonymous map requests with a non-zero
253 	 * pos.
254 	 */
255 	if (!SV_CURPROC_FLAG(SV_AOUT)) {
256 		if ((len == 0 && p->p_osrel >= P_OSREL_MAP_ANON) ||
257 		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
258 			return (EINVAL);
259 	} else {
260 		if ((flags & MAP_ANON) != 0)
261 			pos = 0;
262 	}
263 
264 	if (flags & MAP_STACK) {
265 		if ((fd != -1) ||
266 		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
267 			return (EINVAL);
268 		flags |= MAP_ANON;
269 		pos = 0;
270 	}
271 	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
272 	    MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
273 	    MAP_PREFAULT_READ | MAP_GUARD | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
274 		return (EINVAL);
275 	if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
276 		return (EINVAL);
277 	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
278 		return (EINVAL);
279 	if (prot != PROT_NONE &&
280 	    (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
281 		return (EINVAL);
282 	if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
283 	    pos != 0 || (flags & ~(MAP_FIXED | MAP_GUARD | MAP_EXCL |
284 	    MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0))
285 		return (EINVAL);
286 
287 	/*
288 	 * Align the file position to a page boundary,
289 	 * and save its page offset component.
290 	 */
291 	pageoff = (pos & PAGE_MASK);
292 	pos -= pageoff;
293 
294 	/* Compute size from len by rounding (on both ends). */
295 	size = len + pageoff;			/* low end... */
296 	size = round_page(size);		/* hi end */
297 	/* Check for rounding up to zero. */
298 	if (len > size)
299 		return (ENOMEM);
300 
301 	/* Ensure alignment is at least a page and fits in a pointer. */
302 	align = flags & MAP_ALIGNMENT_MASK;
303 	if (align != 0 && align != MAP_ALIGNED_SUPER &&
304 	    (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
305 	    align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
306 		return (EINVAL);
307 
308 	/*
309 	 * Check for illegal addresses.  Watch out for address wrap... Note
310 	 * that VM_*_ADDRESS are not constants due to casts (argh).
311 	 */
312 	if (flags & MAP_FIXED) {
313 		/*
314 		 * The specified address must have the same remainder
315 		 * as the file offset taken modulo PAGE_SIZE, so it
316 		 * should be aligned after adjustment by pageoff.
317 		 */
318 		addr -= pageoff;
319 		if (addr & PAGE_MASK)
320 			return (EINVAL);
321 
322 		/* Address range must be all in user VM space. */
323 		if (!vm_map_range_valid(&vms->vm_map, addr, addr + size))
324 			return (EINVAL);
325 		if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
326 			return (EINVAL);
327 	} else if (flags & MAP_32BIT) {
328 		/*
329 		 * For MAP_32BIT, override the hint if it is too high and
330 		 * do not bother moving the mapping past the heap (since
331 		 * the heap is usually above 2GB).
332 		 */
333 		if (addr + size > MAP_32BIT_MAX_ADDR)
334 			addr = 0;
335 	} else {
336 		/*
337 		 * XXX for non-fixed mappings where no hint is provided or
338 		 * the hint would fall in the potential heap space,
339 		 * place it after the end of the largest possible heap.
340 		 *
341 		 * For anonymous mappings within the address space of the
342 		 * calling process, the absence of a hint is handled at a
343 		 * lower level in order to implement different clustering
344 		 * strategies for ASLR.
345 		 */
346 		if (((flags & MAP_ANON) == 0 && addr == 0) ||
347 		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
348 		    addr < round_page((vm_offset_t)vms->vm_daddr +
349 		    lim_max(td, RLIMIT_DATA))))
350 			addr = round_page((vm_offset_t)vms->vm_daddr +
351 			    lim_max(td, RLIMIT_DATA));
352 	}
353 	if (len == 0) {
354 		/*
355 		 * Return success without mapping anything for old
356 		 * binaries that request a page-aligned mapping of
357 		 * length 0.  For modern binaries, this function
358 		 * returns an error earlier.
359 		 */
360 		error = 0;
361 	} else if ((flags & MAP_GUARD) != 0) {
362 		error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
363 		    VM_PROT_NONE, flags, NULL, pos, FALSE, td);
364 	} else if ((flags & MAP_ANON) != 0) {
365 		/*
366 		 * Mapping blank space is trivial.
367 		 *
368 		 * This relies on VM_PROT_* matching PROT_*.
369 		 */
370 		error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
371 		    max_prot, flags, NULL, pos, FALSE, td);
372 	} else {
373 		/*
374 		 * Mapping file, get fp for validation and don't let the
375 		 * descriptor disappear on us if we block. Check capability
376 		 * rights, but also return the maximum rights to be combined
377 		 * with maxprot later.
378 		 */
379 		cap_rights_init_one(&rights, CAP_MMAP);
380 		if (prot & PROT_READ)
381 			cap_rights_set_one(&rights, CAP_MMAP_R);
382 		if ((flags & MAP_SHARED) != 0) {
383 			if (prot & PROT_WRITE)
384 				cap_rights_set_one(&rights, CAP_MMAP_W);
385 		}
386 		if (prot & PROT_EXEC)
387 			cap_rights_set_one(&rights, CAP_MMAP_X);
388 		error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
389 		if (error != 0)
390 			goto done;
391 		if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
392 		    p->p_osrel >= P_OSREL_MAP_FSTRICT) {
393 			error = EINVAL;
394 			goto done;
395 		}
396 		if (check_fp_fn != NULL) {
397 			error = check_fp_fn(fp, prot, max_prot & cap_maxprot,
398 			    flags);
399 			if (error != 0)
400 				goto done;
401 		}
402 		if (fp->f_ops == &shm_ops && shm_largepage(fp->f_data))
403 			addr = orig_addr;
404 		/* This relies on VM_PROT_* matching PROT_*. */
405 		error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
406 		    max_prot & cap_maxprot, flags, pos, td);
407 	}
408 
409 	if (error == 0)
410 		td->td_retval[0] = addr + pageoff;
411 done:
412 	if (fp)
413 		fdrop(fp, td);
414 
415 	return (error);
416 }
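/*
 * Illustrative userland sketches (not part of this kernel source) of flag
 * combinations validated in kern_mmap() above; variable names are
 * hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	// Reserve an address range as a guard: no backing object,
 *	// PROT_NONE only, fd must be -1 and pos 0.
 *	void *g = mmap(NULL, gsz, PROT_NONE, MAP_GUARD, -1, 0);
 *
 *	// Ask for 2 MB alignment of an anonymous mapping; MAP_ALIGNED(n)
 *	// encodes log2 of the requested alignment.
 *	void *a = mmap(NULL, asz, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE | MAP_ALIGNED(21), -1, 0);
 *
 *	// MAP_EXCL is only meaningful together with MAP_FIXED; the request
 *	// then fails instead of silently replacing an existing mapping.
 *	void *f = mmap(hint, fsz, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_EXCL, -1, 0);
 */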
417 
418 #if defined(COMPAT_FREEBSD6)
419 int
420 freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
421 {
422 	return (kern_mmap(td, &(struct mmap_req){
423 		.mr_hint = (uintptr_t)uap->addr,
424 		.mr_len = uap->len,
425 		.mr_prot = uap->prot,
426 		.mr_flags = uap->flags,
427 		.mr_fd = uap->fd,
428 		.mr_pos = uap->pos,
429 	    }));
430 }
431 #endif
432 
433 #ifdef COMPAT_43
434 #ifndef _SYS_SYSPROTO_H_
435 struct ommap_args {
436 	caddr_t addr;
437 	int len;
438 	int prot;
439 	int flags;
440 	int fd;
441 	long pos;
442 };
443 #endif
444 int
445 ommap(struct thread *td, struct ommap_args *uap)
446 {
447 	return (kern_ommap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
448 	    uap->flags, uap->fd, uap->pos));
449 }
450 
451 int
452 kern_ommap(struct thread *td, uintptr_t hint, int len, int oprot,
453     int oflags, int fd, long pos)
454 {
455 	static const char cvtbsdprot[8] = {
456 		0,
457 		PROT_EXEC,
458 		PROT_WRITE,
459 		PROT_EXEC | PROT_WRITE,
460 		PROT_READ,
461 		PROT_EXEC | PROT_READ,
462 		PROT_WRITE | PROT_READ,
463 		PROT_EXEC | PROT_WRITE | PROT_READ,
464 	};
465 	int flags, prot;
466 
467 	if (len < 0)
468 		return (EINVAL);
469 
470 #define	OMAP_ANON	0x0002
471 #define	OMAP_COPY	0x0020
472 #define	OMAP_SHARED	0x0010
473 #define	OMAP_FIXED	0x0100
474 
475 	prot = cvtbsdprot[oprot & 0x7];
476 #if (defined(COMPAT_FREEBSD32) && defined(__amd64__)) || defined(__i386__)
477 	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
478 	    prot != 0)
479 		prot |= PROT_EXEC;
480 #endif
481 	flags = 0;
482 	if (oflags & OMAP_ANON)
483 		flags |= MAP_ANON;
484 	if (oflags & OMAP_COPY)
485 		flags |= MAP_COPY;
486 	if (oflags & OMAP_SHARED)
487 		flags |= MAP_SHARED;
488 	else
489 		flags |= MAP_PRIVATE;
490 	if (oflags & OMAP_FIXED)
491 		flags |= MAP_FIXED;
492 	return (kern_mmap(td, &(struct mmap_req){
493 		.mr_hint = hint,
494 		.mr_len = len,
495 		.mr_prot = prot,
496 		.mr_flags = flags,
497 		.mr_fd = fd,
498 		.mr_pos = pos,
499 	    }));
500 }
501 #endif				/* COMPAT_43 */
502 
503 #ifndef _SYS_SYSPROTO_H_
504 struct msync_args {
505 	void *addr;
506 	size_t len;
507 	int flags;
508 };
509 #endif
510 int
511 sys_msync(struct thread *td, struct msync_args *uap)
512 {
513 
514 	return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
515 }
516 
517 int
518 kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
519 {
520 	vm_offset_t addr;
521 	vm_size_t pageoff;
522 	vm_map_t map;
523 	int rv;
524 
525 	addr = addr0;
526 	pageoff = (addr & PAGE_MASK);
527 	addr -= pageoff;
528 	size += pageoff;
529 	size = (vm_size_t) round_page(size);
530 	if (addr + size < addr)
531 		return (EINVAL);
532 
533 	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
534 		return (EINVAL);
535 
536 	map = &td->td_proc->p_vmspace->vm_map;
537 
538 	/*
539 	 * Clean the pages and interpret the return value.
540 	 */
541 	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
542 	    (flags & MS_INVALIDATE) != 0);
543 	switch (rv) {
544 	case KERN_SUCCESS:
545 		return (0);
546 	case KERN_INVALID_ADDRESS:
547 		return (ENOMEM);
548 	case KERN_INVALID_ARGUMENT:
549 		return (EBUSY);
550 	case KERN_FAILURE:
551 		return (EIO);
552 	default:
553 		return (EINVAL);
554 	}
555 }
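/*
 * Illustrative userland sketch (not part of this kernel source): flushing a
 * shared file mapping.  MS_ASYNC and MS_INVALIDATE are mutually exclusive,
 * as enforced above.  Variable names are hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *
 *	if (msync(addr, len, MS_SYNC) == -1)
 *		err(1, "msync");	// EBUSY, EIO, ENOMEM or EINVAL
 */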
556 
557 #ifndef _SYS_SYSPROTO_H_
558 struct munmap_args {
559 	void *addr;
560 	size_t len;
561 };
562 #endif
563 int
564 sys_munmap(struct thread *td, struct munmap_args *uap)
565 {
566 
567 	return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
568 }
569 
570 int
571 kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
572 {
573 #ifdef HWPMC_HOOKS
574 	struct pmckern_map_out pkm;
575 	vm_map_entry_t entry;
576 	bool pmc_handled;
577 #endif
578 	vm_offset_t addr, end;
579 	vm_size_t pageoff;
580 	vm_map_t map;
581 	int rv;
582 
583 	if (size == 0)
584 		return (EINVAL);
585 
586 	addr = addr0;
587 	pageoff = (addr & PAGE_MASK);
588 	addr -= pageoff;
589 	size += pageoff;
590 	size = (vm_size_t) round_page(size);
591 	end = addr + size;
592 	map = &td->td_proc->p_vmspace->vm_map;
593 	if (!vm_map_range_valid(map, addr, end))
594 		return (EINVAL);
595 
596 	vm_map_lock(map);
597 #ifdef HWPMC_HOOKS
598 	pmc_handled = false;
599 	if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
600 		pmc_handled = true;
601 		/*
602 		 * Inform hwpmc if the address range being unmapped contains
603 		 * an executable region.
604 		 */
605 		pkm.pm_address = (uintptr_t) NULL;
606 		if (vm_map_lookup_entry(map, addr, &entry)) {
607 			for (; entry->start < end;
608 			    entry = vm_map_entry_succ(entry)) {
609 				if (vm_map_check_protection(map, entry->start,
610 					entry->end, VM_PROT_EXECUTE) == TRUE) {
611 					pkm.pm_address = (uintptr_t) addr;
612 					pkm.pm_size = (size_t) size;
613 					break;
614 				}
615 			}
616 		}
617 	}
618 #endif
619 	rv = vm_map_delete(map, addr, end);
620 
621 #ifdef HWPMC_HOOKS
622 	if (rv == KERN_SUCCESS && __predict_false(pmc_handled)) {
623 		/* downgrade the lock to prevent a LOR with the pmc-sx lock */
624 		vm_map_lock_downgrade(map);
625 		if (pkm.pm_address != (uintptr_t) NULL)
626 			PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
627 		vm_map_unlock_read(map);
628 	} else
629 #endif
630 		vm_map_unlock(map);
631 
632 	return (vm_mmap_to_errno(rv));
633 }
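/*
 * Illustrative userland sketch (not part of this kernel source): both the
 * address and the length are rounded to page boundaries before the range is
 * deleted, so unmapping a sub-range affects whole pages.  Variable names are
 * hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *
 *	if (munmap(addr, len) == -1)
 *		err(1, "munmap");	// EINVAL for len == 0 or a bad range
 */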
634 
635 #ifndef _SYS_SYSPROTO_H_
636 struct mprotect_args {
637 	const void *addr;
638 	size_t len;
639 	int prot;
640 };
641 #endif
642 int
643 sys_mprotect(struct thread *td, struct mprotect_args *uap)
644 {
645 
646 	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len,
647 	    uap->prot, 0));
648 }
649 
650 int
651 kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot,
652     int flags)
653 {
654 	vm_offset_t addr;
655 	vm_size_t pageoff;
656 	int vm_error, max_prot;
657 
658 	addr = addr0;
659 	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
660 		return (EINVAL);
661 	max_prot = PROT_MAX_EXTRACT(prot);
662 	prot = PROT_EXTRACT(prot);
663 	pageoff = (addr & PAGE_MASK);
664 	addr -= pageoff;
665 	size += pageoff;
666 	size = (vm_size_t) round_page(size);
667 #ifdef COMPAT_FREEBSD32
668 	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
669 		if (((addr + size) & 0xffffffff) < addr)
670 			return (EINVAL);
671 	} else
672 #endif
673 	if (addr + size < addr)
674 		return (EINVAL);
675 
676 	flags |= VM_MAP_PROTECT_SET_PROT;
677 	if (max_prot != 0)
678 		flags |= VM_MAP_PROTECT_SET_MAXPROT;
679 	vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
680 	    addr, addr + size, prot, max_prot, flags);
681 
682 	switch (vm_error) {
683 	case KERN_SUCCESS:
684 		return (0);
685 	case KERN_PROTECTION_FAILURE:
686 		return (EACCES);
687 	case KERN_RESOURCE_SHORTAGE:
688 		return (ENOMEM);
689 	case KERN_OUT_OF_BOUNDS:
690 		return (ENOTSUP);
691 	}
692 	return (EINVAL);
693 }
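/*
 * Illustrative userland sketch (not part of this kernel source): lowering
 * both the current and the maximum protection of a region.  Once the
 * maximum excludes PROT_WRITE, a later mprotect() request for PROT_WRITE
 * fails with EACCES, per the KERN_PROTECTION_FAILURE case above.  Variable
 * names are hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *
 *	if (mprotect(addr, len, PROT_READ | PROT_MAX(PROT_READ)) == -1)
 *		err(1, "mprotect");
 */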
694 
695 #ifndef _SYS_SYSPROTO_H_
696 struct minherit_args {
697 	void *addr;
698 	size_t len;
699 	int inherit;
700 };
701 #endif
702 int
703 sys_minherit(struct thread *td, struct minherit_args *uap)
704 {
705 
706 	return (kern_minherit(td, (uintptr_t)uap->addr, uap->len,
707 	    uap->inherit));
708 }
709 
710 int
711 kern_minherit(struct thread *td, uintptr_t addr0, size_t len, int inherit0)
712 {
713 	vm_offset_t addr;
714 	vm_size_t size, pageoff;
715 	vm_inherit_t inherit;
716 
717 	addr = (vm_offset_t)addr0;
718 	size = len;
719 	inherit = inherit0;
720 
721 	pageoff = (addr & PAGE_MASK);
722 	addr -= pageoff;
723 	size += pageoff;
724 	size = (vm_size_t) round_page(size);
725 	if (addr + size < addr)
726 		return (EINVAL);
727 
728 	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
729 	    addr + size, inherit)) {
730 	case KERN_SUCCESS:
731 		return (0);
732 	case KERN_PROTECTION_FAILURE:
733 		return (EACCES);
734 	}
735 	return (EINVAL);
736 }
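/*
 * Illustrative userland sketch (not part of this kernel source): controlling
 * what children created by fork() see in a region.  Variable names are
 * hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	minherit(addr, len, INHERIT_SHARE);	// share with future children
 *	minherit(addr, len, INHERIT_ZERO);	// children get zero-filled pages
 */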
737 
738 #ifndef _SYS_SYSPROTO_H_
739 struct madvise_args {
740 	void *addr;
741 	size_t len;
742 	int behav;
743 };
744 #endif
745 
746 int
747 sys_madvise(struct thread *td, struct madvise_args *uap)
748 {
749 
750 	return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
751 }
752 
753 int
754 kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
755 {
756 	vm_map_t map;
757 	vm_offset_t addr, end, start;
758 	int flags;
759 
760 	/*
761 	 * Check for our special case, advising the swap pager we are
762 	 * "immortal."
763 	 */
764 	if (behav == MADV_PROTECT) {
765 		flags = PPROT_SET;
766 		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
767 		    PROC_SPROTECT, &flags));
768 	}
769 
770 	/*
771 	 * Check for illegal addresses.  Watch out for address wrap... Note
772 	 * that VM_*_ADDRESS are not constants due to casts (argh).
773 	 */
774 	map = &td->td_proc->p_vmspace->vm_map;
775 	addr = addr0;
776 	if (!vm_map_range_valid(map, addr, addr + len))
777 		return (EINVAL);
778 
779 	/*
780 	 * Since this routine is only advisory, we default to conservative
781 	 * behavior.
782 	 */
783 	start = trunc_page(addr);
784 	end = round_page(addr + len);
785 
786 	/*
787 	 * vm_map_madvise() checks for illegal values of behav.
788 	 */
789 	return (vm_map_madvise(map, start, end, behav));
790 }
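/*
 * Illustrative userland sketch (not part of this kernel source): madvise()
 * hints are advisory, and vm_map_madvise() rejects unknown behav values.
 * MADV_PROTECT (privileged) is handled above as a procctl(PROC_SPROTECT)
 * request rather than as VM-map advice.  Variable names are hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	madvise(addr, len, MADV_WILLNEED);	// prefault soon-to-be-used pages
 *	madvise(addr, len, MADV_FREE);		// contents may be discarded
 */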
791 
792 #ifndef _SYS_SYSPROTO_H_
793 struct mincore_args {
794 	const void *addr;
795 	size_t len;
796 	char *vec;
797 };
798 #endif
799 
800 int
801 sys_mincore(struct thread *td, struct mincore_args *uap)
802 {
803 
804 	return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
805 }
806 
807 int
808 kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
809 {
810 	pmap_t pmap;
811 	vm_map_t map;
812 	vm_map_entry_t current, entry;
813 	vm_object_t object;
814 	vm_offset_t addr, cend, end, first_addr;
815 	vm_paddr_t pa;
816 	vm_page_t m;
817 	vm_pindex_t pindex;
818 	int error, lastvecindex, mincoreinfo, vecindex;
819 	unsigned int timestamp;
820 
821 	/*
822 	 * Make sure that the addresses presented are valid for user
823 	 * mode.
824 	 */
825 	first_addr = addr = trunc_page(addr0);
826 	end = round_page(addr0 + len);
827 	map = &td->td_proc->p_vmspace->vm_map;
828 	if (end > vm_map_max(map) || end < addr)
829 		return (ENOMEM);
830 
831 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
832 
833 	vm_map_lock_read(map);
834 RestartScan:
835 	timestamp = map->timestamp;
836 
837 	if (!vm_map_lookup_entry(map, addr, &entry)) {
838 		vm_map_unlock_read(map);
839 		return (ENOMEM);
840 	}
841 
842 	/*
843 	 * Do this on a map entry basis so that if the pages are not
844 	 * in the current process's address space, we can easily look
845 	 * up the pages elsewhere.
846 	 */
847 	lastvecindex = -1;
848 	while (entry->start < end) {
849 		/*
850 		 * check for contiguity
851 		 */
852 		current = entry;
853 		entry = vm_map_entry_succ(current);
854 		if (current->end < end &&
855 		    entry->start > current->end) {
856 			vm_map_unlock_read(map);
857 			return (ENOMEM);
858 		}
859 
860 		/*
861 		 * ignore submaps (for now) or null objects
862 		 */
863 		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
864 		    current->object.vm_object == NULL)
865 			continue;
866 
867 		/*
868 		 * limit this scan to the current map entry and the
869 		 * limits for the mincore call
870 		 */
871 		if (addr < current->start)
872 			addr = current->start;
873 		cend = current->end;
874 		if (cend > end)
875 			cend = end;
876 
877 		for (; addr < cend; addr += PAGE_SIZE) {
878 			/*
879 			 * Check pmap first, it is likely faster, also
880 			 * it can provide info as to whether we are the
881 			 * one referencing or modifying the page.
882 			 */
883 			m = NULL;
884 			object = NULL;
885 retry:
886 			pa = 0;
887 			mincoreinfo = pmap_mincore(pmap, addr, &pa);
888 			if (mincore_mapped) {
889 				/*
890 				 * We only care about this pmap's
891 				 * mapping of the page, if any.
892 				 */
893 				;
894 			} else if (pa != 0) {
895 				/*
896 				 * The page is mapped by this process but not
897 				 * both accessed and modified.  It is also
898 				 * managed.  Acquire the object lock so that
899 				 * other mappings might be examined.  The page's
900 				 * identity may change at any point before its
901 				 * object lock is acquired, so re-validate if
902 				 * necessary.
903 				 */
904 				m = PHYS_TO_VM_PAGE(pa);
905 				while (object == NULL || m->object != object) {
906 					if (object != NULL)
907 						VM_OBJECT_WUNLOCK(object);
908 					object = atomic_load_ptr(&m->object);
909 					if (object == NULL)
910 						goto retry;
911 					VM_OBJECT_WLOCK(object);
912 				}
913 				if (pa != pmap_extract(pmap, addr))
914 					goto retry;
915 				KASSERT(vm_page_all_valid(m),
916 				    ("mincore: page %p is mapped but invalid",
917 				    m));
918 			} else if (mincoreinfo == 0) {
919 				/*
920 				 * The page is not mapped by this process.  If
921 				 * the object implements managed pages, then
922 				 * determine if the page is resident so that
923 				 * the mappings might be examined.
924 				 */
925 				if (current->object.vm_object != object) {
926 					if (object != NULL)
927 						VM_OBJECT_WUNLOCK(object);
928 					object = current->object.vm_object;
929 					VM_OBJECT_WLOCK(object);
930 				}
931 				if ((object->flags & OBJ_SWAP) != 0 ||
932 				    object->type == OBJT_VNODE) {
933 					pindex = OFF_TO_IDX(current->offset +
934 					    (addr - current->start));
935 					m = vm_page_lookup(object, pindex);
936 					if (m != NULL && vm_page_none_valid(m))
937 						m = NULL;
938 					if (m != NULL)
939 						mincoreinfo = MINCORE_INCORE;
940 				}
941 			}
942 			if (m != NULL) {
943 				VM_OBJECT_ASSERT_WLOCKED(m->object);
944 
945 				/* Examine other mappings of the page. */
946 				if (m->dirty == 0 && pmap_is_modified(m))
947 					vm_page_dirty(m);
948 				if (m->dirty != 0)
949 					mincoreinfo |= MINCORE_MODIFIED_OTHER;
950 
951 				/*
952 				 * The first test for PGA_REFERENCED is an
953 				 * optimization.  The second test is
954 				 * required because a concurrent pmap
955 				 * operation could clear the last reference
956 				 * and set PGA_REFERENCED before the call to
957 				 * pmap_is_referenced().
958 				 */
959 				if ((m->a.flags & PGA_REFERENCED) != 0 ||
960 				    pmap_is_referenced(m) ||
961 				    (m->a.flags & PGA_REFERENCED) != 0)
962 					mincoreinfo |= MINCORE_REFERENCED_OTHER;
963 			}
964 			if (object != NULL)
965 				VM_OBJECT_WUNLOCK(object);
966 
967 			/*
968 			 * subyte may page fault.  In case it needs to modify
969 			 * the map, we release the lock.
970 			 */
971 			vm_map_unlock_read(map);
972 
973 			/*
974 			 * calculate index into user supplied byte vector
975 			 */
976 			vecindex = atop(addr - first_addr);
977 
978 			/*
979 			 * If we have skipped map entries, we need to make sure that
980 			 * the byte vector is zeroed for those skipped entries.
981 			 */
982 			while ((lastvecindex + 1) < vecindex) {
983 				++lastvecindex;
984 				error = subyte(vec + lastvecindex, 0);
985 				if (error) {
986 					error = EFAULT;
987 					goto done2;
988 				}
989 			}
990 
991 			/*
992 			 * Pass the page information to the user
993 			 */
994 			error = subyte(vec + vecindex, mincoreinfo);
995 			if (error) {
996 				error = EFAULT;
997 				goto done2;
998 			}
999 
1000 			/*
1001 			 * If the map has changed, due to the subyte, the previous
1002 			 * output may be invalid.
1003 			 */
1004 			vm_map_lock_read(map);
1005 			if (timestamp != map->timestamp)
1006 				goto RestartScan;
1007 
1008 			lastvecindex = vecindex;
1009 		}
1010 	}
1011 
1012 	/*
1013 	 * subyte may page fault.  In case it needs to modify
1014 	 * the map, we release the lock.
1015 	 */
1016 	vm_map_unlock_read(map);
1017 
1018 	/*
1019 	 * Zero the last entries in the byte vector.
1020 	 */
1021 	vecindex = atop(end - first_addr);
1022 	while ((lastvecindex + 1) < vecindex) {
1023 		++lastvecindex;
1024 		error = subyte(vec + lastvecindex, 0);
1025 		if (error) {
1026 			error = EFAULT;
1027 			goto done2;
1028 		}
1029 	}
1030 
1031 	/*
1032 	 * If the map has changed, due to the subyte, the previous
1033 	 * output may be invalid.
1034 	 */
1035 	vm_map_lock_read(map);
1036 	if (timestamp != map->timestamp)
1037 		goto RestartScan;
1038 	vm_map_unlock_read(map);
1039 done2:
1040 	return (error);
1041 }
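/*
 * Illustrative userland sketch (not part of this kernel source): mincore()
 * returns one status byte per page of the range.  When the
 * vm.mincore_mapped sysctl is 1 (the default above), the bits describe this
 * process's mappings rather than mere residency.  Variable names are
 * hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	size_t ps = (size_t)getpagesize();
 *	char *vec = malloc((len + ps - 1) / ps);
 *	if (vec != NULL && mincore(addr, len, vec) == 0 &&
 *	    (vec[0] & MINCORE_INCORE) != 0)
 *		;	// first page is resident (or mapped, see above)
 */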
1042 
1043 #ifndef _SYS_SYSPROTO_H_
1044 struct mlock_args {
1045 	const void *addr;
1046 	size_t len;
1047 };
1048 #endif
1049 int
1050 sys_mlock(struct thread *td, struct mlock_args *uap)
1051 {
1052 
1053 	return (kern_mlock(td->td_proc, td->td_ucred,
1054 	    __DECONST(uintptr_t, uap->addr), uap->len));
1055 }
1056 
1057 int
1058 kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
1059 {
1060 	vm_offset_t addr, end, last, start;
1061 	vm_size_t npages, size;
1062 	vm_map_t map;
1063 	unsigned long nsize;
1064 	int error;
1065 
1066 	error = priv_check_cred(cred, PRIV_VM_MLOCK);
1067 	if (error)
1068 		return (error);
1069 	addr = addr0;
1070 	size = len;
1071 	last = addr + size;
1072 	start = trunc_page(addr);
1073 	end = round_page(last);
1074 	if (last < addr || end < addr)
1075 		return (EINVAL);
1076 	npages = atop(end - start);
1077 	if (npages > vm_page_max_user_wired)
1078 		return (ENOMEM);
1079 	map = &proc->p_vmspace->vm_map;
1080 	PROC_LOCK(proc);
1081 	nsize = ptoa(npages + pmap_wired_count(map->pmap));
1082 	if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
1083 		PROC_UNLOCK(proc);
1084 		return (ENOMEM);
1085 	}
1086 	PROC_UNLOCK(proc);
1087 #ifdef RACCT
1088 	if (racct_enable) {
1089 		PROC_LOCK(proc);
1090 		error = racct_set(proc, RACCT_MEMLOCK, nsize);
1091 		PROC_UNLOCK(proc);
1092 		if (error != 0)
1093 			return (ENOMEM);
1094 	}
1095 #endif
1096 	error = vm_map_wire(map, start, end,
1097 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1098 #ifdef RACCT
1099 	if (racct_enable && error != KERN_SUCCESS) {
1100 		PROC_LOCK(proc);
1101 		racct_set(proc, RACCT_MEMLOCK,
1102 		    ptoa(pmap_wired_count(map->pmap)));
1103 		PROC_UNLOCK(proc);
1104 	}
1105 #endif
1106 	switch (error) {
1107 	case KERN_SUCCESS:
1108 		return (0);
1109 	case KERN_INVALID_ARGUMENT:
1110 		return (EINVAL);
1111 	default:
1112 		return (ENOMEM);
1113 	}
1114 }
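/*
 * Illustrative userland sketch (not part of this kernel source): wiring a
 * buffer.  Unprivileged use is gated by PRIV_VM_MLOCK, RLIMIT_MEMLOCK and
 * the vm_page_max_user_wired limit checked above.  Variable names are
 * hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *
 *	if (mlock(buf, buflen) == -1)
 *		err(1, "mlock");	// ENOMEM when a limit is exceeded
 *	// ... use the wired buffer ...
 *	munlock(buf, buflen);
 */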
1115 
1116 #ifndef _SYS_SYSPROTO_H_
1117 struct mlockall_args {
1118 	int	how;
1119 };
1120 #endif
1121 
1122 int
1123 sys_mlockall(struct thread *td, struct mlockall_args *uap)
1124 {
1125 	vm_map_t map;
1126 	int error;
1127 
1128 	map = &td->td_proc->p_vmspace->vm_map;
1129 	error = priv_check(td, PRIV_VM_MLOCK);
1130 	if (error)
1131 		return (error);
1132 
1133 	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
1134 		return (EINVAL);
1135 
1136 	/*
1137 	 * If wiring all pages in the process would cause it to exceed
1138 	 * a hard resource limit, return ENOMEM.
1139 	 */
1140 	if (!old_mlock && uap->how & MCL_CURRENT) {
1141 		if (map->size > lim_cur(td, RLIMIT_MEMLOCK))
1142 			return (ENOMEM);
1143 	}
1144 #ifdef RACCT
1145 	if (racct_enable) {
1146 		PROC_LOCK(td->td_proc);
1147 		error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
1148 		PROC_UNLOCK(td->td_proc);
1149 		if (error != 0)
1150 			return (ENOMEM);
1151 	}
1152 #endif
1153 
1154 	if (uap->how & MCL_FUTURE) {
1155 		vm_map_lock(map);
1156 		vm_map_modflags(map, MAP_WIREFUTURE, 0);
1157 		vm_map_unlock(map);
1158 		error = 0;
1159 	}
1160 
1161 	if (uap->how & MCL_CURRENT) {
1162 		/*
1163 		 * P1003.1-2001 mandates that all currently mapped pages
1164 		 * will be memory resident and locked (wired) upon return
1165 		 * from mlockall().  vm_map_wire() will wire the pages by
1166 		 * calling vm_fault_wire() for each page in the region.
1167 		 */
1168 		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
1169 		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1170 		if (error == KERN_SUCCESS)
1171 			error = 0;
1172 		else if (error == KERN_RESOURCE_SHORTAGE)
1173 			error = ENOMEM;
1174 		else
1175 			error = EAGAIN;
1176 	}
1177 #ifdef RACCT
1178 	if (racct_enable && error != KERN_SUCCESS) {
1179 		PROC_LOCK(td->td_proc);
1180 		racct_set(td->td_proc, RACCT_MEMLOCK,
1181 		    ptoa(pmap_wired_count(map->pmap)));
1182 		PROC_UNLOCK(td->td_proc);
1183 	}
1184 #endif
1185 
1186 	return (error);
1187 }
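/*
 * Illustrative userland sketch (not part of this kernel source): wiring
 * everything currently mapped and everything mapped in the future (the
 * MAP_WIREFUTURE map flag set above).  Variable names are hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		err(1, "mlockall");	// EAGAIN or ENOMEM, see above
 */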
1188 
1189 #ifndef _SYS_SYSPROTO_H_
1190 struct munlockall_args {
1191 	register_t dummy;
1192 };
1193 #endif
1194 
1195 int
1196 sys_munlockall(struct thread *td, struct munlockall_args *uap)
1197 {
1198 	vm_map_t map;
1199 	int error;
1200 
1201 	map = &td->td_proc->p_vmspace->vm_map;
1202 	error = priv_check(td, PRIV_VM_MUNLOCK);
1203 	if (error)
1204 		return (error);
1205 
1206 	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
1207 	vm_map_lock(map);
1208 	vm_map_modflags(map, 0, MAP_WIREFUTURE);
1209 	vm_map_unlock(map);
1210 
1211 	/* Forcibly unwire all pages. */
1212 	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
1213 	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1214 #ifdef RACCT
1215 	if (racct_enable && error == KERN_SUCCESS) {
1216 		PROC_LOCK(td->td_proc);
1217 		racct_set(td->td_proc, RACCT_MEMLOCK, 0);
1218 		PROC_UNLOCK(td->td_proc);
1219 	}
1220 #endif
1221 
1222 	return (error);
1223 }
1224 
1225 #ifndef _SYS_SYSPROTO_H_
1226 struct munlock_args {
1227 	const void *addr;
1228 	size_t len;
1229 };
1230 #endif
1231 int
1232 sys_munlock(struct thread *td, struct munlock_args *uap)
1233 {
1234 
1235 	return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
1236 }
1237 
1238 int
1239 kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
1240 {
1241 	vm_offset_t addr, end, last, start;
1242 #ifdef RACCT
1243 	vm_map_t map;
1244 #endif
1245 	int error;
1246 
1247 	error = priv_check(td, PRIV_VM_MUNLOCK);
1248 	if (error)
1249 		return (error);
1250 	addr = addr0;
1251 	last = addr + size;
1252 	start = trunc_page(addr);
1253 	end = round_page(last);
1254 	if (last < addr || end < addr)
1255 		return (EINVAL);
1256 	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
1257 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1258 #ifdef RACCT
1259 	if (racct_enable && error == KERN_SUCCESS) {
1260 		PROC_LOCK(td->td_proc);
1261 		map = &td->td_proc->p_vmspace->vm_map;
1262 		racct_set(td->td_proc, RACCT_MEMLOCK,
1263 		    ptoa(pmap_wired_count(map->pmap)));
1264 		PROC_UNLOCK(td->td_proc);
1265 	}
1266 #endif
1267 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
1268 }
1269 
1270 /*
1271  * vm_mmap_vnode()
1272  *
1273  * Helper function for vm_mmap.  Perform sanity checks specific to mmap
1274  * operations on vnodes.
1275  */
1276 int
1277 vm_mmap_vnode(struct thread *td, vm_size_t objsize,
1278     vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
1279     struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
1280     boolean_t *writecounted)
1281 {
1282 	struct vattr va;
1283 	vm_object_t obj;
1284 	vm_ooffset_t foff;
1285 	struct ucred *cred;
1286 	int error, flags;
1287 	bool writex;
1288 
1289 	cred = td->td_ucred;
1290 	writex = (*maxprotp & VM_PROT_WRITE) != 0 &&
1291 	    (*flagsp & MAP_SHARED) != 0;
1292 	if ((error = vget(vp, LK_SHARED)) != 0)
1293 		return (error);
1294 	AUDIT_ARG_VNODE1(vp);
1295 	foff = *foffp;
1296 	flags = *flagsp;
1297 	obj = vp->v_object;
1298 	if (vp->v_type == VREG) {
1299 		/*
1300 		 * Get the proper underlying object
1301 		 */
1302 		if (obj == NULL) {
1303 			error = EINVAL;
1304 			goto done;
1305 		}
1306 		if (obj->type == OBJT_VNODE && obj->handle != vp) {
1307 			vput(vp);
1308 			vp = (struct vnode *)obj->handle;
1309 			/*
1310 			 * Bypass filesystems obey the mpsafety of the
1311 			 * underlying fs.  Tmpfs never bypasses.
1312 			 */
1313 			error = vget(vp, LK_SHARED);
1314 			if (error != 0)
1315 				return (error);
1316 		}
1317 		if (writex) {
1318 			*writecounted = TRUE;
1319 			vm_pager_update_writecount(obj, 0, objsize);
1320 		}
1321 	} else {
1322 		error = EINVAL;
1323 		goto done;
1324 	}
1325 	if ((error = VOP_GETATTR(vp, &va, cred)))
1326 		goto done;
1327 #ifdef MAC
1328 	/* This relies on VM_PROT_* matching PROT_*. */
1329 	error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
1330 	if (error != 0)
1331 		goto done;
1332 #endif
1333 	if ((flags & MAP_SHARED) != 0) {
1334 		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
1335 			if (prot & VM_PROT_WRITE) {
1336 				error = EPERM;
1337 				goto done;
1338 			}
1339 			*maxprotp &= ~VM_PROT_WRITE;
1340 		}
1341 	}
1342 	/*
1343 	 * If it is a regular file without any references,
1344 	 * we do not need to sync it.
1345 	 * Adjust the object size to be the size of the actual file.
1346 	 */
1347 	objsize = round_page(va.va_size);
1348 	if (va.va_nlink == 0)
1349 		flags |= MAP_NOSYNC;
1350 	if (obj->type == OBJT_VNODE) {
1351 		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
1352 		    cred);
1353 		if (obj == NULL) {
1354 			error = ENOMEM;
1355 			goto done;
1356 		}
1357 	} else {
1358 		KASSERT((obj->flags & OBJ_SWAP) != 0, ("wrong object type"));
1359 		vm_object_reference(obj);
1360 #if VM_NRESERVLEVEL > 0
1361 		if ((obj->flags & OBJ_COLORED) == 0) {
1362 			VM_OBJECT_WLOCK(obj);
1363 			vm_object_color(obj, 0);
1364 			VM_OBJECT_WUNLOCK(obj);
1365 		}
1366 #endif
1367 	}
1368 	*objp = obj;
1369 	*flagsp = flags;
1370 
1371 	VOP_MMAPPED(vp);
1372 
1373 done:
1374 	if (error != 0 && *writecounted) {
1375 		*writecounted = FALSE;
1376 		vm_pager_update_writecount(obj, objsize, 0);
1377 	}
1378 	vput(vp);
1379 	return (error);
1380 }
1381 
1382 /*
1383  * vm_mmap_cdev()
1384  *
1385  * Helper function for vm_mmap.  Perform sanity checks specific to mmap
1386  * operations on cdevs.
1387  */
1388 int
1389 vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
1390     vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
1391     vm_ooffset_t *foff, vm_object_t *objp)
1392 {
1393 	vm_object_t obj;
1394 	int error, flags;
1395 
1396 	flags = *flagsp;
1397 
1398 	if (dsw->d_flags & D_MMAP_ANON) {
1399 		*objp = NULL;
1400 		*foff = 0;
1401 		*maxprotp = VM_PROT_ALL;
1402 		*flagsp |= MAP_ANON;
1403 		return (0);
1404 	}
1405 	/*
1406 	 * cdevs do not provide private mappings of any kind.
1407 	 */
1408 	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
1409 	    (prot & VM_PROT_WRITE) != 0)
1410 		return (EACCES);
1411 	if (flags & (MAP_PRIVATE|MAP_COPY))
1412 		return (EINVAL);
1413 	/*
1414 	 * Force device mappings to be shared.
1415 	 */
1416 	flags |= MAP_SHARED;
1417 #ifdef MAC_XXX
1418 	error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
1419 	if (error != 0)
1420 		return (error);
1421 #endif
1422 	/*
1423 	 * First, try d_mmap_single().  If that is not implemented
1424 	 * (returns ENODEV), fall back to using the device pager.
1425 	 * Note that d_mmap_single() must return a reference to the
1426 	 * object (it needs to bump the reference count of the object
1427 	 * it returns somehow).
1428 	 *
1429 	 * XXX assumes VM_PROT_* == PROT_*
1430 	 */
1431 	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
1432 	if (error != ENODEV)
1433 		return (error);
1434 	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
1435 	    td->td_ucred);
1436 	if (obj == NULL)
1437 		return (EINVAL);
1438 	*objp = obj;
1439 	*flagsp = flags;
1440 	return (0);
1441 }
1442 
1443 int
1444 vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1445 	vm_prot_t maxprot, int flags,
1446 	objtype_t handle_type, void *handle,
1447 	vm_ooffset_t foff)
1448 {
1449 	vm_object_t object;
1450 	struct thread *td = curthread;
1451 	int error;
1452 	boolean_t writecounted;
1453 
1454 	if (size == 0)
1455 		return (EINVAL);
1456 
1457 	size = round_page(size);
1458 	object = NULL;
1459 	writecounted = FALSE;
1460 
1461 	switch (handle_type) {
1462 	case OBJT_DEVICE: {
1463 		struct cdevsw *dsw;
1464 		struct cdev *cdev;
1465 		int ref;
1466 
1467 		cdev = handle;
1468 		dsw = dev_refthread(cdev, &ref);
1469 		if (dsw == NULL)
1470 			return (ENXIO);
1471 		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
1472 		    dsw, &foff, &object);
1473 		dev_relthread(cdev, ref);
1474 		break;
1475 	}
1476 	case OBJT_VNODE:
1477 		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
1478 		    handle, &foff, &object, &writecounted);
1479 		break;
1480 	default:
1481 		error = EINVAL;
1482 		break;
1483 	}
1484 	if (error)
1485 		return (error);
1486 
1487 	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
1488 	    foff, writecounted, td);
1489 	if (error != 0 && object != NULL) {
1490 		/*
1491 		 * If this mapping was accounted for in the vnode's
1492 		 * writecount, then undo that now.
1493 		 */
1494 		if (writecounted)
1495 			vm_pager_release_writecount(object, 0, size);
1496 		vm_object_deallocate(object);
1497 	}
1498 	return (error);
1499 }
1500 
1501 int
1502 kern_mmap_racct_check(struct thread *td, vm_map_t map, vm_size_t size)
1503 {
1504 	int error;
1505 
1506 	RACCT_PROC_LOCK(td->td_proc);
1507 	if (map->size + size > lim_cur(td, RLIMIT_VMEM)) {
1508 		RACCT_PROC_UNLOCK(td->td_proc);
1509 		return (ENOMEM);
1510 	}
1511 	if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
1512 		RACCT_PROC_UNLOCK(td->td_proc);
1513 		return (ENOMEM);
1514 	}
1515 	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
1516 		if (ptoa(pmap_wired_count(map->pmap)) + size >
1517 		    lim_cur(td, RLIMIT_MEMLOCK)) {
1518 			racct_set_force(td->td_proc, RACCT_VMEM, map->size);
1519 			RACCT_PROC_UNLOCK(td->td_proc);
1520 			return (ENOMEM);
1521 		}
1522 		error = racct_set(td->td_proc, RACCT_MEMLOCK,
1523 		    ptoa(pmap_wired_count(map->pmap)) + size);
1524 		if (error != 0) {
1525 			racct_set_force(td->td_proc, RACCT_VMEM, map->size);
1526 			RACCT_PROC_UNLOCK(td->td_proc);
1527 			return (error);
1528 		}
1529 	}
1530 	RACCT_PROC_UNLOCK(td->td_proc);
1531 	return (0);
1532 }
1533 
1534 /*
1535  * Internal version of mmap that maps a specific VM object into a
1536  * map.  Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
1537  */
1538 int
1539 vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1540     vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
1541     boolean_t writecounted, struct thread *td)
1542 {
1543 	vm_offset_t default_addr, max_addr;
1544 	int docow, error, findspace, rv;
1545 	bool curmap, fitit;
1546 
1547 	curmap = map == &td->td_proc->p_vmspace->vm_map;
1548 	if (curmap) {
1549 		error = kern_mmap_racct_check(td, map, size);
1550 		if (error != 0)
1551 			return (error);
1552 	}
1553 
1554 	/*
1555 	 * We currently can only deal with page aligned file offsets.
1556 	 * The mmap() system call already enforces this by subtracting
1557 	 * the page offset from the file offset, but checking here
1558 	 * catches errors in device drivers (e.g. d_mmap_single()
1559 	 * callbacks) and other internal mapping requests (such as in
1560 	 * exec).
1561 	 */
1562 	if (foff & PAGE_MASK)
1563 		return (EINVAL);
1564 
1565 	if ((flags & MAP_FIXED) == 0) {
1566 		fitit = true;
1567 		*addr = round_page(*addr);
1568 	} else {
1569 		if (*addr != trunc_page(*addr))
1570 			return (EINVAL);
1571 		fitit = false;
1572 	}
1573 
1574 	if (flags & MAP_ANON) {
1575 		if (object != NULL || foff != 0)
1576 			return (EINVAL);
1577 		docow = 0;
1578 	} else if (flags & MAP_PREFAULT_READ)
1579 		docow = MAP_PREFAULT;
1580 	else
1581 		docow = MAP_PREFAULT_PARTIAL;
1582 
1583 	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
1584 		docow |= MAP_COPY_ON_WRITE;
1585 	if (flags & MAP_NOSYNC)
1586 		docow |= MAP_DISABLE_SYNCER;
1587 	if (flags & MAP_NOCORE)
1588 		docow |= MAP_DISABLE_COREDUMP;
1589 	/* Shared memory is also shared with children. */
1590 	if (flags & MAP_SHARED)
1591 		docow |= MAP_INHERIT_SHARE;
1592 	if (writecounted)
1593 		docow |= MAP_WRITECOUNT;
1594 	if (flags & MAP_STACK) {
1595 		if (object != NULL)
1596 			return (EINVAL);
1597 		docow |= MAP_STACK_GROWS_DOWN;
1598 	}
1599 	if ((flags & MAP_EXCL) != 0)
1600 		docow |= MAP_CHECK_EXCL;
1601 	if ((flags & MAP_GUARD) != 0)
1602 		docow |= MAP_CREATE_GUARD;
1603 
1604 	if (fitit) {
1605 		if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
1606 			findspace = VMFS_SUPER_SPACE;
1607 		else if ((flags & MAP_ALIGNMENT_MASK) != 0)
1608 			findspace = VMFS_ALIGNED_SPACE(flags >>
1609 			    MAP_ALIGNMENT_SHIFT);
1610 		else
1611 			findspace = VMFS_OPTIMAL_SPACE;
1612 		max_addr = 0;
1613 		if ((flags & MAP_32BIT) != 0)
1614 			max_addr = MAP_32BIT_MAX_ADDR;
1615 		if (curmap) {
1616 			default_addr =
1617 			    round_page((vm_offset_t)td->td_proc->p_vmspace->
1618 			    vm_daddr + lim_max(td, RLIMIT_DATA));
1619 			if ((flags & MAP_32BIT) != 0)
1620 				default_addr = 0;
1621 			rv = vm_map_find_min(map, object, foff, addr, size,
1622 			    default_addr, max_addr, findspace, prot, maxprot,
1623 			    docow);
1624 		} else {
1625 			rv = vm_map_find(map, object, foff, addr, size,
1626 			    max_addr, findspace, prot, maxprot, docow);
1627 		}
1628 	} else {
1629 		rv = vm_map_fixed(map, object, foff, *addr, size,
1630 		    prot, maxprot, docow);
1631 	}
1632 
1633 	if (rv == KERN_SUCCESS) {
1634 		/*
1635 		 * If the process has requested that all future mappings
1636 		 * be wired, then heed this.
1637 		 */
1638 		if ((map->flags & MAP_WIREFUTURE) != 0) {
1639 			vm_map_lock(map);
1640 			if ((map->flags & MAP_WIREFUTURE) != 0)
1641 				(void)vm_map_wire_locked(map, *addr,
1642 				    *addr + size, VM_MAP_WIRE_USER |
1643 				    ((flags & MAP_STACK) ? VM_MAP_WIRE_HOLESOK :
1644 				    VM_MAP_WIRE_NOHOLES));
1645 			vm_map_unlock(map);
1646 		}
1647 	}
1648 	return (vm_mmap_to_errno(rv));
1649 }
1650 
1651 /*
1652  * Translate a Mach VM return code to zero on success or the appropriate errno
1653  * on failure.
1654  */
1655 int
1656 vm_mmap_to_errno(int rv)
1657 {
1658 
1659 	switch (rv) {
1660 	case KERN_SUCCESS:
1661 		return (0);
1662 	case KERN_INVALID_ADDRESS:
1663 	case KERN_NO_SPACE:
1664 		return (ENOMEM);
1665 	case KERN_PROTECTION_FAILURE:
1666 		return (EACCES);
1667 	default:
1668 		return (EINVAL);
1669 	}
1670 }
1671