/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $Id: vm_mmap.c,v 1.4 1994/08/04 03:06:44 davidg Exp $
 */

/*
 * Mapped file (mmap) interface to VM
 */
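/*
 * For orientation, a sketch (illustrative only, not kernel code) of a
 * typical userland request that these entry points serve: an anonymous,
 * private, read/write mapping of two pages.
 *
 *	size_t len = 2 * getpagesize();
 *	caddr_t p = mmap((caddr_t)0, len, PROT_READ|PROT_WRITE,
 *	    MAP_ANON|MAP_PRIVATE, -1, (off_t)0);
 *	if (p == (caddr_t)-1)
 *		err(1, "mmap");
 */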

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vm_prot.h>

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

void pmap_object_init_pt();

struct sbrk_args {
	int	incr;
};
/* ARGSUSED */
int
sbrk(p, uap, retval)
	struct proc *p;
	struct sbrk_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct sstk_args {
	int	incr;
};
/* ARGSUSED */
int
sstk(p, uap, retval)
	struct proc *p;
	struct sstk_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
struct getpagesize_args {
	int	dummy;
};
/* ARGSUSED */
int
ogetpagesize(p, uap, retval)
	struct proc *p;
	struct getpagesize_args *uap;
	int *retval;
{

	*retval = PAGE_SIZE;
	return (0);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */

struct mmap_args {
	caddr_t	addr;
	size_t	len;
	int	prot;
	int	flags;
	int	fd;
	long	pad;
	off_t	pos;
};
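/*
 * Note: the layout above mirrors the userland prototype
 *
 *	caddr_t mmap(caddr_t addr, size_t len, int prot, int flags,
 *	    int fd, off_t pos);
 *
 * with pad inserted so that the 64-bit pos stays 8-byte aligned in the
 * syscall argument list.
 */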

#ifdef COMPAT_43
struct ommap_args {
	caddr_t	addr;
	int	len;
	int	prot;
	int	flags;
	int	fd;
	long	pos;
};
int
ommap(p, uap, retval)
	struct proc *p;
	register struct ommap_args *uap;
	int *retval;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC|PROT_WRITE,
		PROT_READ,
		PROT_EXEC|PROT_READ,
		PROT_WRITE|PROT_READ,
		PROT_EXEC|PROT_WRITE|PROT_READ,
	};
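/*
 * The table above decodes the old 4.3BSD protection bits
 * (0x1 = exec, 0x2 = write, 0x4 = read); e.g. an old prot of
 * 0x6 (write|read) converts to PROT_WRITE|PROT_READ.
 */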
#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100
#define	OMAP_INHERIT	0x0800

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot&0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	if (uap->flags & OMAP_INHERIT)
		nargs.flags |= MAP_INHERIT;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(p, &nargs, retval));
}
#endif

int
mmap(p, uap, retval)
	struct proc *p;
	register struct mmap_args *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot, maxprot;
	caddr_t handle;
	int flags, error;

	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x prot %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, prot,
		       flags, uap->fd, (vm_offset_t)uap->pos);
#endif
	/*
	 * Address (if FIXED) must be page aligned.
	 * Size is implicitly rounded to a page boundary.
	 */
	addr = (vm_offset_t) uap->addr;
	if (((flags & MAP_FIXED) && (addr & PAGE_MASK)) ||
	    (ssize_t)uap->len < 0 || ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
			return (EINVAL);
#ifndef i386
		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
			return (EINVAL);
#endif
		if (addr + size < addr)
			return (EINVAL);
	}
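	/*
	 * Wrap example (illustrative): with a 32-bit address space,
	 * addr = 0xfffff000 and size = 0x2000 give addr + size == 0x1000,
	 * which is less than addr, so the last check above rejects it.
	 */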
	/*
	 * XXX if no hint provided for a non-fixed mapping place it after
	 * the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	if (addr == 0 && (flags & MAP_FIXED) == 0)
		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping file, get fp for validation.
		 * Obtain vnode and make sure it is of appropriate type.
		 */
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE)
			return (EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return (EINVAL);
		/*
		 * XXX hack to handle use of /dev/zero to map anon
		 * memory (a la SunOS).
		 */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?
			 * What if proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ)
				return (EACCES);
			if (flags & MAP_SHARED) {
				if (fp->f_flag & FWRITE)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE)
					return (EACCES);
			} else
				maxprot |= VM_PROT_WRITE;
			handle = (caddr_t)vp;
		}
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int)addr;
	return (error);
}

struct msync_args {
	caddr_t	addr;
	int	len;
};
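/*
 * Note that this old-style msync(2) takes no flags argument; syncio and
 * invalidate are hardwired below, where Sun's MS_ASYNC and MS_INVALIDATE
 * would otherwise be honored.
 */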
int
msync(p, uap, retval)
	struct proc *p;
	struct msync_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	vm_map_t map;
	int rv;
	boolean_t syncio, invalidate;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
		return (EINVAL);
	map = &p->p_vmspace->vm_map;
	addr = (vm_offset_t)uap->addr;
	size = (vm_size_t)uap->len;
	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages within the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE)
			return (EINVAL);
		addr = entry->start;
		size = entry->end - entry->start;
	}
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing address range [%x-%x)\n",
		       addr, addr+size);
#endif
	/*
	 * Could pass this in as a third flag argument to implement
	 * Sun's MS_ASYNC.
	 */
	syncio = TRUE;
	/*
	 * XXX bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.  Otherwise, we could
	 * pass this in to implement Sun's MS_INVALIDATE.
	 */
	invalidate = TRUE;
	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr+size, syncio, invalidate);
	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
	return (0);
}

struct munmap_args {
	caddr_t	addr;
	int	len;
};
int
munmap(p, uap, retval)
	register struct proc *p;
	register struct munmap_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	vm_map_t map;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if (size == 0)
		return (0);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
#ifndef i386
	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
		return (EINVAL);
#endif
	if (addr + size < addr)
		return (EINVAL);
	map = &p->p_vmspace->vm_map;
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(map, addr, addr+size);
	return (0);
}

void
munmapfd(fd)
	int fd;
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

	/*
	 * XXX should vm_deallocate any regions mapped to this file
	 */
	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

struct mprotect_args {
	caddr_t	addr;
	int	len;
	int	prot;
};
int
mprotect(p, uap, retval)
	struct proc *p;
	struct mprotect_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t)uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t)uap->len;
	prot = uap->prot & VM_PROT_ALL;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

struct madvise_args {
	caddr_t	addr;
	int	len;
	int	behav;
};
/* ARGSUSED */
int
madvise(p, uap, retval)
	struct proc *p;
	struct madvise_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct mincore_args {
	caddr_t	addr;
	int	len;
	char	*vec;
};
/* ARGSUSED */
int
mincore(p, uap, retval)
	struct proc *p;
	struct mincore_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct mlock_args {
	caddr_t	addr;
	size_t	len;
};
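/*
 * mlock enforces two limits below: a global cap on wired pages
 * (vm_page_max_wired) and, where pmap_wired_count() is available, the
 * per-process RLIMIT_MEMLOCK limit; without it, wiring requires
 * superuser privilege.
 */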
int
mlock(p, uap, retval)
	struct proc *p;
	struct mlock_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	int error;
	extern int vm_page_max_wired;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mlock(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	addr = (vm_offset_t)uap->addr;
	if ((addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
		return (EINVAL);
	size = round_page((vm_size_t)uap->len);
	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (EAGAIN);
#else
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
#endif

	error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

struct munlock_args {
	caddr_t	addr;
	size_t	len;
};
int
munlock(p, uap, retval)
	struct proc *p;
	struct munlock_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	int error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munlock(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	addr = (vm_offset_t)uap->addr;
	if ((addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
		return (EINVAL);
#ifndef pmap_wired_count
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
#endif
	size = round_page((vm_size_t)uap->len);

	error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */
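/*
 * For example (illustrative only), the MAP_ANON path of mmap() above
 * reduces to a call of the form:
 *
 *	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
 *	    VM_PROT_ALL, MAP_ANON|MAP_PRIVATE, (caddr_t)NULL,
 *	    (vm_offset_t)0);
 */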
int
vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot, maxprot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp = NULL;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void)vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if (flags & MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot, foff);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup.
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if (flags & MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 * Note that object will be NULL when handle == NULL;
		 * this is OK since vm_allocate_with_pager has made
		 * sure that these objects are uncached.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be a mapped file.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file.
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);

			if (map->pmap)
				pmap_object_init_pt(map->pmap, *addr, object, foff, size);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW: you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->flags |= OBJ_INTERNAL;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->flags &= ~OBJ_INTERNAL;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
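				/*
				 * Note: this tmap deliberately shadows the
				 * outer tmap; it is just a scratch variable
				 * for the lookup below.
				 */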
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.
			 */
			vm_object_pmap_copy(object, foff, foff + size);
			if (map->pmap)
				pmap_object_init_pt(map->pmap, *addr, object, foff, size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * If maxprot is different from prot, we must set both explicitly.
	 */
	rv = KERN_SUCCESS;
	if (maxprot != VM_PROT_ALL)
		rv = vm_map_protect(map, *addr, *addr+size, maxprot, TRUE);
	if (rv == KERN_SUCCESS && prot != maxprot)
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
	if (rv != KERN_SUCCESS) {
		(void) vm_deallocate(map, *addr, size);
		goto out;
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_map_inherit(map, *addr, *addr+size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
	}
}
845