1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1991, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
37 */
38
39 /*
40 * Mapped file (mmap) interface to VM
41 */
42
43 #include "opt_hwpmc_hooks.h"
44 #include "opt_vm.h"
45
46 #define EXTERR_CATEGORY EXTERR_CAT_MMAP
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/capsicum.h>
50 #include <sys/exterrvar.h>
51 #include <sys/kernel.h>
52 #include <sys/lock.h>
53 #include <sys/mutex.h>
54 #include <sys/sysproto.h>
55 #include <sys/elf.h>
56 #include <sys/filedesc.h>
57 #include <sys/priv.h>
58 #include <sys/proc.h>
59 #include <sys/procctl.h>
60 #include <sys/racct.h>
61 #include <sys/resource.h>
62 #include <sys/resourcevar.h>
63 #include <sys/rwlock.h>
64 #include <sys/sysctl.h>
65 #include <sys/vnode.h>
66 #include <sys/fcntl.h>
67 #include <sys/file.h>
68 #include <sys/mman.h>
69 #include <sys/mount.h>
70 #include <sys/conf.h>
71 #include <sys/stat.h>
72 #include <sys/syscallsubr.h>
73 #include <sys/sysent.h>
74 #include <sys/vmmeter.h>
75 #if defined(__amd64__) || defined(__i386__) /* for i386_read_exec */
76 #include <machine/md_var.h>
77 #endif
78
79 #include <security/audit/audit.h>
80 #include <security/mac/mac_framework.h>
81
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_pager.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_extern.h>
91 #include <vm/vm_page.h>
92 #include <vm/vnode_pager.h>
93
94 #ifdef HWPMC_HOOKS
95 #include <sys/pmckern.h>
96 #endif
97
98 int old_mlock = 0;
99 SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
100 "Do not apply RLIMIT_MEMLOCK on mlockall");
101 static int mincore_mapped = 1;
102 SYSCTL_INT(_vm, OID_AUTO, mincore_mapped, CTLFLAG_RWTUN, &mincore_mapped, 0,
103 "mincore reports mappings, not residency");
104 static int imply_prot_max = 0;
105 SYSCTL_INT(_vm, OID_AUTO, imply_prot_max, CTLFLAG_RWTUN, &imply_prot_max, 0,
106 "Imply maximum page protections in mmap() when none are specified");
107
108 _Static_assert(MAXPAGESIZES <= 4, "MINCORE_SUPER too narrow");
109
110 #if defined(COMPAT_43)
111 int
112 ogetpagesize(struct thread *td, struct ogetpagesize_args *uap)
113 {
114
115 td->td_retval[0] = PAGE_SIZE;
116 return (0);
117 }
118 #endif /* COMPAT_43 */
119
120 /*
121 * Memory Map (mmap) system call. Note that the file offset
122 * and address are allowed to be NOT page aligned, though if
123 * the MAP_FIXED flag is set, both must have the same remainder
124 * modulo the PAGE_SIZE (POSIX 1003.1b). If the address is not
125 * page-aligned, the actual mapping starts at trunc_page(addr)
126 * and the return value is adjusted up by the page offset.
127 *
128 * Generally speaking, only character devices which are themselves
129 * memory-based, such as a video framebuffer, can be mmap'd. Otherwise
130 * there would be no cache coherency between a descriptor and a VM mapping
131 * both to the same character device.
132 */
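/*
 * A small userland sketch of the behaviour described above (illustrative
 * only; "fd" stands for any open file descriptor).  With a 4K page size
 * and a file offset that is not page aligned:
 *
 *	off_t off = 0x1234;
 *	char *p = mmap(NULL, 100, PROT_READ, MAP_SHARED, fd, off);
 *
 * the kernel maps from trunc_page(off) and returns an address advanced by
 * (off & PAGE_MASK), so p[0] corresponds to byte 0x1234 of the file even
 * though the underlying mapping starts at a page boundary.
 */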
133 #ifndef _SYS_SYSPROTO_H_
134 struct mmap_args {
135 void *addr;
136 size_t len;
137 int prot;
138 int flags;
139 int fd;
140 long pad;
141 off_t pos;
142 };
143 #endif
144
145 int
146 sys_mmap(struct thread *td, struct mmap_args *uap)
147 {
148
149 return (kern_mmap(td, &(struct mmap_req){
150 .mr_hint = (uintptr_t)uap->addr,
151 .mr_len = uap->len,
152 .mr_prot = uap->prot,
153 .mr_flags = uap->flags,
154 .mr_fd = uap->fd,
155 .mr_pos = uap->pos,
156 }));
157 }
158
159 int
160 kern_mmap_maxprot(struct proc *p, int prot)
161 {
162
163 if ((p->p_flag2 & P2_PROTMAX_DISABLE) != 0 ||
164 (p->p_fctl0 & NT_FREEBSD_FCTL_PROTMAX_DISABLE) != 0)
165 return (_PROT_ALL);
166 if (((p->p_flag2 & P2_PROTMAX_ENABLE) != 0 || imply_prot_max) &&
167 prot != PROT_NONE)
168 return (prot);
169 return (_PROT_ALL);
170 }
171
172 int
173 kern_mmap(struct thread *td, const struct mmap_req *mrp)
174 {
175 struct vmspace *vms;
176 struct file *fp;
177 struct proc *p;
178 off_t pos;
179 vm_offset_t addr, orig_addr;
180 vm_size_t len, pageoff, size;
181 vm_prot_t cap_maxprot;
182 int align, error, fd, flags, max_prot, prot;
183 cap_rights_t rights;
184 mmap_check_fp_fn check_fp_fn;
185
186 orig_addr = addr = mrp->mr_hint;
187 len = mrp->mr_len;
188 prot = mrp->mr_prot;
189 flags = mrp->mr_flags;
190 fd = mrp->mr_fd;
191 pos = mrp->mr_pos;
192 check_fp_fn = mrp->mr_check_fp_fn;
193
194 if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0) {
195 return (EXTERROR(EINVAL, "unknown PROT bits"));
196 }
197 max_prot = PROT_MAX_EXTRACT(prot);
198 prot = PROT_EXTRACT(prot);
199 if (max_prot != 0 && (max_prot & prot) != prot) {
200 return (EXTERROR(ENOTSUP, "prot is not subset of max_prot"));
201 }
202
203 p = td->td_proc;
204
205 /*
206 * Always honor PROT_MAX if set. If not, default to all
207 * permissions unless we're implying maximum permissions.
208 */
209 if (max_prot == 0)
210 max_prot = kern_mmap_maxprot(p, prot);
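/*
 * Illustrative userland sketch of PROT_MAX (not part of this file): a
 * caller may cap the protections a mapping can later be upgraded to, e.g.
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_MAX(PROT_READ | PROT_WRITE),
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *
 * after which a later mprotect(p, len, PROT_EXEC) is expected to fail,
 * since PROT_EXEC lies outside the stated maximum.
 */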
211
212 vms = p->p_vmspace;
213 fp = NULL;
214 AUDIT_ARG_FD(fd);
215
216 /*
217 * Ignore old flags that used to be defined but did not do anything.
218 */
219 flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);
220
221 /*
222 * Enforce the constraints.
223 * Mapping of length 0 is only allowed for old binaries.
224 * Anonymous mapping shall specify -1 as file descriptor and
225 * zero position for new code. Be nice to ancient a.out
226 * binaries and correct pos for anonymous mapping, since old
227 * ld.so sometimes issues anonymous map requests with non-zero
228 * pos.
229 */
230 if (!SV_CURPROC_FLAG(SV_AOUT)) {
231 if ((len == 0 && p->p_osrel >= P_OSREL_MAP_ANON) ||
232 ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0))) {
233 return (EXTERROR(EINVAL,
234 "offset not zero/fd not -1 for MAP_ANON",
235 fd, pos));
236 }
237 } else {
238 if ((flags & MAP_ANON) != 0)
239 pos = 0;
240 }
241
242 if (flags & MAP_STACK) {
243 if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
244 (PROT_READ | PROT_WRITE))) {
245 return (EXTERROR(EINVAL, "MAP_STACK with prot < rw",
246 prot));
247 }
248 flags |= MAP_ANON;
249 pos = 0;
250 }
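/*
 * For reference, a typical MAP_STACK request from userland looks like
 * (illustrative only):
 *
 *	stk = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_STACK, -1, 0);
 *
 * As enforced above, the descriptor must be -1 and the protection must
 * include both read and write; MAP_ANON is implied.
 */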
251 if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
252 MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
253 MAP_PREFAULT_READ | MAP_GUARD | MAP_32BIT |
254 MAP_ALIGNMENT_MASK)) != 0) {
255 return (EXTERROR(EINVAL, "reserved flag set"));
256 }
257 if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL) {
258 return (EXTERROR(EINVAL, "EXCL without FIXED"));
259 }
260 if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED |
261 MAP_PRIVATE)) {
262 return (EXTERROR(EINVAL, "both SHARED and PRIVATE set"));
263 }
264 if (prot != PROT_NONE &&
265 (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0) {
266 return (EXTERROR(EINVAL, "invalid prot", prot));
267 }
268 if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
269 pos != 0 || (flags & ~(MAP_FIXED | MAP_GUARD | MAP_EXCL |
270 MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)) {
271 return (EXTERROR(EINVAL, "GUARD with wrong parameters"));
272 }
273
274 /*
275 * Align the file position to a page boundary,
276 * and save its page offset component.
277 */
278 pageoff = (pos & PAGE_MASK);
279 pos -= pageoff;
280
281 /* Compute size from len by rounding (on both ends). */
282 size = len + pageoff; /* low end... */
283 size = round_page(size); /* hi end */
284 /* Check for rounding up to zero. */
285 if (len > size)
286 return (ENOMEM);
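/*
 * Worked example of the rounding above, assuming 4K pages: for
 * pos = 0x11234 and len = 0x100, pageoff becomes 0x234, pos is truncated
 * to 0x11000, and size = round_page(0x100 + 0x234) = 0x1000.  The address
 * eventually returned to the caller is the mapped address plus pageoff.
 */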
287
288 /* Ensure alignment is at least a page and fits in a pointer. */
289 align = flags & MAP_ALIGNMENT_MASK;
290 if (align != 0 && align != MAP_ALIGNED_SUPER &&
291 (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
292 align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT)) {
293 return (EXTERROR(EINVAL, "bad alignment", align));
294 }
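/*
 * For example, MAP_ALIGNED(21) requests a 2MB-aligned mapping on a
 * 4K-page system and passes the check above, whereas MAP_ALIGNED(3) is
 * rejected because the requested alignment is smaller than a page.
 */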
295
296 /*
297 * Check for illegal addresses. Watch out for address wrap... Note
298 * that VM_*_ADDRESS are not constants due to casts (argh).
299 */
300 if (flags & MAP_FIXED) {
301 /*
302 * The specified address must have the same remainder
303 * as the file offset taken modulo PAGE_SIZE, so it
304 * should be aligned after adjustment by pageoff.
305 */
306 addr -= pageoff;
307 if ((addr & PAGE_MASK) != 0) {
308 return (EXTERROR(EINVAL, "fixed mapping not aligned",
309 addr));
310 }
311
312 /* Address range must be all in user VM space. */
313 if (!vm_map_range_valid(&vms->vm_map, addr, addr + size)) {
314 EXTERROR(EINVAL, "mapping outside vm_map");
315 return (EINVAL);
316 }
317 if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR) {
318 return (EXTERROR(EINVAL,
319 "fixed 32bit mapping does not fit into 4G"));
320 }
321 } else if (flags & MAP_32BIT) {
322 /*
323 * For MAP_32BIT, override the hint if it is too high and
324 * do not bother moving the mapping past the heap (since
325 * the heap is usually above 2GB).
326 */
327 if (addr + size > MAP_32BIT_MAX_ADDR)
328 addr = 0;
329 } else {
330 /*
331 * XXX for non-fixed mappings where no hint is provided or
332 * the hint would fall in the potential heap space,
333 * place it after the end of the largest possible heap.
334 *
335 * For anonymous mappings within the address space of the
336 * calling process, the absence of a hint is handled at a
337 * lower level in order to implement different clustering
338 * strategies for ASLR.
339 */
340 if (((flags & MAP_ANON) == 0 && addr == 0) ||
341 (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
342 addr < round_page((vm_offset_t)vms->vm_daddr +
343 lim_max(td, RLIMIT_DATA))))
344 addr = round_page((vm_offset_t)vms->vm_daddr +
345 lim_max(td, RLIMIT_DATA));
346 }
347 if (len == 0) {
348 /*
349 * Return success without mapping anything for old
350 * binaries that request a page-aligned mapping of
351 * length 0. For modern binaries, this function
352 * returns an error earlier.
353 */
354 error = 0;
355 } else if ((flags & MAP_GUARD) != 0) {
356 error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
357 VM_PROT_NONE, flags, NULL, pos, FALSE, td);
358 } else if ((flags & MAP_ANON) != 0) {
359 /*
360 * Mapping blank space is trivial.
361 *
362 * This relies on VM_PROT_* matching PROT_*.
363 */
364 error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
365 max_prot, flags, NULL, pos, FALSE, td);
366 } else {
367 /*
368 * Mapping file, get fp for validation and don't let the
369 * descriptor disappear on us if we block. Check capability
370 * rights, but also return the maximum rights to be combined
371 * with maxprot later.
372 */
373 cap_rights_init_one(&rights, CAP_MMAP);
374 if (prot & PROT_READ)
375 cap_rights_set_one(&rights, CAP_MMAP_R);
376 if ((flags & MAP_SHARED) != 0) {
377 if (prot & PROT_WRITE)
378 cap_rights_set_one(&rights, CAP_MMAP_W);
379 }
380 if (prot & PROT_EXEC)
381 cap_rights_set_one(&rights, CAP_MMAP_X);
382 error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
383 if (error != 0)
384 goto done;
385 if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
386 p->p_osrel >= P_OSREL_MAP_FSTRICT) {
387 EXTERROR(EINVAL, "neither SHARED nor PRIVATE req");
388 error = EINVAL;
389 goto done;
390 }
391 if (check_fp_fn != NULL) {
392 error = check_fp_fn(fp, prot, max_prot & cap_maxprot,
393 flags);
394 if (error != 0)
395 goto done;
396 }
397 if (fp->f_ops == &shm_ops && shm_largepage(fp->f_data))
398 addr = orig_addr;
399 /* This relies on VM_PROT_* matching PROT_*. */
400 error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
401 max_prot & cap_maxprot, flags, pos, td);
402 }
403
404 if (error == 0)
405 td->td_retval[0] = addr + pageoff;
406 done:
407 if (fp)
408 fdrop(fp, td);
409
410 return (error);
411 }
412
413 #if defined(COMPAT_FREEBSD6)
414 int
415 freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
416 {
417 return (kern_mmap(td, &(struct mmap_req){
418 .mr_hint = (uintptr_t)uap->addr,
419 .mr_len = uap->len,
420 .mr_prot = uap->prot,
421 .mr_flags = uap->flags,
422 .mr_fd = uap->fd,
423 .mr_pos = uap->pos,
424 }));
425 }
426 #endif
427
428 #ifdef COMPAT_43
429 #ifndef _SYS_SYSPROTO_H_
430 struct ommap_args {
431 caddr_t addr;
432 int len;
433 int prot;
434 int flags;
435 int fd;
436 long pos;
437 };
438 #endif
439 int
440 ommap(struct thread *td, struct ommap_args *uap)
441 {
442 return (kern_ommap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
443 uap->flags, uap->fd, uap->pos));
444 }
445
446 int
447 kern_ommap(struct thread *td, uintptr_t hint, int len, int oprot,
448 int oflags, int fd, long pos)
449 {
450 static const char cvtbsdprot[8] = {
451 0,
452 PROT_EXEC,
453 PROT_WRITE,
454 PROT_EXEC | PROT_WRITE,
455 PROT_READ,
456 PROT_EXEC | PROT_READ,
457 PROT_WRITE | PROT_READ,
458 PROT_EXEC | PROT_WRITE | PROT_READ,
459 };
460 int flags, prot;
461
462 if (len < 0)
463 return (EINVAL);
464
465 #define OMAP_ANON 0x0002
466 #define OMAP_COPY 0x0020
467 #define OMAP_SHARED 0x0010
468 #define OMAP_FIXED 0x0100
469
470 prot = cvtbsdprot[oprot & 0x7];
471 #if (defined(COMPAT_FREEBSD32) && defined(__amd64__)) || defined(__i386__)
472 if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
473 prot != 0)
474 prot |= PROT_EXEC;
475 #endif
476 flags = 0;
477 if (oflags & OMAP_ANON)
478 flags |= MAP_ANON;
479 if (oflags & OMAP_COPY)
480 flags |= MAP_COPY;
481 if (oflags & OMAP_SHARED)
482 flags |= MAP_SHARED;
483 else
484 flags |= MAP_PRIVATE;
485 if (oflags & OMAP_FIXED)
486 flags |= MAP_FIXED;
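/*
 * Worked example of the conversion, following the table and flag bits
 * above: an old-style request with oprot = 0x6 (read | write) and
 * oflags = OMAP_SHARED | OMAP_FIXED becomes prot = PROT_READ | PROT_WRITE
 * and flags = MAP_SHARED | MAP_FIXED before being handed to kern_mmap().
 */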
487 return (kern_mmap(td, &(struct mmap_req){
488 .mr_hint = hint,
489 .mr_len = len,
490 .mr_prot = prot,
491 .mr_flags = flags,
492 .mr_fd = fd,
493 .mr_pos = pos,
494 }));
495 }
496 #endif /* COMPAT_43 */
497
498 #ifndef _SYS_SYSPROTO_H_
499 struct msync_args {
500 void *addr;
501 size_t len;
502 int flags;
503 };
504 #endif
505 int
506 sys_msync(struct thread *td, struct msync_args *uap)
507 {
508
509 return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
510 }
511
512 int
513 kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
514 {
515 vm_offset_t addr;
516 vm_size_t pageoff;
517 vm_map_t map;
518 int rv;
519
520 addr = addr0;
521 pageoff = (addr & PAGE_MASK);
522 addr -= pageoff;
523 size += pageoff;
524 size = (vm_size_t) round_page(size);
525 if (addr + size < addr)
526 return (EINVAL);
527
528 if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
529 return (EINVAL);
530
531 map = &td->td_proc->p_vmspace->vm_map;
532
533 /*
534 * Clean the pages and interpret the return value.
535 */
536 rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
537 (flags & MS_INVALIDATE) != 0);
538 switch (rv) {
539 case KERN_SUCCESS:
540 return (0);
541 case KERN_INVALID_ADDRESS:
542 return (ENOMEM);
543 case KERN_INVALID_ARGUMENT:
544 return (EBUSY);
545 case KERN_FAILURE:
546 return (EIO);
547 default:
548 return (EINVAL);
549 }
550 }
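/*
 * Userland usage sketch (illustrative only): flushing a dirty MAP_SHARED
 * mapping back to its file, where "p" and "len" describe an existing
 * mapping:
 *
 *	if (msync(p, len, MS_SYNC) == -1)
 *		err(1, "msync");
 *
 * As checked above, MS_ASYNC and MS_INVALIDATE may not be combined in a
 * single call.
 */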
551
552 #ifndef _SYS_SYSPROTO_H_
553 struct munmap_args {
554 void *addr;
555 size_t len;
556 };
557 #endif
558 int
559 sys_munmap(struct thread *td, struct munmap_args *uap)
560 {
561
562 return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
563 }
564
565 int
566 kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
567 {
568 #ifdef HWPMC_HOOKS
569 struct pmckern_map_out pkm;
570 vm_map_entry_t entry;
571 bool pmc_handled;
572 #endif
573 vm_offset_t addr, end;
574 vm_size_t pageoff;
575 vm_map_t map;
576 int rv;
577
578 if (size == 0)
579 return (EINVAL);
580
581 addr = addr0;
582 pageoff = (addr & PAGE_MASK);
583 addr -= pageoff;
584 size += pageoff;
585 size = (vm_size_t) round_page(size);
586 end = addr + size;
587 map = &td->td_proc->p_vmspace->vm_map;
588 if (!vm_map_range_valid(map, addr, end))
589 return (EINVAL);
590
591 vm_map_lock(map);
592 #ifdef HWPMC_HOOKS
593 pmc_handled = false;
594 if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
595 pmc_handled = true;
596 /*
597 * Inform hwpmc if the address range being unmapped contains
598 * an executable region.
599 */
600 pkm.pm_address = (uintptr_t) NULL;
601 if (vm_map_lookup_entry(map, addr, &entry)) {
602 for (; entry->start < end;
603 entry = vm_map_entry_succ(entry)) {
604 if (vm_map_check_protection(map, entry->start,
605 entry->end, VM_PROT_EXECUTE) == TRUE) {
606 pkm.pm_address = (uintptr_t) addr;
607 pkm.pm_size = (size_t) size;
608 break;
609 }
610 }
611 }
612 }
613 #endif
614 rv = vm_map_delete(map, addr, end);
615
616 #ifdef HWPMC_HOOKS
617 if (rv == KERN_SUCCESS && __predict_false(pmc_handled)) {
618 /* downgrade the lock to prevent a LOR with the pmc-sx lock */
619 vm_map_lock_downgrade(map);
620 if (pkm.pm_address != (uintptr_t) NULL)
621 PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
622 vm_map_unlock_read(map);
623 } else
624 #endif
625 vm_map_unlock(map);
626
627 return (vm_mmap_to_errno(rv));
628 }
629
630 #ifndef _SYS_SYSPROTO_H_
631 struct mprotect_args {
632 const void *addr;
633 size_t len;
634 int prot;
635 };
636 #endif
637 int
638 sys_mprotect(struct thread *td, struct mprotect_args *uap)
639 {
640
641 return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len,
642 uap->prot, 0));
643 }
644
645 int
646 kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot,
647 int flags)
648 {
649 vm_offset_t addr;
650 vm_size_t pageoff;
651 int vm_error, max_prot;
652
653 addr = addr0;
654 if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
655 return (EINVAL);
656 max_prot = PROT_MAX_EXTRACT(prot);
657 prot = PROT_EXTRACT(prot);
658 pageoff = (addr & PAGE_MASK);
659 addr -= pageoff;
660 size += pageoff;
661 size = (vm_size_t) round_page(size);
662 #ifdef COMPAT_FREEBSD32
663 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
664 if (((addr + size) & 0xffffffff) < addr)
665 return (EINVAL);
666 } else
667 #endif
668 if (addr + size < addr)
669 return (EINVAL);
670
671 flags |= VM_MAP_PROTECT_SET_PROT;
672 if (max_prot != 0)
673 flags |= VM_MAP_PROTECT_SET_MAXPROT;
674 vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
675 addr, addr + size, prot, max_prot, flags);
676
677 switch (vm_error) {
678 case KERN_SUCCESS:
679 return (0);
680 case KERN_PROTECTION_FAILURE:
681 return (EACCES);
682 case KERN_RESOURCE_SHORTAGE:
683 return (ENOMEM);
684 case KERN_OUT_OF_BOUNDS:
685 return (ENOTSUP);
686 }
687 return (EINVAL);
688 }
689
690 #ifndef _SYS_SYSPROTO_H_
691 struct minherit_args {
692 void *addr;
693 size_t len;
694 int inherit;
695 };
696 #endif
697 int
698 sys_minherit(struct thread *td, struct minherit_args *uap)
699 {
700
701 return (kern_minherit(td, (uintptr_t)uap->addr, uap->len,
702 uap->inherit));
703 }
704
705 int
706 kern_minherit(struct thread *td, uintptr_t addr0, size_t len, int inherit0)
707 {
708 vm_offset_t addr;
709 vm_size_t size, pageoff;
710 vm_inherit_t inherit;
711
712 addr = (vm_offset_t)addr0;
713 size = len;
714 inherit = inherit0;
715
716 pageoff = (addr & PAGE_MASK);
717 addr -= pageoff;
718 size += pageoff;
719 size = (vm_size_t) round_page(size);
720 if (addr + size < addr)
721 return (EINVAL);
722
723 switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
724 addr + size, inherit)) {
725 case KERN_SUCCESS:
726 return (0);
727 case KERN_PROTECTION_FAILURE:
728 return (EACCES);
729 }
730 return (EINVAL);
731 }
732
733 #ifndef _SYS_SYSPROTO_H_
734 struct madvise_args {
735 void *addr;
736 size_t len;
737 int behav;
738 };
739 #endif
740
741 int
742 sys_madvise(struct thread *td, struct madvise_args *uap)
743 {
744
745 return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
746 }
747
748 int
749 kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
750 {
751 vm_map_t map;
752 vm_offset_t addr, end, start;
753 int flags;
754
755 /*
756 * Check for our special case, advising the swap pager we are
757 * "immortal."
758 */
759 if (behav == MADV_PROTECT) {
760 flags = PPROT_SET;
761 return (kern_procctl(td, P_PID, td->td_proc->p_pid,
762 PROC_SPROTECT, &flags));
763 }
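/*
 * For reference, the equivalent request can be issued directly through
 * procctl(2) (illustrative userland sketch):
 *
 *	int f = PPROT_SET;
 *	procctl(P_PID, getpid(), PROC_SPROTECT, &f);
 */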
764
765 /*
766 * Check for illegal addresses. Watch out for address wrap... Note
767 * that VM_*_ADDRESS are not constants due to casts (argh).
768 */
769 map = &td->td_proc->p_vmspace->vm_map;
770 addr = addr0;
771 if (!vm_map_range_valid(map, addr, addr + len))
772 return (EINVAL);
773
774 /*
775 * Since this routine is only advisory, we default to conservative
776 * behavior.
777 */
778 start = trunc_page(addr);
779 end = round_page(addr + len);
780
781 /*
782 * vm_map_madvise() checks for illegal values of behav.
783 */
784 return (vm_map_madvise(map, start, end, behav));
785 }
786
787 #ifndef _SYS_SYSPROTO_H_
788 struct mincore_args {
789 const void *addr;
790 size_t len;
791 char *vec;
792 };
793 #endif
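/*
 * Userland usage sketch (illustrative only): "base" and "len" describe an
 * existing mapping, and the vector receives one status byte per page.
 *
 *	size_t npages = (len + getpagesize() - 1) / getpagesize();
 *	char *vec = malloc(npages);
 *	if (mincore(base, len, vec) == 0 && (vec[0] & MINCORE_INCORE) != 0)
 *		printf("first page is resident\n");
 */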
794
795 int
796 sys_mincore(struct thread *td, struct mincore_args *uap)
797 {
798
799 return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
800 }
801
802 int
803 kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
804 {
805 pmap_t pmap;
806 vm_map_t map;
807 vm_map_entry_t current, entry;
808 vm_object_t object;
809 vm_offset_t addr, cend, end, first_addr;
810 vm_paddr_t pa;
811 vm_page_t m;
812 vm_pindex_t pindex;
813 int error, lastvecindex, mincoreinfo, vecindex;
814 unsigned int timestamp;
815
816 /*
817 * Make sure that the addresses presented are valid for user
818 * mode.
819 */
820 first_addr = addr = trunc_page(addr0);
821 end = round_page(addr0 + len);
822 map = &td->td_proc->p_vmspace->vm_map;
823 if (end > vm_map_max(map) || end < addr)
824 return (ENOMEM);
825
826 pmap = vmspace_pmap(td->td_proc->p_vmspace);
827
828 vm_map_lock_read(map);
829 RestartScan:
830 timestamp = map->timestamp;
831
832 if (!vm_map_lookup_entry(map, addr, &entry)) {
833 vm_map_unlock_read(map);
834 return (ENOMEM);
835 }
836
837 /*
838 * Do this on a map entry basis so that if the pages are not
839 * in the current process's address space, we can easily look
840 * up the pages elsewhere.
841 */
842 lastvecindex = -1;
843 while (entry->start < end) {
844 /*
845 * check for contiguity
846 */
847 current = entry;
848 entry = vm_map_entry_succ(current);
849 if (current->end < end &&
850 entry->start > current->end) {
851 vm_map_unlock_read(map);
852 return (ENOMEM);
853 }
854
855 /*
856 * ignore submaps (for now) or null objects
857 */
858 if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
859 current->object.vm_object == NULL)
860 continue;
861
862 /*
863 * limit this scan to the current map entry and the
864 * limits for the mincore call
865 */
866 if (addr < current->start)
867 addr = current->start;
868 cend = current->end;
869 if (cend > end)
870 cend = end;
871
872 for (; addr < cend; addr += PAGE_SIZE) {
873 /*
874 * Check pmap first, it is likely faster, also
875 * it can provide info as to whether we are the
876 * one referencing or modifying the page.
877 */
878 m = NULL;
879 object = NULL;
880 retry:
881 pa = 0;
882 mincoreinfo = pmap_mincore(pmap, addr, &pa);
883 if (mincore_mapped) {
884 /*
885 * We only care about this pmap's
886 * mapping of the page, if any.
887 */
888 ;
889 } else if (pa != 0) {
890 /*
891 * The page is mapped by this process but not
892 * both accessed and modified. It is also
893 * managed. Acquire the object lock so that
894 * other mappings might be examined. The page's
895 * identity may change at any point before its
896 * object lock is acquired, so re-validate if
897 * necessary.
898 */
899 m = PHYS_TO_VM_PAGE(pa);
900 while (object == NULL || m->object != object) {
901 if (object != NULL)
902 VM_OBJECT_WUNLOCK(object);
903 object = atomic_load_ptr(&m->object);
904 if (object == NULL)
905 goto retry;
906 VM_OBJECT_WLOCK(object);
907 }
908 if (pa != pmap_extract(pmap, addr))
909 goto retry;
910 KASSERT(vm_page_all_valid(m),
911 ("mincore: page %p is mapped but invalid",
912 m));
913 } else if (mincoreinfo == 0) {
914 /*
915 * The page is not mapped by this process. If
916 * the object implements managed pages, then
917 * determine if the page is resident so that
918 * the mappings might be examined.
919 */
920 if (current->object.vm_object != object) {
921 if (object != NULL)
922 VM_OBJECT_WUNLOCK(object);
923 object = current->object.vm_object;
924 VM_OBJECT_WLOCK(object);
925 }
926 if ((object->flags & OBJ_SWAP) != 0 ||
927 object->type == OBJT_VNODE) {
928 pindex = OFF_TO_IDX(current->offset +
929 (addr - current->start));
930 m = vm_page_lookup(object, pindex);
931 if (m != NULL && vm_page_none_valid(m))
932 m = NULL;
933 if (m != NULL)
934 mincoreinfo = MINCORE_INCORE;
935 }
936 }
937 if (m != NULL) {
938 VM_OBJECT_ASSERT_WLOCKED(m->object);
939
940 /* Examine other mappings of the page. */
941 if (m->dirty == 0 && pmap_is_modified(m))
942 vm_page_dirty(m);
943 if (m->dirty != 0)
944 mincoreinfo |= MINCORE_MODIFIED_OTHER;
945
946 /*
947 * The first test for PGA_REFERENCED is an
948 * optimization. The second test is
949 * required because a concurrent pmap
950 * operation could clear the last reference
951 * and set PGA_REFERENCED before the call to
952 * pmap_is_referenced().
953 */
954 if ((m->a.flags & PGA_REFERENCED) != 0 ||
955 pmap_is_referenced(m) ||
956 (m->a.flags & PGA_REFERENCED) != 0)
957 mincoreinfo |= MINCORE_REFERENCED_OTHER;
958 }
959 if (object != NULL)
960 VM_OBJECT_WUNLOCK(object);
961
962 /*
963 * subyte may page fault. In case it needs to modify
964 * the map, we release the lock.
965 */
966 vm_map_unlock_read(map);
967
968 /*
969 * calculate index into user supplied byte vector
970 */
971 vecindex = atop(addr - first_addr);
972
973 /*
974 * If we have skipped map entries, we need to make sure that
975 * the byte vector is zeroed for those skipped entries.
976 */
977 while ((lastvecindex + 1) < vecindex) {
978 ++lastvecindex;
979 error = subyte(vec + lastvecindex, 0);
980 if (error) {
981 error = EFAULT;
982 goto done2;
983 }
984 }
985
986 /*
987 * Pass the page information to the user
988 */
989 error = subyte(vec + vecindex, mincoreinfo);
990 if (error) {
991 error = EFAULT;
992 goto done2;
993 }
994
995 /*
996 * If the map has changed, due to the subyte, the previous
997 * output may be invalid.
998 */
999 vm_map_lock_read(map);
1000 if (timestamp != map->timestamp)
1001 goto RestartScan;
1002
1003 lastvecindex = vecindex;
1004 }
1005 }
1006
1007 /*
1008 * subyte may page fault. In case it needs to modify
1009 * the map, we release the lock.
1010 */
1011 vm_map_unlock_read(map);
1012
1013 /*
1014 * Zero the last entries in the byte vector.
1015 */
1016 vecindex = atop(end - first_addr);
1017 while ((lastvecindex + 1) < vecindex) {
1018 ++lastvecindex;
1019 error = subyte(vec + lastvecindex, 0);
1020 if (error) {
1021 error = EFAULT;
1022 goto done2;
1023 }
1024 }
1025
1026 /*
1027 * If the map has changed, due to the subyte, the previous
1028 * output may be invalid.
1029 */
1030 vm_map_lock_read(map);
1031 if (timestamp != map->timestamp)
1032 goto RestartScan;
1033 vm_map_unlock_read(map);
1034 done2:
1035 return (error);
1036 }
1037
1038 #ifndef _SYS_SYSPROTO_H_
1039 struct mlock_args {
1040 const void *addr;
1041 size_t len;
1042 };
1043 #endif
1044 int
1045 sys_mlock(struct thread *td, struct mlock_args *uap)
1046 {
1047
1048 return (kern_mlock(td->td_proc, td->td_ucred,
1049 __DECONST(uintptr_t, uap->addr), uap->len));
1050 }
1051
1052 int
1053 kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
1054 {
1055 vm_offset_t addr, end, last, start;
1056 vm_size_t npages, size;
1057 vm_map_t map;
1058 unsigned long nsize;
1059 int error;
1060
1061 error = priv_check_cred(cred, PRIV_VM_MLOCK);
1062 if (error)
1063 return (error);
1064 addr = addr0;
1065 size = len;
1066 last = addr + size;
1067 start = trunc_page(addr);
1068 end = round_page(last);
1069 if (last < addr || end < addr)
1070 return (EINVAL);
1071 npages = atop(end - start);
1072 if (npages > vm_page_max_user_wired)
1073 return (ENOMEM);
1074 map = &proc->p_vmspace->vm_map;
1075 PROC_LOCK(proc);
1076 nsize = ptoa(npages + pmap_wired_count(map->pmap));
1077 if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
1078 PROC_UNLOCK(proc);
1079 return (ENOMEM);
1080 }
1081 PROC_UNLOCK(proc);
1082 #ifdef RACCT
1083 if (racct_enable) {
1084 PROC_LOCK(proc);
1085 error = racct_set(proc, RACCT_MEMLOCK, nsize);
1086 PROC_UNLOCK(proc);
1087 if (error != 0)
1088 return (ENOMEM);
1089 }
1090 #endif
1091 error = vm_map_wire(map, start, end,
1092 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1093 #ifdef RACCT
1094 if (racct_enable && error != KERN_SUCCESS) {
1095 PROC_LOCK(proc);
1096 racct_set(proc, RACCT_MEMLOCK,
1097 ptoa(pmap_wired_count(map->pmap)));
1098 PROC_UNLOCK(proc);
1099 }
1100 #endif
1101 switch (error) {
1102 case KERN_SUCCESS:
1103 return (0);
1104 case KERN_INVALID_ARGUMENT:
1105 return (EINVAL);
1106 default:
1107 return (ENOMEM);
1108 }
1109 }
1110
1111 #ifndef _SYS_SYSPROTO_H_
1112 struct mlockall_args {
1113 int how;
1114 };
1115 #endif
1116
1117 int
1118 sys_mlockall(struct thread *td, struct mlockall_args *uap)
1119 {
1120 vm_map_t map;
1121 int error;
1122
1123 map = &td->td_proc->p_vmspace->vm_map;
1124 error = priv_check(td, PRIV_VM_MLOCK);
1125 if (error)
1126 return (error);
1127
1128 if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
1129 return (EINVAL);
1130
1131 /*
1132 * If wiring all pages in the process would cause it to exceed
1133 * a hard resource limit, return ENOMEM.
1134 */
1135 if (!old_mlock && uap->how & MCL_CURRENT) {
1136 if (map->size > lim_cur(td, RLIMIT_MEMLOCK))
1137 return (ENOMEM);
1138 }
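/*
 * Illustrative userland sketch: a process that wants all present and
 * future mappings to stay resident would call
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);
 *
 * which is subject to the RLIMIT_MEMLOCK check above unless the
 * vm.old_mlock knob is set.
 */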
1139 #ifdef RACCT
1140 if (racct_enable) {
1141 PROC_LOCK(td->td_proc);
1142 error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
1143 PROC_UNLOCK(td->td_proc);
1144 if (error != 0)
1145 return (ENOMEM);
1146 }
1147 #endif
1148
1149 if (uap->how & MCL_FUTURE) {
1150 vm_map_lock(map);
1151 vm_map_modflags(map, MAP_WIREFUTURE, 0);
1152 vm_map_unlock(map);
1153 error = 0;
1154 }
1155
1156 if (uap->how & MCL_CURRENT) {
1157 /*
1158 * P1003.1-2001 mandates that all currently mapped pages
1159 * will be memory resident and locked (wired) upon return
1160 * from mlockall(). vm_map_wire() will wire pages, by
1161 * calling vm_fault_wire() for each page in the region.
1162 */
1163 error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
1164 VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1165 if (error == KERN_SUCCESS)
1166 error = 0;
1167 else if (error == KERN_RESOURCE_SHORTAGE)
1168 error = ENOMEM;
1169 else
1170 error = EAGAIN;
1171 }
1172 #ifdef RACCT
1173 if (racct_enable && error != KERN_SUCCESS) {
1174 PROC_LOCK(td->td_proc);
1175 racct_set(td->td_proc, RACCT_MEMLOCK,
1176 ptoa(pmap_wired_count(map->pmap)));
1177 PROC_UNLOCK(td->td_proc);
1178 }
1179 #endif
1180
1181 return (error);
1182 }
1183
1184 #ifndef _SYS_SYSPROTO_H_
1185 struct munlockall_args {
1186 register_t dummy;
1187 };
1188 #endif
1189
1190 int
1191 sys_munlockall(struct thread *td, struct munlockall_args *uap)
1192 {
1193 vm_map_t map;
1194 int error;
1195
1196 map = &td->td_proc->p_vmspace->vm_map;
1197 error = priv_check(td, PRIV_VM_MUNLOCK);
1198 if (error)
1199 return (error);
1200
1201 /* Clear the MAP_WIREFUTURE flag from this vm_map. */
1202 vm_map_lock(map);
1203 vm_map_modflags(map, 0, MAP_WIREFUTURE);
1204 vm_map_unlock(map);
1205
1206 /* Forcibly unwire all pages. */
1207 error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
1208 VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1209 #ifdef RACCT
1210 if (racct_enable && error == KERN_SUCCESS) {
1211 PROC_LOCK(td->td_proc);
1212 racct_set(td->td_proc, RACCT_MEMLOCK, 0);
1213 PROC_UNLOCK(td->td_proc);
1214 }
1215 #endif
1216
1217 return (error);
1218 }
1219
1220 #ifndef _SYS_SYSPROTO_H_
1221 struct munlock_args {
1222 const void *addr;
1223 size_t len;
1224 };
1225 #endif
1226 int
1227 sys_munlock(struct thread *td, struct munlock_args *uap)
1228 {
1229
1230 return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
1231 }
1232
1233 int
1234 kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
1235 {
1236 vm_offset_t addr, end, last, start;
1237 #ifdef RACCT
1238 vm_map_t map;
1239 #endif
1240 int error;
1241
1242 error = priv_check(td, PRIV_VM_MUNLOCK);
1243 if (error)
1244 return (error);
1245 addr = addr0;
1246 last = addr + size;
1247 start = trunc_page(addr);
1248 end = round_page(last);
1249 if (last < addr || end < addr)
1250 return (EINVAL);
1251 error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
1252 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1253 #ifdef RACCT
1254 if (racct_enable && error == KERN_SUCCESS) {
1255 PROC_LOCK(td->td_proc);
1256 map = &td->td_proc->p_vmspace->vm_map;
1257 racct_set(td->td_proc, RACCT_MEMLOCK,
1258 ptoa(pmap_wired_count(map->pmap)));
1259 PROC_UNLOCK(td->td_proc);
1260 }
1261 #endif
1262 return (error == KERN_SUCCESS ? 0 : ENOMEM);
1263 }
1264
1265 /*
1266 * vm_mmap_vnode()
1267 *
1268 * Helper function for vm_mmap. Perform sanity check specific for mmap
1269 * operations on vnodes.
1270 */
1271 int
1272 vm_mmap_vnode(struct thread *td, vm_size_t objsize,
1273 vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
1274 struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
1275 boolean_t *writecounted)
1276 {
1277 struct vattr va;
1278 vm_object_t obj;
1279 vm_ooffset_t foff;
1280 struct ucred *cred;
1281 int error, flags;
1282 bool writex;
1283
1284 cred = td->td_ucred;
1285 writex = (*maxprotp & VM_PROT_WRITE) != 0 &&
1286 (*flagsp & MAP_SHARED) != 0;
1287 if ((error = vget(vp, LK_SHARED)) != 0)
1288 return (error);
1289 AUDIT_ARG_VNODE1(vp);
1290 foff = *foffp;
1291 flags = *flagsp;
1292 obj = vp->v_object;
1293 if (vp->v_type == VREG) {
1294 /*
1295 * Get the proper underlying object
1296 */
1297 if (obj == NULL) {
1298 error = EINVAL;
1299 goto done;
1300 }
1301 if (obj->type == OBJT_VNODE && obj->handle != vp) {
1302 vput(vp);
1303 vp = (struct vnode *)obj->handle;
1304 /*
1305 * Bypass filesystems obey the mpsafety of the
1306 * underlying fs. Tmpfs never bypasses.
1307 */
1308 error = vget(vp, LK_SHARED);
1309 if (error != 0)
1310 return (error);
1311 }
1312 if (writex) {
1313 *writecounted = TRUE;
1314 vm_pager_update_writecount(obj, 0, objsize);
1315 }
1316 } else {
1317 error = EXTERROR(EINVAL, "non-reg file");
1318 goto done;
1319 }
1320 if ((error = VOP_GETATTR(vp, &va, cred)))
1321 goto done;
1322 #ifdef MAC
1323 /* This relies on VM_PROT_* matching PROT_*. */
1324 error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
1325 if (error != 0)
1326 goto done;
1327 #endif
1328 if ((flags & MAP_SHARED) != 0) {
1329 if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
1330 if (prot & VM_PROT_WRITE) {
1331 error = EPERM;
1332 goto done;
1333 }
1334 *maxprotp &= ~VM_PROT_WRITE;
1335 }
1336 }
1337 /*
1338 * If it is a regular file without any references
1339 * we do not need to sync it.
1340 * Adjust object size to be the size of actual file.
1341 */
1342 objsize = round_page(va.va_size);
1343 if (va.va_nlink == 0)
1344 flags |= MAP_NOSYNC;
1345 if (obj->type == OBJT_VNODE) {
1346 obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
1347 cred);
1348 if (obj == NULL) {
1349 error = ENOMEM;
1350 goto done;
1351 }
1352 } else {
1353 KASSERT((obj->flags & OBJ_SWAP) != 0, ("wrong object type"));
1354 vm_object_reference(obj);
1355 #if VM_NRESERVLEVEL > 0
1356 if ((obj->flags & OBJ_COLORED) == 0) {
1357 VM_OBJECT_WLOCK(obj);
1358 vm_object_color(obj, 0);
1359 VM_OBJECT_WUNLOCK(obj);
1360 }
1361 #endif
1362 }
1363 *objp = obj;
1364 *flagsp = flags;
1365
1366 VOP_MMAPPED(vp);
1367
1368 done:
1369 if (error != 0 && *writecounted) {
1370 *writecounted = FALSE;
1371 vm_pager_update_writecount(obj, objsize, 0);
1372 }
1373 vput(vp);
1374 return (error);
1375 }
1376
1377 /*
1378 * vm_mmap_cdev()
1379 *
1380 * Helper function for vm_mmap. Perform sanity check specific for mmap
1381 * operations on cdevs.
1382 */
1383 int
1384 vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
1385 vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
1386 vm_ooffset_t *foff, vm_object_t *objp)
1387 {
1388 vm_object_t obj;
1389 int error, flags;
1390
1391 flags = *flagsp;
1392
1393 if (dsw->d_flags & D_MMAP_ANON) {
1394 *objp = NULL;
1395 *foff = 0;
1396 *maxprotp = VM_PROT_ALL;
1397 *flagsp |= MAP_ANON;
1398 return (0);
1399 }
1400
1401 /*
1402 * cdevs do not provide private mappings of any kind.
1403 */
1404 if ((*maxprotp & VM_PROT_WRITE) == 0 &&
1405 (prot & VM_PROT_WRITE) != 0)
1406 return (EACCES);
1407 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) {
1408 return (EXTERROR(EINVAL, "cdev mapping must be shared"));
1409 }
1410
1411 /*
1412 * Force device mappings to be shared.
1413 */
1414 flags |= MAP_SHARED;
1415 #ifdef MAC_XXX
1416 error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
1417 if (error != 0)
1418 return (error);
1419 #endif
1420 /*
1421 * First, try d_mmap_single(). If that is not implemented
1422 * (returns ENODEV), fall back to using the device pager.
1423 * Note that d_mmap_single() must return a reference to the
1424 * object (it needs to bump the reference count of the object
1425 * it returns somehow).
1426 *
1427 * XXX assumes VM_PROT_* == PROT_*
1428 */
1429 error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
1430 if (error != ENODEV)
1431 return (error);
1432 obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
1433 td->td_ucred);
1434 if (obj == NULL) {
1435 return (EXTERROR(EINVAL,
1436 "cdev driver does not support mmap"));
1437 }
1438 *objp = obj;
1439 *flagsp = flags;
1440 return (0);
1441 }
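/*
 * For illustration, a hypothetical driver-side d_mmap_single()
 * implementation matching the contract described above might look like
 * the sketch below.  The "foo_" names and the softc layout are
 * assumptions made for this example only.  Returning ENODEV selects the
 * device-pager fallback; on success the routine must hand back a
 * referenced object, hence the vm_object_reference() call.
 *
 *	static int
 *	foo_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
 *	    vm_size_t size, struct vm_object **objp, int nprot)
 *	{
 *		struct foo_softc *sc = cdev->si_drv1;
 *
 *		if (sc->obj == NULL)
 *			return (ENODEV);
 *		if (*offset + size > sc->obj_size)
 *			return (EINVAL);
 *		vm_object_reference(sc->obj);
 *		*objp = sc->obj;
 *		return (0);
 *	}
 */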
1442
1443 int
1444 vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1445 vm_prot_t maxprot, int flags,
1446 objtype_t handle_type, void *handle,
1447 vm_ooffset_t foff)
1448 {
1449 vm_object_t object;
1450 struct thread *td = curthread;
1451 int error;
1452 boolean_t writecounted;
1453
1454 if (size == 0) {
1455 return (EXTERROR(EINVAL, "zero-sized req"));
1456 }
1457
1458 size = round_page(size);
1459 object = NULL;
1460 writecounted = FALSE;
1461
1462 switch (handle_type) {
1463 case OBJT_DEVICE: {
1464 struct cdevsw *dsw;
1465 struct cdev *cdev;
1466 int ref;
1467
1468 cdev = handle;
1469 dsw = dev_refthread(cdev, &ref);
1470 if (dsw == NULL)
1471 return (ENXIO);
1472 error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
1473 dsw, &foff, &object);
1474 dev_relthread(cdev, ref);
1475 break;
1476 }
1477 case OBJT_VNODE:
1478 error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
1479 handle, &foff, &object, &writecounted);
1480 break;
1481 default:
1482 error = EXTERROR(EINVAL, "unsupported backing obj type",
1483 handle_type);
1484 break;
1485 }
1486 if (error)
1487 return (error);
1488
1489 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
1490 foff, writecounted, td);
1491 if (error != 0 && object != NULL) {
1492 /*
1493 * If this mapping was accounted for in the vnode's
1494 * writecount, then undo that now.
1495 */
1496 if (writecounted)
1497 vm_pager_release_writecount(object, 0, size);
1498 vm_object_deallocate(object);
1499 }
1500 return (error);
1501 }
1502
1503 int
1504 kern_mmap_racct_check(struct thread *td, vm_map_t map, vm_size_t size)
1505 {
1506 int error;
1507
1508 RACCT_PROC_LOCK(td->td_proc);
1509 if (map->size + size > lim_cur(td, RLIMIT_VMEM)) {
1510 RACCT_PROC_UNLOCK(td->td_proc);
1511 return (ENOMEM);
1512 }
1513 if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
1514 RACCT_PROC_UNLOCK(td->td_proc);
1515 return (ENOMEM);
1516 }
1517 if (!old_mlock && map->flags & MAP_WIREFUTURE) {
1518 if (ptoa(pmap_wired_count(map->pmap)) + size >
1519 lim_cur(td, RLIMIT_MEMLOCK)) {
1520 racct_set_force(td->td_proc, RACCT_VMEM, map->size);
1521 RACCT_PROC_UNLOCK(td->td_proc);
1522 return (ENOMEM);
1523 }
1524 error = racct_set(td->td_proc, RACCT_MEMLOCK,
1525 ptoa(pmap_wired_count(map->pmap)) + size);
1526 if (error != 0) {
1527 racct_set_force(td->td_proc, RACCT_VMEM, map->size);
1528 RACCT_PROC_UNLOCK(td->td_proc);
1529 return (error);
1530 }
1531 }
1532 RACCT_PROC_UNLOCK(td->td_proc);
1533 return (0);
1534 }
1535
1536 /*
1537 * Internal version of mmap that maps a specific VM object into a
1538 * map. Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
1539 */
1540 int
1541 vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1542 vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
1543 boolean_t writecounted, struct thread *td)
1544 {
1545 vm_offset_t default_addr, max_addr;
1546 int docow, error, findspace, rv;
1547 bool curmap, fitit;
1548
1549 curmap = map == &td->td_proc->p_vmspace->vm_map;
1550 if (curmap) {
1551 error = kern_mmap_racct_check(td, map, size);
1552 if (error != 0)
1553 return (error);
1554 }
1555
1556 /*
1557 * We currently can only deal with page aligned file offsets.
1558 * The mmap() system call already enforces this by subtracting
1559 * the page offset from the file offset, but checking here
1560 * catches errors in device drivers (e.g. d_mmap_single()
1561 * callbacks) and other internal mapping requests (such as in
1562 * exec).
1563 */
1564 if ((foff & PAGE_MASK) != 0) {
1565 return (EXTERROR(EINVAL, "offset not page-aligned", foff));
1566 }
1567
1568 if ((flags & MAP_FIXED) == 0) {
1569 fitit = true;
1570 *addr = round_page(*addr);
1571 } else {
1572 if (*addr != trunc_page(*addr)) {
1573 return (EXTERROR(EINVAL,
1574 "non-fixed mapping address not aligned", *addr));
1575 }
1576 fitit = false;
1577 }
1578
1579 if (flags & MAP_ANON) {
1580 if (object != NULL) {
1581 return (EXTERROR(EINVAL,
1582 "anon mapping backed by an object"));
1583 }
1584 if (foff != 0) {
1585 return (EXTERROR(EINVAL,
1586 "anon mapping with non-zero offset"));
1587 }
1588 docow = 0;
1589 } else if (flags & MAP_PREFAULT_READ)
1590 docow = MAP_PREFAULT;
1591 else
1592 docow = MAP_PREFAULT_PARTIAL;
1593
1594 if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
1595 docow |= MAP_COPY_ON_WRITE;
1596 if (flags & MAP_NOSYNC)
1597 docow |= MAP_DISABLE_SYNCER;
1598 if (flags & MAP_NOCORE)
1599 docow |= MAP_DISABLE_COREDUMP;
1600 /* Shared memory is also shared with children. */
1601 if (flags & MAP_SHARED)
1602 docow |= MAP_INHERIT_SHARE;
1603 if (writecounted)
1604 docow |= MAP_WRITECOUNT;
1605 if (flags & MAP_STACK) {
1606 if (object != NULL) {
1607 return (EXTERROR(EINVAL,
1608 "stack mapping backed by an object"));
1609 }
1610 docow |= MAP_STACK_AREA;
1611 }
1612 if ((flags & MAP_EXCL) != 0)
1613 docow |= MAP_CHECK_EXCL;
1614 if ((flags & MAP_GUARD) != 0)
1615 docow |= MAP_CREATE_GUARD;
1616
1617 if (fitit) {
1618 if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
1619 findspace = VMFS_SUPER_SPACE;
1620 else if ((flags & MAP_ALIGNMENT_MASK) != 0)
1621 findspace = VMFS_ALIGNED_SPACE(flags >>
1622 MAP_ALIGNMENT_SHIFT);
1623 else
1624 findspace = VMFS_OPTIMAL_SPACE;
1625 max_addr = 0;
1626 if ((flags & MAP_32BIT) != 0)
1627 max_addr = MAP_32BIT_MAX_ADDR;
1628 if (curmap) {
1629 default_addr =
1630 round_page((vm_offset_t)td->td_proc->p_vmspace->
1631 vm_daddr + lim_max(td, RLIMIT_DATA));
1632 if ((flags & MAP_32BIT) != 0)
1633 default_addr = 0;
1634 rv = vm_map_find_min(map, object, foff, addr, size,
1635 default_addr, max_addr, findspace, prot, maxprot,
1636 docow);
1637 } else {
1638 rv = vm_map_find(map, object, foff, addr, size,
1639 max_addr, findspace, prot, maxprot, docow);
1640 }
1641 } else {
1642 rv = vm_map_fixed(map, object, foff, *addr, size,
1643 prot, maxprot, docow);
1644 }
1645
1646 if (rv == KERN_SUCCESS) {
1647 /*
1648 * If the process has requested that all future mappings
1649 * be wired, then heed this.
1650 */
1651 if ((map->flags & MAP_WIREFUTURE) != 0) {
1652 vm_map_lock(map);
1653 if ((map->flags & MAP_WIREFUTURE) != 0)
1654 (void)vm_map_wire_locked(map, *addr,
1655 *addr + size, VM_MAP_WIRE_USER |
1656 ((flags & MAP_STACK) ? VM_MAP_WIRE_HOLESOK :
1657 VM_MAP_WIRE_NOHOLES));
1658 vm_map_unlock(map);
1659 }
1660 }
1661 return (vm_mmap_to_errno(rv));
1662 }
1663
1664 /*
1665 * Translate a Mach VM return code to zero on success or the appropriate errno
1666 * on failure.
1667 */
1668 int
1669 vm_mmap_to_errno(int rv)
1670 {
1671 int error;
1672
1673 switch (rv) {
1674 case KERN_SUCCESS:
1675 return (0);
1676 case KERN_INVALID_ADDRESS:
1677 case KERN_NO_SPACE:
1678 error = ENOMEM;
1679 break;
1680 case KERN_PROTECTION_FAILURE:
1681 error = EACCES;
1682 break;
1683 default:
1684 error = EINVAL;
1685 break;
1686 }
1687 if ((curthread->td_pflags2 & (TDP2_UEXTERR | TDP2_EXTERR)) ==
1688 TDP2_UEXTERR)
1689 EXTERROR(error, "mach error", rv);
1690 return (error);
1691 }
1692