Lines Matching +full:vm +full:- +full:map
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
40 * Mapped file (mmap) interface to VM
81 #include <vm/vm.h>
82 #include <vm/vm_param.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_map.h>
85 #include <vm/vm_object.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_pager.h>
88 #include <vm/vm_pageout.h>
89 #include <vm/vm_extern.h>
90 #include <vm/vm_page.h>
91 #include <vm/vnode_pager.h>
114 td->td_retval[0] = PAGE_SIZE; in ogetpagesize()
120 * Memory Map (mmap) system call. Note that the file offset
124 * page-aligned, the actual mapping starts at trunc_page(addr)
128 * memory-based, such as a video framebuffer, can be mmap'd. Otherwise
129 * there would be no cache coherency between a descriptor and a VM mapping
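
The adjustment described above can be illustrated from userland; the following is a minimal sketch (not part of this file, and the file path is only an arbitrary example) that performs the same trunc_page-style rounding by hand: the offset is rounded down to a page boundary for mmap(), and the in-page remainder is added back when indexing the mapping.

#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	off_t pos = 100;		/* deliberately not page-aligned */
	off_t pageoff = pos % pagesz;	/* offset of pos within its page */
	size_t len = 64;
	char *base;
	int fd;

	fd = open("/etc/motd", O_RDONLY);	/* any non-empty regular file */
	if (fd == -1)
		return (1);
	/* Map from the enclosing page boundary, then index back in. */
	base = mmap(NULL, len + pageoff, PROT_READ, MAP_SHARED, fd,
	    pos - pageoff);
	if (base == MAP_FAILED) {
		close(fd);
		return (1);
	}
	printf("byte at file offset %lld is 0x%02x\n", (long long)pos,
	    (unsigned char)base[pageoff]);
	munmap(base, len + pageoff);
	close(fd);
	return (0);
}
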
149 .mr_hint = (uintptr_t)uap->addr, in sys_mmap()
150 .mr_len = uap->len, in sys_mmap()
151 .mr_prot = uap->prot, in sys_mmap()
152 .mr_flags = uap->flags, in sys_mmap()
153 .mr_fd = uap->fd, in sys_mmap()
154 .mr_pos = uap->pos, in sys_mmap()
162 if ((p->p_flag2 & P2_PROTMAX_DISABLE) != 0 || in kern_mmap_maxprot()
163 (p->p_fctl0 & NT_FREEBSD_FCTL_PROTMAX_DISABLE) != 0) in kern_mmap_maxprot()
165 if (((p->p_flag2 & P2_PROTMAX_ENABLE) != 0 || imply_prot_max) && in kern_mmap_maxprot()
185 orig_addr = addr = mrp->mr_hint; in kern_mmap()
186 len = mrp->mr_len; in kern_mmap()
187 prot = mrp->mr_prot; in kern_mmap()
188 flags = mrp->mr_flags; in kern_mmap()
189 fd = mrp->mr_fd; in kern_mmap()
190 pos = mrp->mr_pos; in kern_mmap()
191 check_fp_fn = mrp->mr_check_fp_fn; in kern_mmap()
200 p = td->td_proc; in kern_mmap()
209 vms = p->p_vmspace; in kern_mmap()
221 * Anonymous mapping shall specify -1 as file descriptor and in kern_mmap()
224 * ld.so sometimes issues anonymous map requests with non-zero in kern_mmap()
228 if ((len == 0 && p->p_osrel >= P_OSREL_MAP_ANON) || in kern_mmap()
229 ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0))) in kern_mmap()
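
The checks above enforce the userland contract for anonymous mappings; a minimal usage sketch (not part of this file) passing -1 as the descriptor and a zero offset:

#include <sys/mman.h>
#include <stddef.h>

/* Allocate page-backed anonymous memory; returns NULL on failure. */
static void *
alloc_anon(size_t len)
{
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
	    -1, 0);
	return (p == MAP_FAILED ? NULL : p);
}
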
237 if ((fd != -1) || in kern_mmap()
254 if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 || in kern_mmap()
264 pos -= pageoff; in kern_mmap()
290 addr -= pageoff; in kern_mmap()
294 /* Address range must be all in user VM space. */ in kern_mmap()
295 if (!vm_map_range_valid(&vms->vm_map, addr, addr + size)) in kern_mmap()
309 * XXX for non-fixed mappings where no hint is provided or in kern_mmap()
319 (addr >= round_page((vm_offset_t)vms->vm_taddr) && in kern_mmap()
320 addr < round_page((vm_offset_t)vms->vm_daddr + in kern_mmap()
322 addr = round_page((vm_offset_t)vms->vm_daddr + in kern_mmap()
328 * binaries that request a page-aligned mapping of in kern_mmap()
334 error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE, in kern_mmap()
342 error = vm_mmap_object(&vms->vm_map, &addr, size, prot, in kern_mmap()
364 p->p_osrel >= P_OSREL_MAP_FSTRICT) { in kern_mmap()
374 if (fp->f_ops == &shm_ops && shm_largepage(fp->f_data)) in kern_mmap()
377 error = fo_mmap(fp, &vms->vm_map, &addr, size, prot, in kern_mmap()
382 td->td_retval[0] = addr + pageoff; in kern_mmap()
395 .mr_hint = (uintptr_t)uap->addr, in freebsd6_mmap()
396 .mr_len = uap->len, in freebsd6_mmap()
397 .mr_prot = uap->prot, in freebsd6_mmap()
398 .mr_flags = uap->flags, in freebsd6_mmap()
399 .mr_fd = uap->fd, in freebsd6_mmap()
400 .mr_pos = uap->pos, in freebsd6_mmap()
419 return (kern_ommap(td, (uintptr_t)uap->addr, uap->len, uap->prot, in ommap()
420 uap->flags, uap->fd, uap->pos)); in ommap()
449 if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) && in kern_ommap()
486 return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags)); in sys_msync()
494 vm_map_t map; in kern_msync() local
499 addr -= pageoff; in kern_msync()
508 map = &td->td_proc->p_vmspace->vm_map; in kern_msync()
513 rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0, in kern_msync()
539 return (kern_munmap(td, (uintptr_t)uap->addr, uap->len)); in sys_munmap()
552 vm_map_t map; in kern_munmap() local
560 addr -= pageoff; in kern_munmap()
564 map = &td->td_proc->p_vmspace->vm_map; in kern_munmap()
565 if (!vm_map_range_valid(map, addr, end)) in kern_munmap()
568 vm_map_lock(map); in kern_munmap()
578 if (vm_map_lookup_entry(map, addr, &entry)) { in kern_munmap()
579 for (; entry->start < end; in kern_munmap()
581 if (vm_map_check_protection(map, entry->start, in kern_munmap()
582 entry->end, VM_PROT_EXECUTE) == TRUE) { in kern_munmap()
591 rv = vm_map_delete(map, addr, end); in kern_munmap()
595 /* downgrade the lock to prevent a LOR with the pmc-sx lock */ in kern_munmap()
596 vm_map_lock_downgrade(map); in kern_munmap()
599 vm_map_unlock_read(map); in kern_munmap()
602 vm_map_unlock(map); in kern_munmap()
618 return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, in sys_mprotect()
619 uap->prot, 0)); in sys_mprotect()
636 addr -= pageoff; in kern_mprotect()
640 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { in kern_mprotect()
651 vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map, in kern_mprotect()
678 return (kern_minherit(td, (uintptr_t)uap->addr, uap->len, in sys_minherit()
679 uap->inherit)); in sys_minherit()
694 addr -= pageoff; in kern_minherit()
700 switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr, in kern_minherit()
722 return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav)); in sys_madvise()
728 vm_map_t map; in kern_madvise() local
738 return (kern_procctl(td, P_PID, td->td_proc->p_pid, in kern_madvise()
746 map = &td->td_proc->p_vmspace->vm_map; in kern_madvise()
748 if (!vm_map_range_valid(map, addr, addr + len)) in kern_madvise()
761 return (vm_map_madvise(map, start, end, behav)); in kern_madvise()
776 return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec)); in sys_mincore()
783 vm_map_t map; in kern_mincore() local
799 map = &td->td_proc->p_vmspace->vm_map; in kern_mincore()
800 if (end > vm_map_max(map) || end < addr) in kern_mincore()
803 pmap = vmspace_pmap(td->td_proc->p_vmspace); in kern_mincore()
805 vm_map_lock_read(map); in kern_mincore()
807 timestamp = map->timestamp; in kern_mincore()
809 if (!vm_map_lookup_entry(map, addr, &entry)) { in kern_mincore()
810 vm_map_unlock_read(map); in kern_mincore()
815 * Do this on a map entry basis so that if the pages are not in kern_mincore()
819 lastvecindex = -1; in kern_mincore()
820 while (entry->start < end) { in kern_mincore()
826 if (current->end < end && in kern_mincore()
827 entry->start > current->end) { in kern_mincore()
828 vm_map_unlock_read(map); in kern_mincore()
835 if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) || in kern_mincore()
836 current->object.vm_object == NULL) in kern_mincore()
840 * limit this scan to the current map entry and the in kern_mincore()
843 if (addr < current->start) in kern_mincore()
844 addr = current->start; in kern_mincore()
845 cend = current->end; in kern_mincore()
873 * object lock is acquired, so re-validate if in kern_mincore()
877 while (object == NULL || m->object != object) { in kern_mincore()
880 object = atomic_load_ptr(&m->object); in kern_mincore()
897 if (current->object.vm_object != object) { in kern_mincore()
900 object = current->object.vm_object; in kern_mincore()
903 if ((object->flags & OBJ_SWAP) != 0 || in kern_mincore()
904 object->type == OBJT_VNODE) { in kern_mincore()
905 pindex = OFF_TO_IDX(current->offset + in kern_mincore()
906 (addr - current->start)); in kern_mincore()
915 VM_OBJECT_ASSERT_WLOCKED(m->object); in kern_mincore()
918 if (m->dirty == 0 && pmap_is_modified(m)) in kern_mincore()
920 if (m->dirty != 0) in kern_mincore()
931 if ((m->a.flags & PGA_REFERENCED) != 0 || in kern_mincore()
933 (m->a.flags & PGA_REFERENCED) != 0) in kern_mincore()
941 * the map, we release the lock. in kern_mincore()
943 vm_map_unlock_read(map); in kern_mincore()
948 vecindex = atop(addr - first_addr); in kern_mincore()
951 * If we have skipped map entries, we need to make sure that in kern_mincore()
973 * If the map has changed, due to the subyte, the previous in kern_mincore()
976 vm_map_lock_read(map); in kern_mincore()
977 if (timestamp != map->timestamp) in kern_mincore()
986 * the map, we release the lock. in kern_mincore()
988 vm_map_unlock_read(map); in kern_mincore()
993 vecindex = atop(end - first_addr); in kern_mincore()
1004 * If the map has changed, due to the subyte, the previous in kern_mincore()
1007 vm_map_lock_read(map); in kern_mincore()
1008 if (timestamp != map->timestamp) in kern_mincore()
1010 vm_map_unlock_read(map); in kern_mincore()
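
The per-page status vector built above is what mincore(2) returns to userland, one byte per page of the queried range. A minimal consumer sketch (not part of this file); MINCORE_INCORE is the FreeBSD residency bit from <sys/mman.h>:

#include <sys/mman.h>
#include <stdlib.h>
#include <unistd.h>

/* Count resident pages in [addr, addr + len); returns -1 on error. */
static int
count_resident(const void *addr, size_t len)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	size_t i, npages = (len + pagesz - 1) / pagesz;
	char *vec = malloc(npages);
	int resident = 0;

	if (vec == NULL)
		return (-1);
	if (mincore(addr, len, vec) == -1) {
		free(vec);
		return (-1);
	}
	for (i = 0; i < npages; i++)
		if (vec[i] & MINCORE_INCORE)
			resident++;
	free(vec);
	return (resident);
}
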
1025 return (kern_mlock(td->td_proc, td->td_ucred, in sys_mlock()
1026 __DECONST(uintptr_t, uap->addr), uap->len)); in sys_mlock()
1034 vm_map_t map; in kern_mlock() local
1048 npages = atop(end - start); in kern_mlock()
1051 map = &proc->p_vmspace->vm_map; in kern_mlock()
1053 nsize = ptoa(npages + pmap_wired_count(map->pmap)); in kern_mlock()
1068 error = vm_map_wire(map, start, end, in kern_mlock()
1074 ptoa(pmap_wired_count(map->pmap))); in kern_mlock()
1097 vm_map_t map; in sys_mlockall() local
1100 map = &td->td_proc->p_vmspace->vm_map; in sys_mlockall()
1105 if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0)) in sys_mlockall()
1112 if (!old_mlock && uap->how & MCL_CURRENT) { in sys_mlockall()
1113 if (map->size > lim_cur(td, RLIMIT_MEMLOCK)) in sys_mlockall()
1118 PROC_LOCK(td->td_proc); in sys_mlockall()
1119 error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size); in sys_mlockall()
1120 PROC_UNLOCK(td->td_proc); in sys_mlockall()
1126 if (uap->how & MCL_FUTURE) { in sys_mlockall()
1127 vm_map_lock(map); in sys_mlockall()
1128 vm_map_modflags(map, MAP_WIREFUTURE, 0); in sys_mlockall()
1129 vm_map_unlock(map); in sys_mlockall()
1133 if (uap->how & MCL_CURRENT) { in sys_mlockall()
1135 * P1003.1-2001 mandates that all currently mapped pages in sys_mlockall()
1140 error = vm_map_wire(map, vm_map_min(map), vm_map_max(map), in sys_mlockall()
1151 PROC_LOCK(td->td_proc); in sys_mlockall()
1152 racct_set(td->td_proc, RACCT_MEMLOCK, in sys_mlockall()
1153 ptoa(pmap_wired_count(map->pmap))); in sys_mlockall()
1154 PROC_UNLOCK(td->td_proc); in sys_mlockall()
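
The MCL_CURRENT and MCL_FUTURE paths above back the standard userland call; a minimal usage sketch (not part of this file):

#include <sys/mman.h>
#include <err.h>

int
main(void)
{
	/* Wire all pages mapped now, and all mappings created later. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
		err(1, "mlockall");
	/* ... latency-critical work that must not take hard page faults ... */
	munlockall();
	return (0);
}
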
1170 vm_map_t map; in sys_munlockall() local
1173 map = &td->td_proc->p_vmspace->vm_map; in sys_munlockall()
1179 vm_map_lock(map); in sys_munlockall()
1180 vm_map_modflags(map, 0, MAP_WIREFUTURE); in sys_munlockall()
1181 vm_map_unlock(map); in sys_munlockall()
1184 error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map), in sys_munlockall()
1188 PROC_LOCK(td->td_proc); in sys_munlockall()
1189 racct_set(td->td_proc, RACCT_MEMLOCK, 0); in sys_munlockall()
1190 PROC_UNLOCK(td->td_proc); in sys_munlockall()
1207 return (kern_munlock(td, (uintptr_t)uap->addr, uap->len)); in sys_munlock()
1215 vm_map_t map; in kern_munlock() local
1228 error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end, in kern_munlock()
1232 PROC_LOCK(td->td_proc); in kern_munlock()
1233 map = &td->td_proc->p_vmspace->vm_map; in kern_munlock()
1234 racct_set(td->td_proc, RACCT_MEMLOCK, in kern_munlock()
1235 ptoa(pmap_wired_count(map->pmap))); in kern_munlock()
1236 PROC_UNLOCK(td->td_proc); in kern_munlock()
1261 cred = td->td_ucred; in vm_mmap_vnode()
1269 obj = vp->v_object; in vm_mmap_vnode()
1270 if (vp->v_type == VREG) { in vm_mmap_vnode()
1278 if (obj->type == OBJT_VNODE && obj->handle != vp) { in vm_mmap_vnode()
1280 vp = (struct vnode *)obj->handle; in vm_mmap_vnode()
1322 if (obj->type == OBJT_VNODE) { in vm_mmap_vnode()
1330 KASSERT((obj->flags & OBJ_SWAP) != 0, ("wrong object type")); in vm_mmap_vnode()
1333 if ((obj->flags & OBJ_COLORED) == 0) { in vm_mmap_vnode()
1370 if (dsw->d_flags & D_MMAP_ANON) { in vm_mmap_cdev()
1390 error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot); in vm_mmap_cdev()
1403 error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot); in vm_mmap_cdev()
1407 td->td_ucred); in vm_mmap_cdev()
1419 vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, in vm_mmap() argument
1462 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, in vm_mmap()
1477 kern_mmap_racct_check(struct thread *td, vm_map_t map, vm_size_t size) in kern_mmap_racct_check() argument
1481 RACCT_PROC_LOCK(td->td_proc); in kern_mmap_racct_check()
1482 if (map->size + size > lim_cur(td, RLIMIT_VMEM)) { in kern_mmap_racct_check()
1483 RACCT_PROC_UNLOCK(td->td_proc); in kern_mmap_racct_check()
1486 if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) { in kern_mmap_racct_check()
1487 RACCT_PROC_UNLOCK(td->td_proc); in kern_mmap_racct_check()
1490 if (!old_mlock && map->flags & MAP_WIREFUTURE) { in kern_mmap_racct_check()
1491 if (ptoa(pmap_wired_count(map->pmap)) + size > in kern_mmap_racct_check()
1493 racct_set_force(td->td_proc, RACCT_VMEM, map->size); in kern_mmap_racct_check()
1494 RACCT_PROC_UNLOCK(td->td_proc); in kern_mmap_racct_check()
1497 error = racct_set(td->td_proc, RACCT_MEMLOCK, in kern_mmap_racct_check()
1498 ptoa(pmap_wired_count(map->pmap)) + size); in kern_mmap_racct_check()
1500 racct_set_force(td->td_proc, RACCT_VMEM, map->size); in kern_mmap_racct_check()
1501 RACCT_PROC_UNLOCK(td->td_proc); in kern_mmap_racct_check()
1505 RACCT_PROC_UNLOCK(td->td_proc); in kern_mmap_racct_check()
1510 * Internal version of mmap that maps a specific VM object into a
1511 * map. Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
1514 vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, in vm_mmap_object() argument
1522 curmap = map == &td->td_proc->p_vmspace->vm_map; in vm_mmap_object()
1524 error = kern_mmap_racct_check(td, map, size); in vm_mmap_object()
1592 round_page((vm_offset_t)td->td_proc->p_vmspace-> in vm_mmap_object()
1596 rv = vm_map_find_min(map, object, foff, addr, size, in vm_mmap_object()
1600 rv = vm_map_find(map, object, foff, addr, size, in vm_mmap_object()
1604 rv = vm_map_fixed(map, object, foff, *addr, size, in vm_mmap_object()
1613 if ((map->flags & MAP_WIREFUTURE) != 0) { in vm_mmap_object()
1614 vm_map_lock(map); in vm_mmap_object()
1615 if ((map->flags & MAP_WIREFUTURE) != 0) in vm_mmap_object()
1616 (void)vm_map_wire_locked(map, *addr, in vm_mmap_object()
1620 vm_map_unlock(map); in vm_mmap_object()
1627 * Translate a Mach VM return code to zero on success or the appropriate errno
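
The function this comment introduces is not among the matched lines. As a hedged sketch of the usual pairing of the Mach-style KERN_* codes (from <vm/vm_param.h>, included above, with errno values from the kernel environment of this file), under an illustrative name rather than the file's own:

static int
mach_vm_rv_to_errno(int rv)
{
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);		/* no room in the address space */
	case KERN_PROTECTION_FAILURE:
		return (EACCES);		/* requested protection denied */
	default:
		return (EINVAL);
	}
}
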