Lines Matching +full:enforce +full:- +full:video +full:- +full:mode
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
114 td->td_retval[0] = PAGE_SIZE; in ogetpagesize()
124 * page-aligned, the actual mapping starts at trunc_page(addr)
128 * memory-based, such as a video framebuffer, can be mmap'd. Otherwise
149 .mr_hint = (uintptr_t)uap->addr, in sys_mmap()
150 .mr_len = uap->len, in sys_mmap()
151 .mr_prot = uap->prot, in sys_mmap()
152 .mr_flags = uap->flags, in sys_mmap()
153 .mr_fd = uap->fd, in sys_mmap()
154 .mr_pos = uap->pos, in sys_mmap()
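sys_mmap() above only repackages the syscall arguments into a struct mmap_req before handing off to kern_mmap(). The userland entry point is plain mmap(2); a minimal sketch of a call that exercises this path (the file path, helper layout, and error handling are mine, not from the listing):

    #include <sys/mman.h>
    #include <err.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd = open("/tmp/example", O_RDONLY);  /* hypothetical file */

            if (fd == -1)
                    err(1, "open");
            /*
             * With a NULL hint the kernel picks the address; a non-aligned
             * hint would be truncated to a page boundary (trunc_page), per
             * the comment near line 124 above.
             */
            void *p = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    err(1, "mmap");
            printf("mapped at %p\n", p);
            munmap(p, getpagesize());
            close(fd);
            return (0);
    }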
162 if ((p->p_flag2 & P2_PROTMAX_DISABLE) != 0 || in kern_mmap_maxprot()
163 (p->p_fctl0 & NT_FREEBSD_FCTL_PROTMAX_DISABLE) != 0) in kern_mmap_maxprot()
165 if (((p->p_flag2 & P2_PROTMAX_ENABLE) != 0 || imply_prot_max) && in kern_mmap_maxprot()
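kern_mmap_maxprot() decides the implicit maximum protection: the per-process opt-outs (P2_PROTMAX_DISABLE, the NT_FREEBSD_FCTL_PROTMAX_DISABLE feature-control note) win over P2_PROTMAX_ENABLE and the global imply_prot_max knob. A caller can also set the cap explicitly with PROT_MAX(); a minimal sketch (the helper name is mine):

    #include <sys/mman.h>
    #include <stddef.h>

    /*
     * Start read-only, but allow a later mprotect(2) up to read/write and
     * never to executable; PROT_MAX() makes the cap explicit, independent
     * of the imply_prot_max default the fragments above test.
     */
    static void *
    map_ro_capped_rw(size_t len)
    {
            return (mmap(NULL, len,
                PROT_MAX(PROT_READ | PROT_WRITE) | PROT_READ,
                MAP_ANON, -1, 0));
    }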
185 orig_addr = addr = mrp->mr_hint; in kern_mmap()
186 len = mrp->mr_len; in kern_mmap()
187 prot = mrp->mr_prot; in kern_mmap()
188 flags = mrp->mr_flags; in kern_mmap()
189 fd = mrp->mr_fd; in kern_mmap()
190 pos = mrp->mr_pos; in kern_mmap()
191 check_fp_fn = mrp->mr_check_fp_fn; in kern_mmap()
200 p = td->td_proc; in kern_mmap()
209 vms = p->p_vmspace; in kern_mmap()
219 * Enforce the constraints. in kern_mmap()
221 * Anonymous mapping shall specify -1 as file descriptor and in kern_mmap()
224 * ld.so sometimes issues anonymous map requests with non-zero in kern_mmap()
228 if ((len == 0 && p->p_osrel >= P_OSREL_MAP_ANON) || in kern_mmap()
229 ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0))) in kern_mmap()
237 if ((fd != -1) || in kern_mmap()
254 if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 || in kern_mmap()
264 pos -= pageoff; in kern_mmap()
290 addr -= pageoff; in kern_mmap()
295 if (!vm_map_range_valid(&vms->vm_map, addr, addr + size)) in kern_mmap()
309 * XXX for non-fixed mappings where no hint is provided or in kern_mmap()
319 (addr >= round_page((vm_offset_t)vms->vm_taddr) && in kern_mmap()
320 addr < round_page((vm_offset_t)vms->vm_daddr + in kern_mmap()
322 addr = round_page((vm_offset_t)vms->vm_daddr + in kern_mmap()
328 * binaries that request a page-aligned mapping of in kern_mmap()
334 error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE, in kern_mmap()
342 error = vm_mmap_object(&vms->vm_map, &addr, size, prot, in kern_mmap()
364 p->p_osrel >= P_OSREL_MAP_FSTRICT) { in kern_mmap()
374 if (fp->f_ops == &shm_ops && shm_largepage(fp->f_data)) in kern_mmap()
377 error = fo_mmap(fp, &vms->vm_map, &addr, size, prot, in kern_mmap()
382 td->td_retval[0] = addr + pageoff; in kern_mmap()
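The checks above encode the mmap(2) contract: MAP_ANON demands fd == -1 and a zero offset (relaxed for pre-P_OSREL_MAP_ANON binaries, since old ld.so passed a non-zero pos), and MAP_GUARD demands PROT_NONE with no backing object; on success the page offset is added back to the returned address (line 382). A minimal userland sketch of both cases (helper names are mine):

    #include <sys/mman.h>
    #include <stddef.h>

    /* Anonymous memory: MAP_ANON requires fd == -1 and pos == 0. */
    static void *
    anon_pages(size_t len)
    {
            return (mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON, -1, 0));
    }

    /*
     * Guard region: reserves address space that faults on any access;
     * the kernel insists on PROT_NONE and no backing file.
     */
    static void *
    guard_pages(size_t len)
    {
            return (mmap(NULL, len, PROT_NONE, MAP_GUARD, -1, 0));
    }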
395 .mr_hint = (uintptr_t)uap->addr, in freebsd6_mmap()
396 .mr_len = uap->len, in freebsd6_mmap()
397 .mr_prot = uap->prot, in freebsd6_mmap()
398 .mr_flags = uap->flags, in freebsd6_mmap()
399 .mr_fd = uap->fd, in freebsd6_mmap()
400 .mr_pos = uap->pos, in freebsd6_mmap()
419 return (kern_ommap(td, (uintptr_t)uap->addr, uap->len, uap->prot, in ommap()
420 uap->flags, uap->fd, uap->pos)); in ommap()
449 if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) && in kern_ommap()
486 return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags)); in sys_msync()
499 addr -= pageoff; in kern_msync()
508 map = &td->td_proc->p_vmspace->vm_map; in kern_msync()
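kern_msync() page-rounds the range itself (addr -= pageoff) before operating on the process map, so callers need not align. A hedged userland sketch (the helper name is mine):

    #include <sys/mman.h>
    #include <err.h>
    #include <stddef.h>

    /* Push dirty pages of a shared file mapping to stable storage. */
    static void
    flush_mapping(void *p, size_t len)
    {
            if (msync(p, len, MS_SYNC) == -1)
                    warn("msync");
    }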
539 return (kern_munmap(td, (uintptr_t)uap->addr, uap->len)); in sys_munmap()
560 addr -= pageoff; in kern_munmap()
564 map = &td->td_proc->p_vmspace->vm_map; in kern_munmap()
579 for (; entry->start < end; in kern_munmap()
581 if (vm_map_check_protection(map, entry->start, in kern_munmap()
582 entry->end, VM_PROT_EXECUTE) == TRUE) { in kern_munmap()
595 /* downgrade the lock to prevent a LOR with the pmc-sx lock */ in kern_munmap()
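Before notifying hwpmc about unmapped executable regions, kern_munmap() walks the doomed entries checking for VM_PROT_EXECUTE and downgrades the map lock to avoid a lock-order reversal with the pmc-sx lock (the comment at line 595). The userland side is just munmap(2); a sketch (the helper name is mine):

    #include <sys/mman.h>
    #include <err.h>
    #include <stddef.h>

    /*
     * Tear down a mapping; kern_munmap() page-rounds the range and
     * rejects a zero length with EINVAL.
     */
    static void
    unmap(void *p, size_t len)
    {
            if (munmap(p, len) == -1)
                    warn("munmap");
    }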
618 return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, in sys_mprotect()
619 uap->prot, 0)); in sys_mprotect()
636 addr -= pageoff; in kern_mprotect()
640 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { in kern_mprotect()
651 vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map, in kern_mprotect()
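kern_mprotect() likewise page-rounds the range and special-cases 32-bit (SV_ILP32) processes before calling vm_map_protect(). A typical use is dropping write permission once a region is initialized; a sketch (the helper name is mine), noting the request can fail if it asks for more than the mapping's maximum protection:

    #include <sys/mman.h>
    #include <err.h>
    #include <stddef.h>

    /* Seal a region read-only after initialization. */
    static void
    seal_ro(void *p, size_t len)
    {
            if (mprotect(p, len, PROT_READ) == -1)
                    warn("mprotect");
    }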
678 return (kern_minherit(td, (uintptr_t)uap->addr, uap->len, in sys_minherit()
679 uap->inherit)); in sys_minherit()
694 addr -= pageoff; in kern_minherit()
700 switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr, in kern_minherit()
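kern_minherit() forwards to vm_map_inherit() after the usual rounding. minherit(2) controls what a child sees after fork(2); a minimal sketch (the helper name is mine):

    #include <sys/mman.h>
    #include <err.h>
    #include <stddef.h>

    /*
     * After this call a fork(2)ed child gets an unmapped hole rather
     * than a copy of [p, p + len).
     */
    static void
    no_inherit(void *p, size_t len)
    {
            if (minherit(p, len, INHERIT_NONE) == -1)
                    warn("minherit");
    }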
722 return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav)); in sys_madvise()
738 return (kern_procctl(td, P_PID, td->td_proc->p_pid, in kern_madvise()
746 map = &td->td_proc->p_vmspace->vm_map; in kern_madvise()
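Note the special case above: the kern_procctl() call at line 738 diverts MADV_PROTECT, which is really a process-level operation, away from the map; ordinary advice goes to the process's vm_map. A sketch of the common case (the helper name is mine):

    #include <sys/mman.h>
    #include <err.h>
    #include <stddef.h>

    /* Mark pages disposable: the VM may reclaim them without pageout. */
    static void
    discard(void *p, size_t len)
    {
            if (madvise(p, len, MADV_FREE) == -1)
                    warn("madvise");
    }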
776 return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec)); in sys_mincore()
795 * mode. in kern_mincore()
799 map = &td->td_proc->p_vmspace->vm_map; in kern_mincore()
803 pmap = vmspace_pmap(td->td_proc->p_vmspace); in kern_mincore()
807 timestamp = map->timestamp; in kern_mincore()
819 lastvecindex = -1; in kern_mincore()
820 while (entry->start < end) { in kern_mincore()
826 if (current->end < end && in kern_mincore()
827 entry->start > current->end) { in kern_mincore()
835 if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) || in kern_mincore()
836 current->object.vm_object == NULL) in kern_mincore()
843 if (addr < current->start) in kern_mincore()
844 addr = current->start; in kern_mincore()
845 cend = current->end; in kern_mincore()
873 * object lock is acquired, so re-validate if in kern_mincore()
877 while (object == NULL || m->object != object) { in kern_mincore()
880 object = atomic_load_ptr(&m->object); in kern_mincore()
897 if (current->object.vm_object != object) { in kern_mincore()
900 object = current->object.vm_object; in kern_mincore()
903 if ((object->flags & OBJ_SWAP) != 0 || in kern_mincore()
904 object->type == OBJT_VNODE) { in kern_mincore()
905 pindex = OFF_TO_IDX(current->offset + in kern_mincore()
906 (addr - current->start)); in kern_mincore()
915 VM_OBJECT_ASSERT_WLOCKED(m->object); in kern_mincore()
918 if (m->dirty == 0 && pmap_is_modified(m)) in kern_mincore()
920 if (m->dirty != 0) in kern_mincore()
931 if ((m->a.flags & PGA_REFERENCED) != 0 || in kern_mincore()
933 (m->a.flags & PGA_REFERENCED) != 0) in kern_mincore()
948 vecindex = atop(addr - first_addr); in kern_mincore()
977 if (timestamp != map->timestamp) in kern_mincore()
993 vecindex = atop(end - first_addr); in kern_mincore()
1008 if (timestamp != map->timestamp) in kern_mincore()
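kern_mincore() fills one status byte per page (vecindex = atop(addr - first_addr)) and restarts its walk whenever map->timestamp changes underneath it, as the checks at lines 977 and 1008 show. A userland sketch of the consumer side (the helper name is mine):

    #include <sys/mman.h>
    #include <err.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* One status byte per page, as kern_mincore() computes them. */
    static void
    print_residency(const void *p, size_t len)
    {
            size_t pgsz = (size_t)getpagesize();
            size_t npages = (len + pgsz - 1) / pgsz;
            char *vec = malloc(npages);

            if (vec == NULL)
                    err(1, "malloc");
            if (mincore(p, len, vec) == -1)
                    err(1, "mincore");
            for (size_t i = 0; i < npages; i++)
                    printf("page %zu: %sresident\n", i,
                        (vec[i] & MINCORE_INCORE) ? "" : "not ");
            free(vec);
    }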
1025 return (kern_mlock(td->td_proc, td->td_ucred, in sys_mlock()
1026 __DECONST(uintptr_t, uap->addr), uap->len)); in sys_mlock()
1048 npages = atop(end - start); in kern_mlock()
1051 map = &proc->p_vmspace->vm_map; in kern_mlock()
1053 nsize = ptoa(npages + pmap_wired_count(map->pmap)); in kern_mlock()
1074 ptoa(pmap_wired_count(map->pmap))); in kern_mlock()
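kern_mlock() charges the request against RLIMIT_MEMLOCK (and RACCT_MEMLOCK) using the pmap's wired-page count, so mlock(2) can fail with ENOMEM even for a perfectly valid range. A sketch (the helper name is mine):

    #include <sys/mman.h>
    #include <err.h>
    #include <stddef.h>

    /* Wire a region into physical memory; ENOMEM is a normal failure
     * mode once the RLIMIT_MEMLOCK budget is exhausted. */
    static void
    wire(const void *p, size_t len)
    {
            if (mlock(p, len) == -1)
                    warn("mlock");
    }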
1100 map = &td->td_proc->p_vmspace->vm_map; in sys_mlockall()
1105 if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0)) in sys_mlockall()
1112 if (!old_mlock && uap->how & MCL_CURRENT) { in sys_mlockall()
1113 if (map->size > lim_cur(td, RLIMIT_MEMLOCK)) in sys_mlockall()
1118 PROC_LOCK(td->td_proc); in sys_mlockall()
1119 error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size); in sys_mlockall()
1120 PROC_UNLOCK(td->td_proc); in sys_mlockall()
1126 if (uap->how & MCL_FUTURE) { in sys_mlockall()
1133 if (uap->how & MCL_CURRENT) { in sys_mlockall()
1135 * P1003.1-2001 mandates that all currently mapped pages in sys_mlockall()
1151 PROC_LOCK(td->td_proc); in sys_mlockall()
1152 racct_set(td->td_proc, RACCT_MEMLOCK, in sys_mlockall()
1153 ptoa(pmap_wired_count(map->pmap))); in sys_mlockall()
1154 PROC_UNLOCK(td->td_proc); in sys_mlockall()
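sys_mlockall() rejects any how outside MCL_CURRENT|MCL_FUTURE. MCL_CURRENT wires everything mapped now, as P1003.1-2001 mandates, while MCL_FUTURE sets MAP_WIREFUTURE on the map so later mappings are wired too (vm_mmap_object() honors that flag at the end of this listing). A sketch (the helper name is mine):

    #include <sys/mman.h>
    #include <err.h>

    /* Wire everything mapped now and everything mapped later. */
    static void
    wire_all(void)
    {
            if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
                    err(1, "mlockall");
    }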
1173 map = &td->td_proc->p_vmspace->vm_map; in sys_munlockall()
1188 PROC_LOCK(td->td_proc); in sys_munlockall()
1189 racct_set(td->td_proc, RACCT_MEMLOCK, 0); in sys_munlockall()
1190 PROC_UNLOCK(td->td_proc); in sys_munlockall()
1207 return (kern_munlock(td, (uintptr_t)uap->addr, uap->len)); in sys_munlock()
1228 error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end, in kern_munlock()
1232 PROC_LOCK(td->td_proc); in kern_munlock()
1233 map = &td->td_proc->p_vmspace->vm_map; in kern_munlock()
1234 racct_set(td->td_proc, RACCT_MEMLOCK, in kern_munlock()
1235 ptoa(pmap_wired_count(map->pmap))); in kern_munlock()
1236 PROC_UNLOCK(td->td_proc); in kern_munlock()
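The unwind side: kern_munlock() unwires the range and then re-reports the remaining wired count, ptoa(pmap_wired_count(map->pmap)), to racct; sys_munlockall() zeroes the RACCT_MEMLOCK accounting for the whole map (line 1189). A sketch (the helper name is mine):

    #include <sys/mman.h>
    #include <err.h>
    #include <stddef.h>

    /* Unwire a region; munlockall() would also clear MCL_FUTURE. */
    static void
    unwire(const void *p, size_t len)
    {
            if (munlock(p, len) == -1)
                    warn("munlock");
    }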
1261 cred = td->td_ucred; in vm_mmap_vnode()
1269 obj = vp->v_object; in vm_mmap_vnode()
1270 if (vp->v_type == VREG) { in vm_mmap_vnode()
1278 if (obj->type == OBJT_VNODE && obj->handle != vp) { in vm_mmap_vnode()
1280 vp = (struct vnode *)obj->handle; in vm_mmap_vnode()
1322 if (obj->type == OBJT_VNODE) { in vm_mmap_vnode()
1330 KASSERT((obj->flags & OBJ_SWAP) != 0, ("wrong object type")); in vm_mmap_vnode()
1333 if ((obj->flags & OBJ_COLORED) == 0) { in vm_mmap_vnode()
1370 if (dsw->d_flags & D_MMAP_ANON) { in vm_mmap_cdev()
1390 error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot); in vm_mmap_cdev()
1403 error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot); in vm_mmap_cdev()
1407 td->td_ucred); in vm_mmap_cdev()
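For character devices, vm_mmap_cdev() first checks D_MMAP_ANON: such devices simply yield anonymous memory; otherwise, after the MAC check, the driver's d_mmap_single() gets a chance to supply the backing object. A hedged userland sketch, assuming /dev/zero as the classic D_MMAP_ANON device (the helper name is mine):

    #include <sys/mman.h>
    #include <err.h>
    #include <fcntl.h>
    #include <stddef.h>
    #include <unistd.h>

    /* Mapping /dev/zero yields zero-filled anonymous memory; the fd
     * may be closed immediately, the mapping persists. */
    static void *
    map_dev_zero(size_t len)
    {
            int fd;
            void *p;

            if ((fd = open("/dev/zero", O_RDWR)) == -1)
                    err(1, "open");
            p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            close(fd);
            return (p == MAP_FAILED ? NULL : p);
    }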
1481 RACCT_PROC_LOCK(td->td_proc); in kern_mmap_racct_check()
1482 if (map->size + size > lim_cur(td, RLIMIT_VMEM)) { in kern_mmap_racct_check()
1483 RACCT_PROC_UNLOCK(td->td_proc); in kern_mmap_racct_check()
1486 if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) { in kern_mmap_racct_check()
1487 RACCT_PROC_UNLOCK(td->td_proc); in kern_mmap_racct_check()
1490 if (!old_mlock && map->flags & MAP_WIREFUTURE) { in kern_mmap_racct_check()
1491 if (ptoa(pmap_wired_count(map->pmap)) + size > in kern_mmap_racct_check()
1493 racct_set_force(td->td_proc, RACCT_VMEM, map->size); in kern_mmap_racct_check()
1494 RACCT_PROC_UNLOCK(td->td_proc); in kern_mmap_racct_check()
1497 error = racct_set(td->td_proc, RACCT_MEMLOCK, in kern_mmap_racct_check()
1498 ptoa(pmap_wired_count(map->pmap)) + size); in kern_mmap_racct_check()
1500 racct_set_force(td->td_proc, RACCT_VMEM, map->size); in kern_mmap_racct_check()
1501 RACCT_PROC_UNLOCK(td->td_proc); in kern_mmap_racct_check()
1505 RACCT_PROC_UNLOCK(td->td_proc); in kern_mmap_racct_check()
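kern_mmap_racct_check() enforces RLIMIT_VMEM against map->size plus the new request, and the wired-memory limit when MAP_WIREFUTURE is in effect, failing the mmap with ENOMEM and rolling back the racct charge. One way to observe this from userland (the helper name is mine):

    #include <sys/types.h>
    #include <sys/time.h>
    #include <sys/resource.h>
    #include <err.h>

    /* Cap the process's total VM; subsequent mmap(2) calls that would
     * push map->size past the cap fail with ENOMEM. */
    static void
    cap_vmem(rlim_t bytes)
    {
            struct rlimit rl = { .rlim_cur = bytes, .rlim_max = bytes };

            if (setrlimit(RLIMIT_VMEM, &rl) == -1)
                    err(1, "setrlimit");
    }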
1522 curmap = map == &td->td_proc->p_vmspace->vm_map; in vm_mmap_object()
1592 round_page((vm_offset_t)td->td_proc->p_vmspace-> in vm_mmap_object()
1613 if ((map->flags & MAP_WIREFUTURE) != 0) { in vm_mmap_object()
1615 if ((map->flags & MAP_WIREFUTURE) != 0) in vm_mmap_object()