Lines Matching +full:path +full:- +full:map

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
10 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
39 * Support for shared swap-backed anonymous memory objects via
45 * objects. Per-uid swap resource limit controls total amount of
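
The header comment above describes swap-backed POSIX shared memory objects with per-uid swap accounting. As a hedged userland sketch (not part of this file; "/myshm" is an arbitrary example name), the typical object lifecycle that exercises the code in this listing:

        #include <sys/mman.h>
        #include <fcntl.h>
        #include <unistd.h>

        static int
        example_shm_lifecycle(void)
        {
                void *p;
                int fd;

                fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
                if (fd == -1)
                        return (-1);
                if (ftruncate(fd, 4096) == -1) {        /* reaches shm_dotruncate() */
                        close(fd);
                        return (-1);
                }
                p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
                if (p == MAP_FAILED) {
                        close(fd);
                        return (-1);
                }
                munmap(p, 4096);
                close(fd);
                shm_unlink("/myshm");                   /* drops the dictionary entry */
                return (0);
        }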
124 static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
125 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
126 static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
127 static void shm_doremove(struct shm_mapping *map);
188 rangelock_unlock(&(shmfd)->shm_rl, (cookie))
190 rangelock_rlock(&(shmfd)->shm_rl, (start), (end))
192 rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end))
194 rangelock_wlock(&(shmfd)->shm_rl, (start), (end))
205 idx = OFF_TO_IDX(uio->uio_offset); in uiomove_object_page()
206 offset = uio->uio_offset & PAGE_MASK; in uiomove_object_page()
207 tlen = MIN(PAGE_SIZE - offset, len); in uiomove_object_page()
222 if (uio->uio_rw == UIO_READ && m == NULL && in uiomove_object_page()
249 if (uio->uio_rw == UIO_WRITE && error == 0) in uiomove_object_page()
265 while ((resid = uio->uio_resid) > 0) { in uiomove_object()
266 if (obj_size <= uio->uio_offset) in uiomove_object()
268 len = MIN(obj_size - uio->uio_offset, resid); in uiomove_object()
272 if (error != 0 || resid == uio->uio_resid) in uiomove_object()
287 psind = object->un_pager.phys.data_val; in shm_largepage_phys_populate()
288 if (psind == 0 || pidx >= object->size) in shm_largepage_phys_populate()
301 *last = *first + atop(pagesizes[psind]) - 1; in shm_largepage_phys_populate()
311 psind = object->un_pager.phys.data_val; in shm_largepage_phys_haspage()
312 if (psind == 0 || pindex >= object->size) in shm_largepage_phys_haspage()
315 *before = pindex - rounddown2(pindex, pagesizes[psind] / in shm_largepage_phys_haspage()
319 *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) - in shm_largepage_phys_haspage()
336 psind = object->un_pager.phys.data_val; in shm_largepage_phys_dtor()
339 object->size / (pagesizes[psind] / PAGE_SIZE)); in shm_largepage_phys_dtor()
340 vm_wire_sub(object->size); in shm_largepage_phys_dtor()
342 KASSERT(object->size == 0, in shm_largepage_phys_dtor()
344 object, (uintmax_t)object->size)); in shm_largepage_phys_dtor()
358 return (shmfd->shm_object->type == OBJT_PHYS); in shm_largepage()
371 shm = obj->un_pager.swp.swp_priv; in shm_pager_freespace()
374 KASSERT(shm->shm_pages >= c, in shm_pager_freespace()
376 (uintmax_t)shm->shm_pages, (uintmax_t)c)); in shm_pager_freespace()
377 shm->shm_pages -= c; in shm_pager_freespace()
385 shm = obj->un_pager.swp.swp_priv; in shm_page_inserted()
388 if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) in shm_page_inserted()
389 shm->shm_pages += 1; in shm_page_inserted()
397 shm = obj->un_pager.swp.swp_priv; in shm_page_removed()
400 if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) { in shm_page_removed()
401 KASSERT(shm->shm_pages >= 1, in shm_page_removed()
403 (uintmax_t)shm->shm_pages)); in shm_page_removed()
404 shm->shm_pages -= 1; in shm_page_removed()
414 static int shmfd_pager_type = -1;
423 shmfd = fp->f_data; in shm_seek()
429 (offset > 0 && foffset > OFF_MAX - offset)) { in shm_seek()
436 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) { in shm_seek()
440 offset += shmfd->shm_size; in shm_seek()
448 if (offset < 0 || offset > shmfd->shm_size) in shm_seek()
451 td->td_uretoff.tdu_off = offset; in shm_seek()
465 shmfd = fp->f_data; in shm_read()
467 error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd); in shm_read()
472 rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset, in shm_read()
473 uio->uio_offset + uio->uio_resid); in shm_read()
474 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio); in shm_read()
489 KASSERT((flags & FOF_OFFSET) == 0 || uio->uio_offset >= 0, in shm_write()
492 shmfd = fp->f_data; in shm_write()
494 error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd); in shm_write()
498 if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0) in shm_write()
501 if (uio->uio_resid > OFF_MAX - uio->uio_offset) { in shm_write()
507 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) { in shm_write()
512 newsize = atomic_load_64(&shmfd->shm_size); in shm_write()
514 newsize = uio->uio_offset + uio->uio_resid; in shm_write()
519 rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, in shm_write()
520 MAX(newsize, uio->uio_offset)); in shm_write()
521 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) { in shm_write()
525 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 && in shm_write()
526 newsize > shmfd->shm_size) { in shm_write()
531 error = uiomove_object(shmfd->shm_object, in shm_write()
532 shmfd->shm_size, uio); in shm_write()
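
shm_write() above extends the object before copying when SHM_GROW_ON_WRITE is set, taking the write range lock out to the new size. A hedged sketch, assuming (as the libc source suggests) that memfd_create(3) requests SHM_GROW_ON_WRITE; a plain shm_open(2) object is not grown by write(2):

        #include <sys/mman.h>
        #include <unistd.h>

        static void
        example_grow_on_write(void)
        {
                char buf[8192] = { 0 };
                int mfd;

                /* Assumption: memfd_create() passes SHM_GROW_ON_WRITE. */
                mfd = memfd_create("grow", 0);
                if (mfd != -1) {
                        (void)write(mfd, buf, sizeof(buf)); /* shm_size becomes 8192 */
                        close(mfd);
                }
        }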
548 shmfd = fp->f_data; in shm_truncate()
550 error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd); in shm_truncate()
565 shmfd = fp->f_data; in shm_ioctl()
578 if (shmfd->shm_lp_psind != 0 && in shm_ioctl()
579 conf->psind != shmfd->shm_lp_psind) in shm_ioctl()
581 if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES || in shm_ioctl()
582 pagesizes[conf->psind] == 0) in shm_ioctl()
584 if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT && in shm_ioctl()
585 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT && in shm_ioctl()
586 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD) in shm_ioctl()
590 shmfd->shm_lp_psind = conf->psind; in shm_ioctl()
591 shmfd->shm_lp_alloc_policy = conf->alloc_policy; in shm_ioctl()
592 shmfd->shm_object->un_pager.phys.data_val = conf->psind; in shm_ioctl()
600 conf->psind = shmfd->shm_lp_psind; in shm_ioctl()
601 conf->alloc_policy = shmfd->shm_lp_alloc_policy; in shm_ioctl()
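
The ioctl fragments above validate psind against MAXPAGESIZES and restrict alloc_policy to the three SHM_LARGEPAGE_ALLOC_* values. A hedged userland sketch of driving that path through the shm_create_largepage(3) wrapper; "/lp" is an arbitrary example name, and psind 1 assumes at least one large page size is reported by getpagesizes(3):

        #include <sys/mman.h>
        #include <fcntl.h>
        #include <unistd.h>

        static void *
        example_largepage(void)
        {
                size_t ps[4];
                void *p;
                int fd;

                if (getpagesizes(ps, 4) < 2)
                        return (NULL);
                fd = shm_create_largepage("/lp", O_CREAT | O_RDWR, 1,
                    SHM_LARGEPAGE_ALLOC_DEFAULT, 0600);
                if (fd == -1)
                        return (NULL);
                /* Length must be a multiple of pagesizes[psind]. */
                if (ftruncate(fd, (off_t)ps[1]) == -1) {
                        close(fd);
                        return (NULL);
                }
                p = mmap(NULL, ps[1], PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
                close(fd);
                return (p == MAP_FAILED ? NULL : p);
        }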
617 shmfd = fp->f_data; in shm_stat()
620 error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd); in shm_stat()
630 sb->st_blksize = PAGE_SIZE; in shm_stat()
631 sb->st_size = shmfd->shm_size; in shm_stat()
633 sb->st_atim = shmfd->shm_atime; in shm_stat()
634 sb->st_ctim = shmfd->shm_ctime; in shm_stat()
635 sb->st_mtim = shmfd->shm_mtime; in shm_stat()
636 sb->st_birthtim = shmfd->shm_birthtime; in shm_stat()
637 sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */ in shm_stat()
638 sb->st_uid = shmfd->shm_uid; in shm_stat()
639 sb->st_gid = shmfd->shm_gid; in shm_stat()
641 sb->st_dev = shm_dev_ino; in shm_stat()
642 sb->st_ino = shmfd->shm_ino; in shm_stat()
643 sb->st_nlink = shmfd->shm_object->ref_count; in shm_stat()
645 sb->st_blocks = shmfd->shm_object->size / in shm_stat()
646 (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT); in shm_stat()
648 sb->st_blocks = shmfd->shm_pages; in shm_stat()
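
The shm_stat() fragments above fill a struct stat directly from the shmfd; note that st_blocks is set from the shm_pages accounting (or, for largepage objects, the count of configured-size pages), not 512-byte sectors. A hedged userland sketch of observing those fields:

        #include <sys/stat.h>
        #include <stdint.h>
        #include <stdio.h>

        static void
        example_stat(int fd)
        {
                struct stat sb;

                if (fstat(fd, &sb) == 0)
                        printf("size %jd blocks %jd\n",
                            (intmax_t)sb.st_size, (intmax_t)sb.st_blocks);
        }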
659 shmfd = fp->f_data; in shm_close()
660 fp->f_data = NULL; in shm_close()
669 char *path; in shm_copyin_path() local
673 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK); in shm_copyin_path()
674 pr_path = td->td_ucred->cr_prison->pr_path; in shm_copyin_path()
678 0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN); in shm_copyin_path()
679 error = copyinstr(userpath_in, path + pr_pathlen, in shm_copyin_path()
680 MAXPATHLEN - pr_pathlen, NULL); in shm_copyin_path()
686 ktrnamei(path); in shm_copyin_path()
690 if (path[pr_pathlen] != '/') { in shm_copyin_path()
695 *path_out = path; in shm_copyin_path()
699 free(path, M_SHMFD); in shm_copyin_path()
725 object = shmfd->shm_object; in shm_dotruncate_locked()
728 if (length == shmfd->shm_size) in shm_dotruncate_locked()
733 if (length < shmfd->shm_size) { in shm_dotruncate_locked()
734 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0) in shm_dotruncate_locked()
741 if (shmfd->shm_kmappings > 0) in shm_dotruncate_locked()
754 delta = IDX_TO_OFF(object->size - nobjsize); in shm_dotruncate_locked()
756 if (nobjsize < object->size) in shm_dotruncate_locked()
757 vm_object_page_remove(object, nobjsize, object->size, in shm_dotruncate_locked()
761 swap_release_by_cred(delta, object->cred); in shm_dotruncate_locked()
762 object->charge -= delta; in shm_dotruncate_locked()
764 if ((shmfd->shm_seals & F_SEAL_GROW) != 0) in shm_dotruncate_locked()
768 delta = IDX_TO_OFF(nobjsize - object->size); in shm_dotruncate_locked()
769 if (!swap_reserve_by_cred(delta, object->cred)) in shm_dotruncate_locked()
771 object->charge += delta; in shm_dotruncate_locked()
773 shmfd->shm_size = length; in shm_dotruncate_locked()
775 vfs_timestamp(&shmfd->shm_ctime); in shm_dotruncate_locked()
776 shmfd->shm_mtime = shmfd->shm_ctime; in shm_dotruncate_locked()
778 object->size = nobjsize; in shm_dotruncate_locked()
792 object = shmfd->shm_object; in shm_dotruncate_largepage()
796 oldobjsz = object->size; in shm_dotruncate_largepage()
798 if (length == shmfd->shm_size) in shm_dotruncate_largepage()
800 psind = shmfd->shm_lp_psind; in shm_dotruncate_largepage()
803 if ((length & (pagesizes[psind] - 1)) != 0) in shm_dotruncate_largepage()
806 if (length < shmfd->shm_size) { in shm_dotruncate_largepage()
807 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0) in shm_dotruncate_largepage()
809 if (shmfd->shm_kmappings > 0) in shm_dotruncate_largepage()
814 object->size = newobjsz; in shm_dotruncate_largepage()
815 shmfd->shm_size = length; in shm_dotruncate_largepage()
820 if ((shmfd->shm_seals & F_SEAL_GROW) != 0) in shm_dotruncate_largepage()
824 if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT) in shm_dotruncate_largepage()
833 while (object->size < newobjsz) { in shm_dotruncate_largepage()
834 m = vm_page_alloc_contig(object, object->size, aflags, in shm_dotruncate_largepage()
840 if (shmfd->shm_lp_alloc_policy == in shm_dotruncate_largepage()
842 (shmfd->shm_lp_alloc_policy == in shm_dotruncate_largepage()
868 object->size += OFF_TO_IDX(pagesizes[psind]); in shm_dotruncate_largepage()
869 shmfd->shm_size += pagesizes[psind]; in shm_dotruncate_largepage()
881 VM_OBJECT_WLOCK(shmfd->shm_object); in shm_dotruncate_cookie()
885 VM_OBJECT_WUNLOCK(shmfd->shm_object); in shm_dotruncate_cookie()
927 shmfd->shm_uid = ucred->cr_uid; in shm_alloc()
928 shmfd->shm_gid = ucred->cr_gid; in shm_alloc()
929 shmfd->shm_mode = mode; in shm_alloc()
931 obj->un_pager.phys.phys_priv = shmfd; in shm_alloc()
932 shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT; in shm_alloc()
934 obj->un_pager.swp.swp_priv = shmfd; in shm_alloc()
940 shmfd->shm_object = obj; in shm_alloc()
941 vfs_timestamp(&shmfd->shm_birthtime); in shm_alloc()
942 shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime = in shm_alloc()
943 shmfd->shm_birthtime; in shm_alloc()
944 shmfd->shm_ino = alloc_unr64(&shm_ino_unr); in shm_alloc()
945 refcount_init(&shmfd->shm_refs, 1); in shm_alloc()
946 mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF); in shm_alloc()
947 rangelock_init(&shmfd->shm_rl); in shm_alloc()
960 refcount_acquire(&shmfd->shm_refs); in shm_hold()
969 if (refcount_release(&shmfd->shm_refs)) { in shm_drop()
973 rangelock_destroy(&shmfd->shm_rl); in shm_drop()
974 mtx_destroy(&shmfd->shm_mtx); in shm_drop()
975 obj = shmfd->shm_object; in shm_drop()
978 obj->un_pager.phys.phys_priv = NULL; in shm_drop()
980 obj->un_pager.swp.swp_priv = NULL; in shm_drop()
1003 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, in shm_access()
1023 MPASS(shmfd_pager_type != -1); in shm_init()
1040 "number of non-transient largepages allocated"); in shm_init()
1057 if (shmm->sm_shmfd->shm_object->cred && in shm_remove_prison()
1058 shmm->sm_shmfd->shm_object->cred->cr_prison == pr) in shm_remove_prison()
1066 * Dictionary management. We maintain an in-kernel dictionary to map
1067 * paths to shmfd objects. We use the FNV hash on the path to store
1071 shm_lookup(char *path, Fnv32_t fnv) in shm_lookup() argument
1073 struct shm_mapping *map; in shm_lookup() local
1075 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { in shm_lookup()
1076 if (map->sm_fnv != fnv) in shm_lookup()
1078 if (strcmp(map->sm_path, path) == 0) in shm_lookup()
1079 return (map->sm_shmfd); in shm_lookup()
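
The lookup above walks one hash chain selected by the FNV-1 (32-bit) hash of the path, resolving collisions by comparing full path strings. A minimal sketch of the bucket layout it walks; the dictionary and mask names follow the usual hashinit(9) idiom and are assumptions rather than quotes from this file:

        /* Assumed layout, following the hashinit(9) idiom. */
        struct shm_mapping {
                char            *sm_path;       /* copied-in, jail-prefixed path */
                Fnv32_t         sm_fnv;         /* cached hash of sm_path */
                struct shmfd    *sm_shmfd;      /* referenced shmfd */
                LIST_ENTRY(shm_mapping) sm_link;
        };

        static LIST_HEAD(, shm_mapping) *shm_dictionary;
        static u_long shm_hash;                 /* bucket-index mask */

        #define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])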
1086 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd) in shm_insert() argument
1088 struct shm_mapping *map; in shm_insert() local
1090 map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK); in shm_insert()
1091 map->sm_path = path; in shm_insert()
1092 map->sm_fnv = fnv; in shm_insert()
1093 map->sm_shmfd = shm_hold(shmfd); in shm_insert()
1094 shmfd->shm_path = path; in shm_insert()
1095 LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link); in shm_insert()
1099 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred) in shm_remove() argument
1101 struct shm_mapping *map; in shm_remove() local
1104 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { in shm_remove()
1105 if (map->sm_fnv != fnv) in shm_remove()
1107 if (strcmp(map->sm_path, path) == 0) { in shm_remove()
1109 error = mac_posixshm_check_unlink(ucred, map->sm_shmfd); in shm_remove()
1113 error = shm_access(map->sm_shmfd, ucred, in shm_remove()
1117 shm_doremove(map); in shm_remove()
1126 shm_doremove(struct shm_mapping *map) in shm_doremove() argument
1128 map->sm_shmfd->shm_path = NULL; in shm_doremove()
1129 LIST_REMOVE(map, sm_link); in shm_doremove()
1130 shm_drop(map->sm_shmfd); in shm_doremove()
1131 free(map->sm_path, M_SHMFD); in shm_doremove()
1132 free(map, M_SHMFD); in shm_doremove()
1142 char *path; in kern_shm_open2() local
1174 * already existed -- this currently assumes that only F_SEAL_SEAL can in kern_shm_open2()
1182 error = shm_copyin_path(td, userpath, &path); in kern_shm_open2()
1191 ktrcapfail(CAPFAIL_NAMEI, path); in kern_shm_open2()
1198 AUDIT_ARG_UPATH1_CANON(path); in kern_shm_open2()
1200 path = NULL; in kern_shm_open2()
1203 pdp = td->td_proc->p_pd; in kern_shm_open2()
1204 cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS; in kern_shm_open2()
1208 * by POSIX. We allow it to be unset here so that an in-kernel in kern_shm_open2()
1217 /* A SHM_ANON path pointer creates an anonymous object. */ in kern_shm_open2()
1219 /* A read-only anonymous object is pointless. */ in kern_shm_open2()
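
As the comments above note, SHM_ANON bypasses the path dictionary entirely: the object is reachable only through the descriptor and cannot be shm_unlink()ed, and a read-only anonymous object is rejected. A hedged userland sketch:

        #include <sys/mman.h>
        #include <fcntl.h>

        static int
        example_anon(void)
        {
                /* Read-only would be pointless, hence O_RDWR. */
                return (shm_open(SHM_ANON, O_RDWR, 0600));
        }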
1224 shmfd = shm_alloc(td->td_ucred, cmode, largepage); in kern_shm_open2()
1229 shmfd->shm_seals = initial_seals; in kern_shm_open2()
1230 shmfd->shm_flags = shmflags; in kern_shm_open2()
1232 fnv = fnv_32_str(path, FNV1_32_INIT); in kern_shm_open2()
1234 shmfd = shm_lookup(path, fnv); in kern_shm_open2()
1239 error = mac_posixshm_check_create(td->td_ucred, in kern_shm_open2()
1240 path); in kern_shm_open2()
1243 shmfd = shm_alloc(td->td_ucred, cmode, in kern_shm_open2()
1248 shmfd->shm_seals = in kern_shm_open2()
1250 shmfd->shm_flags = shmflags; in kern_shm_open2()
1251 shm_insert(path, fnv, shmfd); in kern_shm_open2()
1252 path = NULL; in kern_shm_open2()
1275 initial_seals &= ~shmfd->shm_seals; in kern_shm_open2()
1286 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 && in kern_shm_open2()
1292 else if (shmflags != 0 && shmflags != shmfd->shm_flags) in kern_shm_open2()
1296 error = mac_posixshm_check_open(td->td_ucred, in kern_shm_open2()
1300 error = shm_access(shmfd, td->td_ucred, in kern_shm_open2()
1312 VM_OBJECT_WLOCK(shmfd->shm_object); in kern_shm_open2()
1315 td->td_ucred, fp->f_cred, shmfd); in kern_shm_open2()
1320 VM_OBJECT_WUNLOCK(shmfd->shm_object); in kern_shm_open2()
1328 shmfd->shm_seals |= initial_seals; in kern_shm_open2()
1341 td->td_retval[0] = fd; in kern_shm_open2()
1343 free(path, M_SHMFD); in kern_shm_open2()
1351 free(path, M_SHMFD); in kern_shm_open2()
1362 return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, in freebsd12_shm_open()
1363 uap->mode, NULL)); in freebsd12_shm_open()
1370 char *path; in sys_shm_unlink() local
1374 error = shm_copyin_path(td, uap->path, &path); in sys_shm_unlink()
1378 AUDIT_ARG_UPATH1_CANON(path); in sys_shm_unlink()
1379 fnv = fnv_32_str(path, FNV1_32_INIT); in sys_shm_unlink()
1381 error = shm_remove(path, fnv, td->td_ucred); in sys_shm_unlink()
1383 free(path, M_SHMFD); in sys_shm_unlink()
1398 flags = uap->flags; in sys_shm_rename()
1424 if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) { in sys_shm_rename()
1429 error = shm_copyin_path(td, uap->path_from, &path_from); in sys_shm_rename()
1433 error = shm_copyin_path(td, uap->path_to, &path_to); in sys_shm_rename()
1440 /* Rename with from/to equal is a no-op */ in sys_shm_rename()
1466 error = shm_remove(path_from, fnv_from, td->td_ucred); in sys_shm_rename()
1498 error = shm_remove(path_to, fnv_to, td->td_ucred); in sys_shm_rename()
1516 KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n", in sys_shm_rename()
1517 fd_from->shm_refs)); in sys_shm_rename()
1523 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n", in sys_shm_rename()
1524 fd_to->shm_refs)); in sys_shm_rename()
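
The rename fragments above remove the "from" mapping (and, depending on flags, the "to" mapping) while the KASSERTs confirm both shmfds stay referenced mid-rename. A hedged userland sketch of shm_rename(2); the paths are arbitrary examples:

        #include <sys/mman.h>
        #include <err.h>

        static void
        example_rename(void)
        {
                /*
                 * SHM_RENAME_NOREPLACE fails with EEXIST rather than
                 * dropping an existing "/new"; SHM_RENAME_EXCHANGE
                 * swaps the two objects instead.
                 */
                if (shm_rename("/old", "/new", SHM_RENAME_NOREPLACE) == -1)
                        warn("shm_rename");
        }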
1537 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr, in shm_mmap_large() argument
1547 if (shmfd->shm_lp_psind == 0) in shm_mmap_large()
1555 vms = td->td_proc->p_vmspace; in shm_mmap_large()
1556 curmap = map == &vms->vm_map; in shm_mmap_large()
1558 error = kern_mmap_racct_check(td, map, size); in shm_mmap_large()
1563 docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT; in shm_mmap_large()
1568 mask = pagesizes[shmfd->shm_lp_psind] - 1; in shm_mmap_large()
1571 maxaddr = vm_map_max(map); in shm_mmap_large()
1581 align = pagesizes[shmfd->shm_lp_psind]; in shm_mmap_large()
1585 * i.e., [1, VM_NRESERVLEVEL]. shmfd->shm_lp_psind < 1 is in shm_mmap_large()
1590 shmfd->shm_lp_psind > VM_NRESERVLEVEL in shm_mmap_large()
1592 shmfd->shm_lp_psind > 1 in shm_mmap_large()
1596 align = pagesizes[shmfd->shm_lp_psind]; in shm_mmap_large()
1601 if (align < pagesizes[shmfd->shm_lp_psind]) in shm_mmap_large()
1605 vm_map_lock(map); in shm_mmap_large()
1609 (*addr >= round_page((vm_offset_t)vms->vm_taddr) && in shm_mmap_large()
1610 *addr < round_page((vm_offset_t)vms->vm_daddr + in shm_mmap_large()
1612 *addr = roundup2((vm_offset_t)vms->vm_daddr + in shm_mmap_large()
1614 pagesizes[shmfd->shm_lp_psind]); in shm_mmap_large()
1617 rv = vm_map_find_aligned(map, addr, size, maxaddr, align); in shm_mmap_large()
1621 *addr = vm_map_min(map); in shm_mmap_large()
1629 rv = vm_map_delete(map, *addr, *addr + size); in shm_mmap_large()
1634 if (vm_map_lookup_entry(map, *addr, &prev_entry)) in shm_mmap_large()
1637 if (next_entry->start < *addr + size) in shm_mmap_large()
1641 rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size, in shm_mmap_large()
1646 vm_map_unlock(map); in shm_mmap_large()
1651 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize, in shm_mmap() argument
1661 shmfd = fp->f_data; in shm_mmap()
1666 if ((fp->f_flag & FREAD) != 0) in shm_mmap()
1679 if ((fp->f_flag & FWRITE) != 0 && in shm_mmap()
1680 (shmfd->shm_seals & F_SEAL_WRITE) == 0) in shm_mmap()
1685 * VM_PROT_WRITE with mprotect(2), unless a write-seal was in shm_mmap()
1703 foff > OFF_MAX - objsize) { in shm_mmap()
1709 error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags); in shm_mmap()
1715 vfs_timestamp(&shmfd->shm_atime); in shm_mmap()
1717 vm_object_reference(shmfd->shm_object); in shm_mmap()
1721 error = shm_mmap_large(shmfd, map, addr, objsize, prot, in shm_mmap()
1725 vm_pager_update_writecount(shmfd->shm_object, 0, in shm_mmap()
1728 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags, in shm_mmap()
1729 shmfd->shm_object, foff, writecnt, td); in shm_mmap()
1733 vm_pager_release_writecount(shmfd->shm_object, 0, in shm_mmap()
1735 vm_object_deallocate(shmfd->shm_object); in shm_mmap()
1750 shmfd = fp->f_data; in shm_chmod()
1761 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, in shm_chmod()
1765 shmfd->shm_mode = mode & ACCESSPERMS; in shm_chmod()
1779 shmfd = fp->f_data; in shm_chown()
1786 if (uid == (uid_t)-1) in shm_chown()
1787 uid = shmfd->shm_uid; in shm_chown()
1788 if (gid == (gid_t)-1) in shm_chown()
1789 gid = shmfd->shm_gid; in shm_chown()
1790 if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) || in shm_chown()
1791 (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) && in shm_chown()
1794 shmfd->shm_uid = uid; in shm_chown()
1795 shmfd->shm_gid = gid; in shm_chown()
1813 if (fp->f_type != DTYPE_SHM) in shm_map()
1815 shmfd = fp->f_data; in shm_map()
1816 obj = shmfd->shm_object; in shm_map()
1822 if (offset >= shmfd->shm_size || in shm_map()
1823 offset + size > round_page(shmfd->shm_size)) { in shm_map()
1828 shmfd->shm_kmappings++; in shm_map()
1832 /* Map the object into the kernel_map and wire it. */ in shm_map()
1853 shmfd->shm_kmappings--; in shm_map()
1873 vm_map_t map; in shm_unmap() local
1876 if (fp->f_type != DTYPE_SHM) in shm_unmap()
1878 shmfd = fp->f_data; in shm_unmap()
1883 map = kernel_map; in shm_unmap()
1884 rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry, in shm_unmap()
1888 if (entry->start != kva || entry->end != kva + size) { in shm_unmap()
1889 vm_map_lookup_done(map, entry); in shm_unmap()
1892 vm_map_lookup_done(map, entry); in shm_unmap()
1893 if (obj != shmfd->shm_object) in shm_unmap()
1895 vm_map_remove(map, kva, kva + size); in shm_unmap()
1897 KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped")); in shm_unmap()
1898 shmfd->shm_kmappings--; in shm_unmap()
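
shm_map()/shm_unmap() above wire a shm object into kernel_map and track the mapping count in shm_kmappings (which also blocks shrinking truncation, per shm_dotruncate_locked() earlier). A hedged in-kernel sketch of the pair; "fp" is an assumed struct file * of type DTYPE_SHM and the object is assumed to be at least "len" bytes long:

        #include <sys/param.h>
        #include <sys/systm.h>
        #include <sys/file.h>
        #include <sys/mman.h>

        static int
        example_kernel_map(struct file *fp, size_t len)
        {
                void *kva;
                int error;

                error = shm_map(fp, len, 0, &kva);
                if (error != 0)
                        return (error);
                memset(kva, 0, len);    /* pages are wired; safe to touch */
                return (shm_unmap(fp, kva, len));
        }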
1906 const char *path, *pr_path; in shm_fill_kinfo_locked() local
1911 kif->kf_type = KF_TYPE_SHM; in shm_fill_kinfo_locked()
1912 kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode; in shm_fill_kinfo_locked()
1913 kif->kf_un.kf_file.kf_file_size = shmfd->shm_size; in shm_fill_kinfo_locked()
1914 if (shmfd->shm_path != NULL) { in shm_fill_kinfo_locked()
1915 path = shmfd->shm_path; in shm_fill_kinfo_locked()
1916 pr_path = curthread->td_ucred->cr_prison->pr_path; in shm_fill_kinfo_locked()
1918 /* Return the jail-rooted pathname. */ in shm_fill_kinfo_locked()
1920 visible = strncmp(path, pr_path, pr_pathlen) == 0 && in shm_fill_kinfo_locked()
1921 path[pr_pathlen] == '/'; in shm_fill_kinfo_locked()
1925 path += pr_pathlen; in shm_fill_kinfo_locked()
1927 strlcpy(kif->kf_path, path, sizeof(kif->kf_path)); in shm_fill_kinfo_locked()
1939 res = shm_fill_kinfo_locked(fp->f_data, kif, false); in shm_fill_kinfo()
1953 shmfd = fp->f_data; in shm_add_seals()
1956 /* Even already-set seals should result in EPERM. */ in shm_add_seals()
1957 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) { in shm_add_seals()
1961 nseals = seals & ~shmfd->shm_seals; in shm_add_seals()
1974 VM_OBJECT_RLOCK(shmfd->shm_object); in shm_add_seals()
1975 writemappings = shmfd->shm_object->un_pager.swp.writemappings; in shm_add_seals()
1976 VM_OBJECT_RUNLOCK(shmfd->shm_object); in shm_add_seals()
1983 shmfd->shm_seals |= nseals; in shm_add_seals()
1994 shmfd = fp->f_data; in shm_get_seals()
1995 *seals = shmfd->shm_seals; in shm_get_seals()
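
Per the checks in shm_add_seals() above, sealing fails with EPERM once F_SEAL_SEAL is set (even for already-present seals) and with EBUSY if F_SEAL_WRITE is requested while writable mappings remain. A hedged userland sketch of adding seals via fcntl(2):

        #include <fcntl.h>
        #include <err.h>

        static void
        example_seal(int fd)
        {
                if (fcntl(fd, F_ADD_SEALS,
                    F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE |
                    F_SEAL_SEAL) == -1)
                        warn("F_ADD_SEALS");
        }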
2011 if (off + len > shmfd->shm_size) in shm_deallocate()
2012 len = shmfd->shm_size - off; in shm_deallocate()
2013 object = shmfd->shm_object; in shm_deallocate()
2035 off += end - startofs; in shm_deallocate()
2036 len -= end - startofs; in shm_deallocate()
2041 off += IDX_TO_OFF(piend - pi); in shm_deallocate()
2042 len -= IDX_TO_OFF(piend - pi); in shm_deallocate()
2050 len -= endofs; in shm_deallocate()
2054 VM_OBJECT_WUNLOCK(shmfd->shm_object); in shm_deallocate()
2071 ("shm_fspacectl: non-zero flags")); in shm_fspacectl()
2072 KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset, in shm_fspacectl()
2075 shmfd = fp->f_data; in shm_fspacectl()
2082 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) { in shm_fspacectl()
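
shm_fspacectl() above hole-punches a range via shm_deallocate() unless F_SEAL_WRITE is set. A hedged userland sketch of reaching it through fspacectl(2):

        #include <fcntl.h>

        static int
        example_dealloc(int fd, off_t off, off_t len)
        {
                struct spacectl_range r = { .r_offset = off, .r_len = len };

                return (fspacectl(fd, SPACECTL_DEALLOC, &r, 0, NULL));
        }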
2108 shmfd = fp->f_data; in shm_fallocate()
2118 * added to grow a shmfd, this may need to be re-evaluated. in shm_fallocate()
2121 if (size > shmfd->shm_size) in shm_fallocate()
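
shm_fallocate() above only grows the object (and, via the truncation path, its swap charge) when off + len exceeds the current size; the comment notes this may need re-evaluation if other growth mechanisms are added. A hedged userland sketch of reaching it:

        #include <fcntl.h>

        static int
        example_reserve(int fd, off_t len)
        {
                /* Returns an errno value directly, not -1/errno. */
                return (posix_fallocate(fd, 0, len));
        }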
2145 error = shm_fill_kinfo_locked(shmm->sm_shmfd, in sysctl_posix_shm_list()
2172 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode, in kern_shm_open() argument
2176 return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL)); in kern_shm_open()
2182 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC. This
2193 return (kern_shm_open2(td, uap->path, uap->flags, uap->mode, in sys_shm_open2()
2194 uap->shmflags, NULL, uap->name)); in sys_shm_open2()
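
The comment above names memfd_create() as a consumer that opts in to CLOEXEC through shm_open2() rather than having it forced on, as it is for shm_open(2). A hedged userland sketch; the detail that MFD_ALLOW_SEALING leaves F_SEAL_SEAL unset is an assumption from the seal handling earlier in this listing:

        #include <sys/mman.h>

        static int
        example_memfd(void)
        {
                return (memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING));
        }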
2198 shm_get_path(struct vm_object *obj, char *path, size_t sz) in shm_get_path() argument
2207 if ((obj->flags & OBJ_POSIXSHM) == 0) { in shm_get_path()
2210 if (obj->type == shmfd_pager_type) in shm_get_path()
2211 shmfd = obj->un_pager.swp.swp_priv; in shm_get_path()
2212 else if (obj->type == OBJT_PHYS) in shm_get_path()
2213 shmfd = obj->un_pager.phys.phys_priv; in shm_get_path()
2217 strlcpy(path, shmfd->shm_path == NULL ? "anon" : in shm_get_path()
2218 shmfd->shm_path, sz); in shm_get_path()
2222 path[0] = '\0'; in shm_get_path()