1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson 5 * All rights reserved. 6 * 7 * Portions of this software were developed by BAE Systems, the University of 8 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL 9 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent 10 * Computing (TC) research program. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Support for shared swap-backed anonymous memory objects via 36 * shm_open(2) and shm_unlink(2). While most of the implementation is 37 * here, vm_mmap.c contains mapping logic changes. 38 * 39 * posixshmcontrol(1) allows users to inspect the state of the memory 40 * objects. Per-uid swap resource limit controls total amount of 41 * memory that user can consume for anonymous objects, including 42 * shared. 43 */ 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include "opt_capsicum.h" 49 #include "opt_ktrace.h" 50 51 #include <sys/param.h> 52 #include <sys/capsicum.h> 53 #include <sys/conf.h> 54 #include <sys/fcntl.h> 55 #include <sys/file.h> 56 #include <sys/filedesc.h> 57 #include <sys/filio.h> 58 #include <sys/fnv_hash.h> 59 #include <sys/kernel.h> 60 #include <sys/limits.h> 61 #include <sys/uio.h> 62 #include <sys/signal.h> 63 #include <sys/jail.h> 64 #include <sys/ktrace.h> 65 #include <sys/lock.h> 66 #include <sys/malloc.h> 67 #include <sys/mman.h> 68 #include <sys/mutex.h> 69 #include <sys/priv.h> 70 #include <sys/proc.h> 71 #include <sys/refcount.h> 72 #include <sys/resourcevar.h> 73 #include <sys/rwlock.h> 74 #include <sys/sbuf.h> 75 #include <sys/stat.h> 76 #include <sys/syscallsubr.h> 77 #include <sys/sysctl.h> 78 #include <sys/sysproto.h> 79 #include <sys/systm.h> 80 #include <sys/sx.h> 81 #include <sys/time.h> 82 #include <sys/vnode.h> 83 #include <sys/unistd.h> 84 #include <sys/user.h> 85 86 #include <security/audit/audit.h> 87 #include <security/mac/mac_framework.h> 88 89 #include <vm/vm.h> 90 #include <vm/vm_param.h> 91 #include <vm/pmap.h> 92 #include <vm/vm_extern.h> 93 #include <vm/vm_map.h> 94 #include <vm/vm_kern.h> 95 #include <vm/vm_object.h> 96 #include <vm/vm_page.h> 97 #include <vm/vm_pageout.h> 98 #include <vm/vm_pager.h> 99 #include <vm/swap_pager.h> 100 101 struct shm_mapping { 102 char *sm_path; 103 Fnv32_t sm_fnv; 104 struct shmfd *sm_shmfd; 105 LIST_ENTRY(shm_mapping) sm_link; 106 }; 107 108 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor"); 109 static LIST_HEAD(, shm_mapping) *shm_dictionary; 110 static struct sx shm_dict_lock; 111 static struct mtx shm_timestamp_lock; 112 static u_long shm_hash; 113 static struct unrhdr64 shm_ino_unr; 114 static dev_t shm_dev_ino; 115 116 #define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash]) 117 118 static void shm_init(void *arg); 119 static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd); 120 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv); 121 static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred); 122 static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length, 123 void *rl_cookie); 124 125 static fo_rdwr_t shm_read; 126 static fo_rdwr_t shm_write; 127 static fo_truncate_t shm_truncate; 128 static fo_ioctl_t shm_ioctl; 129 static fo_stat_t shm_stat; 130 static fo_close_t shm_close; 131 static fo_chmod_t shm_chmod; 132 static fo_chown_t shm_chown; 133 static fo_seek_t shm_seek; 134 static fo_fill_kinfo_t shm_fill_kinfo; 135 static fo_mmap_t shm_mmap; 136 static fo_get_seals_t shm_get_seals; 137 static fo_add_seals_t shm_add_seals; 138 139 /* File descriptor operations. */ 140 struct fileops shm_ops = { 141 .fo_read = shm_read, 142 .fo_write = shm_write, 143 .fo_truncate = shm_truncate, 144 .fo_ioctl = shm_ioctl, 145 .fo_poll = invfo_poll, 146 .fo_kqfilter = invfo_kqfilter, 147 .fo_stat = shm_stat, 148 .fo_close = shm_close, 149 .fo_chmod = shm_chmod, 150 .fo_chown = shm_chown, 151 .fo_sendfile = vn_sendfile, 152 .fo_seek = shm_seek, 153 .fo_fill_kinfo = shm_fill_kinfo, 154 .fo_mmap = shm_mmap, 155 .fo_get_seals = shm_get_seals, 156 .fo_add_seals = shm_add_seals, 157 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE 158 }; 159 160 FEATURE(posix_shm, "POSIX shared memory"); 161 162 static int 163 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio) 164 { 165 vm_page_t m; 166 vm_pindex_t idx; 167 size_t tlen; 168 int error, offset, rv; 169 170 idx = OFF_TO_IDX(uio->uio_offset); 171 offset = uio->uio_offset & PAGE_MASK; 172 tlen = MIN(PAGE_SIZE - offset, len); 173 174 VM_OBJECT_WLOCK(obj); 175 176 /* 177 * Read I/O without either a corresponding resident page or swap 178 * page: use zero_region. This is intended to avoid instantiating 179 * pages on read from a sparse region. 180 */ 181 if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL && 182 !vm_pager_has_page(obj, idx, NULL, NULL)) { 183 VM_OBJECT_WUNLOCK(obj); 184 return (uiomove(__DECONST(void *, zero_region), tlen, uio)); 185 } 186 187 /* 188 * Parallel reads of the page content from disk are prevented 189 * by exclusive busy. 190 * 191 * Although the tmpfs vnode lock is held here, it is 192 * nonetheless safe to sleep waiting for a free page. The 193 * pageout daemon does not need to acquire the tmpfs vnode 194 * lock to page out tobj's pages because tobj is a OBJT_SWAP 195 * type object. 196 */ 197 rv = vm_page_grab_valid(&m, obj, idx, 198 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY); 199 if (rv != VM_PAGER_OK) { 200 VM_OBJECT_WUNLOCK(obj); 201 printf("uiomove_object: vm_obj %p idx %jd pager error %d\n", 202 obj, idx, rv); 203 return (EIO); 204 } 205 VM_OBJECT_WUNLOCK(obj); 206 error = uiomove_fromphys(&m, offset, tlen, uio); 207 if (uio->uio_rw == UIO_WRITE && error == 0) { 208 VM_OBJECT_WLOCK(obj); 209 vm_page_dirty(m); 210 vm_pager_page_unswapped(m); 211 VM_OBJECT_WUNLOCK(obj); 212 } 213 vm_page_unwire(m, PQ_ACTIVE); 214 215 return (error); 216 } 217 218 int 219 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio) 220 { 221 ssize_t resid; 222 size_t len; 223 int error; 224 225 error = 0; 226 while ((resid = uio->uio_resid) > 0) { 227 if (obj_size <= uio->uio_offset) 228 break; 229 len = MIN(obj_size - uio->uio_offset, resid); 230 if (len == 0) 231 break; 232 error = uiomove_object_page(obj, len, uio); 233 if (error != 0 || resid == uio->uio_resid) 234 break; 235 } 236 return (error); 237 } 238 239 static int 240 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td) 241 { 242 struct shmfd *shmfd; 243 off_t foffset; 244 int error; 245 246 shmfd = fp->f_data; 247 foffset = foffset_lock(fp, 0); 248 error = 0; 249 switch (whence) { 250 case L_INCR: 251 if (foffset < 0 || 252 (offset > 0 && foffset > OFF_MAX - offset)) { 253 error = EOVERFLOW; 254 break; 255 } 256 offset += foffset; 257 break; 258 case L_XTND: 259 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) { 260 error = EOVERFLOW; 261 break; 262 } 263 offset += shmfd->shm_size; 264 break; 265 case L_SET: 266 break; 267 default: 268 error = EINVAL; 269 } 270 if (error == 0) { 271 if (offset < 0 || offset > shmfd->shm_size) 272 error = EINVAL; 273 else 274 td->td_uretoff.tdu_off = offset; 275 } 276 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0); 277 return (error); 278 } 279 280 static int 281 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred, 282 int flags, struct thread *td) 283 { 284 struct shmfd *shmfd; 285 void *rl_cookie; 286 int error; 287 288 shmfd = fp->f_data; 289 #ifdef MAC 290 error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd); 291 if (error) 292 return (error); 293 #endif 294 foffset_lock_uio(fp, uio, flags); 295 rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset, 296 uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx); 297 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio); 298 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 299 foffset_unlock_uio(fp, uio, flags); 300 return (error); 301 } 302 303 static int 304 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred, 305 int flags, struct thread *td) 306 { 307 struct shmfd *shmfd; 308 void *rl_cookie; 309 int error; 310 311 shmfd = fp->f_data; 312 #ifdef MAC 313 error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd); 314 if (error) 315 return (error); 316 #endif 317 foffset_lock_uio(fp, uio, flags); 318 if ((flags & FOF_OFFSET) == 0) { 319 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 320 &shmfd->shm_mtx); 321 } else { 322 rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset, 323 uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx); 324 } 325 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) 326 error = EPERM; 327 else 328 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio); 329 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 330 foffset_unlock_uio(fp, uio, flags); 331 return (error); 332 } 333 334 static int 335 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred, 336 struct thread *td) 337 { 338 struct shmfd *shmfd; 339 #ifdef MAC 340 int error; 341 #endif 342 343 shmfd = fp->f_data; 344 #ifdef MAC 345 error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd); 346 if (error) 347 return (error); 348 #endif 349 return (shm_dotruncate(shmfd, length)); 350 } 351 352 int 353 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred, 354 struct thread *td) 355 { 356 357 switch (com) { 358 case FIONBIO: 359 case FIOASYNC: 360 /* 361 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, 362 * just like it would on an unlinked regular file 363 */ 364 return (0); 365 default: 366 return (ENOTTY); 367 } 368 } 369 370 static int 371 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 372 struct thread *td) 373 { 374 struct shmfd *shmfd; 375 #ifdef MAC 376 int error; 377 #endif 378 379 shmfd = fp->f_data; 380 381 #ifdef MAC 382 error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd); 383 if (error) 384 return (error); 385 #endif 386 387 /* 388 * Attempt to return sanish values for fstat() on a memory file 389 * descriptor. 390 */ 391 bzero(sb, sizeof(*sb)); 392 sb->st_blksize = PAGE_SIZE; 393 sb->st_size = shmfd->shm_size; 394 sb->st_blocks = howmany(sb->st_size, sb->st_blksize); 395 mtx_lock(&shm_timestamp_lock); 396 sb->st_atim = shmfd->shm_atime; 397 sb->st_ctim = shmfd->shm_ctime; 398 sb->st_mtim = shmfd->shm_mtime; 399 sb->st_birthtim = shmfd->shm_birthtime; 400 sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */ 401 sb->st_uid = shmfd->shm_uid; 402 sb->st_gid = shmfd->shm_gid; 403 mtx_unlock(&shm_timestamp_lock); 404 sb->st_dev = shm_dev_ino; 405 sb->st_ino = shmfd->shm_ino; 406 sb->st_nlink = shmfd->shm_object->ref_count; 407 408 return (0); 409 } 410 411 static int 412 shm_close(struct file *fp, struct thread *td) 413 { 414 struct shmfd *shmfd; 415 416 shmfd = fp->f_data; 417 fp->f_data = NULL; 418 shm_drop(shmfd); 419 420 return (0); 421 } 422 423 static int 424 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie) 425 { 426 vm_object_t object; 427 vm_page_t m; 428 vm_pindex_t idx, nobjsize; 429 vm_ooffset_t delta; 430 int base, rv; 431 432 KASSERT(length >= 0, ("shm_dotruncate: length < 0")); 433 object = shmfd->shm_object; 434 VM_OBJECT_ASSERT_WLOCKED(object); 435 rangelock_cookie_assert(rl_cookie, RA_WLOCKED); 436 if (length == shmfd->shm_size) 437 return (0); 438 nobjsize = OFF_TO_IDX(length + PAGE_MASK); 439 440 /* Are we shrinking? If so, trim the end. */ 441 if (length < shmfd->shm_size) { 442 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0) 443 return (EPERM); 444 445 /* 446 * Disallow any requests to shrink the size if this 447 * object is mapped into the kernel. 448 */ 449 if (shmfd->shm_kmappings > 0) 450 return (EBUSY); 451 452 /* 453 * Zero the truncated part of the last page. 454 */ 455 base = length & PAGE_MASK; 456 if (base != 0) { 457 idx = OFF_TO_IDX(length); 458 retry: 459 m = vm_page_lookup(object, idx); 460 if (m != NULL) { 461 if (vm_page_sleep_if_busy(m, "shmtrc")) 462 goto retry; 463 } else if (vm_pager_has_page(object, idx, NULL, NULL)) { 464 m = vm_page_alloc(object, idx, 465 VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL); 466 if (m == NULL) 467 goto retry; 468 rv = vm_pager_get_pages(object, &m, 1, NULL, 469 NULL); 470 if (rv == VM_PAGER_OK) { 471 /* 472 * Since the page was not resident, 473 * and therefore not recently 474 * accessed, immediately enqueue it 475 * for asynchronous laundering. The 476 * current operation is not regarded 477 * as an access. 478 */ 479 vm_page_launder(m); 480 vm_page_xunbusy(m); 481 } else { 482 vm_page_free(m); 483 VM_OBJECT_WUNLOCK(object); 484 return (EIO); 485 } 486 } 487 if (m != NULL) { 488 pmap_zero_page_area(m, base, PAGE_SIZE - base); 489 KASSERT(m->valid == VM_PAGE_BITS_ALL, 490 ("shm_dotruncate: page %p is invalid", m)); 491 vm_page_dirty(m); 492 vm_pager_page_unswapped(m); 493 } 494 } 495 delta = IDX_TO_OFF(object->size - nobjsize); 496 497 /* Toss in memory pages. */ 498 if (nobjsize < object->size) 499 vm_object_page_remove(object, nobjsize, object->size, 500 0); 501 502 /* Toss pages from swap. */ 503 if (object->type == OBJT_SWAP) 504 swap_pager_freespace(object, nobjsize, delta); 505 506 /* Free the swap accounted for shm */ 507 swap_release_by_cred(delta, object->cred); 508 object->charge -= delta; 509 } else { 510 if ((shmfd->shm_seals & F_SEAL_GROW) != 0) 511 return (EPERM); 512 513 /* Try to reserve additional swap space. */ 514 delta = IDX_TO_OFF(nobjsize - object->size); 515 if (!swap_reserve_by_cred(delta, object->cred)) 516 return (ENOMEM); 517 object->charge += delta; 518 } 519 shmfd->shm_size = length; 520 mtx_lock(&shm_timestamp_lock); 521 vfs_timestamp(&shmfd->shm_ctime); 522 shmfd->shm_mtime = shmfd->shm_ctime; 523 mtx_unlock(&shm_timestamp_lock); 524 object->size = nobjsize; 525 return (0); 526 } 527 528 int 529 shm_dotruncate(struct shmfd *shmfd, off_t length) 530 { 531 void *rl_cookie; 532 int error; 533 534 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 535 &shmfd->shm_mtx); 536 VM_OBJECT_WLOCK(shmfd->shm_object); 537 error = shm_dotruncate_locked(shmfd, length, rl_cookie); 538 VM_OBJECT_WUNLOCK(shmfd->shm_object); 539 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 540 return (error); 541 } 542 543 /* 544 * shmfd object management including creation and reference counting 545 * routines. 546 */ 547 struct shmfd * 548 shm_alloc(struct ucred *ucred, mode_t mode) 549 { 550 struct shmfd *shmfd; 551 552 shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO); 553 shmfd->shm_size = 0; 554 shmfd->shm_uid = ucred->cr_uid; 555 shmfd->shm_gid = ucred->cr_gid; 556 shmfd->shm_mode = mode; 557 shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL, 558 shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred); 559 KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate")); 560 shmfd->shm_object->pg_color = 0; 561 VM_OBJECT_WLOCK(shmfd->shm_object); 562 vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING); 563 vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT); 564 VM_OBJECT_WUNLOCK(shmfd->shm_object); 565 vfs_timestamp(&shmfd->shm_birthtime); 566 shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime = 567 shmfd->shm_birthtime; 568 shmfd->shm_ino = alloc_unr64(&shm_ino_unr); 569 refcount_init(&shmfd->shm_refs, 1); 570 mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF); 571 rangelock_init(&shmfd->shm_rl); 572 #ifdef MAC 573 mac_posixshm_init(shmfd); 574 mac_posixshm_create(ucred, shmfd); 575 #endif 576 577 return (shmfd); 578 } 579 580 struct shmfd * 581 shm_hold(struct shmfd *shmfd) 582 { 583 584 refcount_acquire(&shmfd->shm_refs); 585 return (shmfd); 586 } 587 588 void 589 shm_drop(struct shmfd *shmfd) 590 { 591 592 if (refcount_release(&shmfd->shm_refs)) { 593 #ifdef MAC 594 mac_posixshm_destroy(shmfd); 595 #endif 596 rangelock_destroy(&shmfd->shm_rl); 597 mtx_destroy(&shmfd->shm_mtx); 598 vm_object_deallocate(shmfd->shm_object); 599 free(shmfd, M_SHMFD); 600 } 601 } 602 603 /* 604 * Determine if the credentials have sufficient permissions for a 605 * specified combination of FREAD and FWRITE. 606 */ 607 int 608 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags) 609 { 610 accmode_t accmode; 611 int error; 612 613 accmode = 0; 614 if (flags & FREAD) 615 accmode |= VREAD; 616 if (flags & FWRITE) 617 accmode |= VWRITE; 618 mtx_lock(&shm_timestamp_lock); 619 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, 620 accmode, ucred, NULL); 621 mtx_unlock(&shm_timestamp_lock); 622 return (error); 623 } 624 625 /* 626 * Dictionary management. We maintain an in-kernel dictionary to map 627 * paths to shmfd objects. We use the FNV hash on the path to store 628 * the mappings in a hash table. 629 */ 630 static void 631 shm_init(void *arg) 632 { 633 634 mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF); 635 sx_init(&shm_dict_lock, "shm dictionary"); 636 shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash); 637 new_unrhdr64(&shm_ino_unr, 1); 638 shm_dev_ino = devfs_alloc_cdp_inode(); 639 KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized")); 640 } 641 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL); 642 643 static struct shmfd * 644 shm_lookup(char *path, Fnv32_t fnv) 645 { 646 struct shm_mapping *map; 647 648 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { 649 if (map->sm_fnv != fnv) 650 continue; 651 if (strcmp(map->sm_path, path) == 0) 652 return (map->sm_shmfd); 653 } 654 655 return (NULL); 656 } 657 658 static void 659 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd) 660 { 661 struct shm_mapping *map; 662 663 map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK); 664 map->sm_path = path; 665 map->sm_fnv = fnv; 666 map->sm_shmfd = shm_hold(shmfd); 667 shmfd->shm_path = path; 668 LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link); 669 } 670 671 static int 672 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred) 673 { 674 struct shm_mapping *map; 675 int error; 676 677 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { 678 if (map->sm_fnv != fnv) 679 continue; 680 if (strcmp(map->sm_path, path) == 0) { 681 #ifdef MAC 682 error = mac_posixshm_check_unlink(ucred, map->sm_shmfd); 683 if (error) 684 return (error); 685 #endif 686 error = shm_access(map->sm_shmfd, ucred, 687 FREAD | FWRITE); 688 if (error) 689 return (error); 690 map->sm_shmfd->shm_path = NULL; 691 LIST_REMOVE(map, sm_link); 692 shm_drop(map->sm_shmfd); 693 free(map->sm_path, M_SHMFD); 694 free(map, M_SHMFD); 695 return (0); 696 } 697 } 698 699 return (ENOENT); 700 } 701 702 int 703 kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode, 704 struct filecaps *fcaps, int initial_seals) 705 { 706 struct filedesc *fdp; 707 struct shmfd *shmfd; 708 struct file *fp; 709 char *path; 710 const char *pr_path; 711 void *rl_cookie; 712 size_t pr_pathlen; 713 Fnv32_t fnv; 714 mode_t cmode; 715 int fd, error; 716 717 #ifdef CAPABILITY_MODE 718 /* 719 * shm_open(2) is only allowed for anonymous objects. 720 */ 721 if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON)) 722 return (ECAPMODE); 723 #endif 724 725 AUDIT_ARG_FFLAGS(flags); 726 AUDIT_ARG_MODE(mode); 727 728 if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR) 729 return (EINVAL); 730 731 if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0) 732 return (EINVAL); 733 734 /* 735 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd. 736 * If the decision is made later to allow additional seals, care must be 737 * taken below to ensure that the seals are properly set if the shmfd 738 * already existed -- this currently assumes that only F_SEAL_SEAL can 739 * be set and doesn't take further precautions to ensure the validity of 740 * the seals being added with respect to current mappings. 741 */ 742 if ((initial_seals & ~F_SEAL_SEAL) != 0) 743 return (EINVAL); 744 745 fdp = td->td_proc->p_fd; 746 cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS; 747 748 /* 749 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated 750 * by POSIX. We allow it to be unset here so that an in-kernel 751 * interface may be written as a thin layer around shm, optionally not 752 * setting CLOEXEC. For shm_open(2), O_CLOEXEC is set unconditionally 753 * in sys_shm_open() to keep this implementation compliant. 754 */ 755 error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps); 756 if (error) 757 return (error); 758 759 /* A SHM_ANON path pointer creates an anonymous object. */ 760 if (userpath == SHM_ANON) { 761 /* A read-only anonymous object is pointless. */ 762 if ((flags & O_ACCMODE) == O_RDONLY) { 763 fdclose(td, fp, fd); 764 fdrop(fp, td); 765 return (EINVAL); 766 } 767 shmfd = shm_alloc(td->td_ucred, cmode); 768 shmfd->shm_seals = initial_seals; 769 } else { 770 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK); 771 pr_path = td->td_ucred->cr_prison->pr_path; 772 773 /* Construct a full pathname for jailed callers. */ 774 pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 775 : strlcpy(path, pr_path, MAXPATHLEN); 776 error = copyinstr(userpath, path + pr_pathlen, 777 MAXPATHLEN - pr_pathlen, NULL); 778 #ifdef KTRACE 779 if (error == 0 && KTRPOINT(curthread, KTR_NAMEI)) 780 ktrnamei(path); 781 #endif 782 /* Require paths to start with a '/' character. */ 783 if (error == 0 && path[pr_pathlen] != '/') 784 error = EINVAL; 785 if (error) { 786 fdclose(td, fp, fd); 787 fdrop(fp, td); 788 free(path, M_SHMFD); 789 return (error); 790 } 791 792 AUDIT_ARG_UPATH1_CANON(path); 793 fnv = fnv_32_str(path, FNV1_32_INIT); 794 sx_xlock(&shm_dict_lock); 795 shmfd = shm_lookup(path, fnv); 796 if (shmfd == NULL) { 797 /* Object does not yet exist, create it if requested. */ 798 if (flags & O_CREAT) { 799 #ifdef MAC 800 error = mac_posixshm_check_create(td->td_ucred, 801 path); 802 if (error == 0) { 803 #endif 804 shmfd = shm_alloc(td->td_ucred, cmode); 805 shmfd->shm_seals = initial_seals; 806 shm_insert(path, fnv, shmfd); 807 #ifdef MAC 808 } 809 #endif 810 } else { 811 free(path, M_SHMFD); 812 error = ENOENT; 813 } 814 } else { 815 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 816 &shmfd->shm_mtx); 817 818 /* 819 * kern_shm_open() likely shouldn't ever error out on 820 * trying to set a seal that already exists, unlike 821 * F_ADD_SEALS. This would break terribly as 822 * shm_open(2) actually sets F_SEAL_SEAL to maintain 823 * historical behavior where the underlying file could 824 * not be sealed. 825 */ 826 initial_seals &= ~shmfd->shm_seals; 827 828 /* 829 * Object already exists, obtain a new 830 * reference if requested and permitted. 831 */ 832 free(path, M_SHMFD); 833 834 /* 835 * initial_seals can't set additional seals if we've 836 * already been set F_SEAL_SEAL. If F_SEAL_SEAL is set, 837 * then we've already removed that one from 838 * initial_seals. This is currently redundant as we 839 * only allow setting F_SEAL_SEAL at creation time, but 840 * it's cheap to check and decreases the effort required 841 * to allow additional seals. 842 */ 843 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 && 844 initial_seals != 0) 845 error = EPERM; 846 else if ((flags & (O_CREAT | O_EXCL)) == 847 (O_CREAT | O_EXCL)) 848 error = EEXIST; 849 else { 850 #ifdef MAC 851 error = mac_posixshm_check_open(td->td_ucred, 852 shmfd, FFLAGS(flags & O_ACCMODE)); 853 if (error == 0) 854 #endif 855 error = shm_access(shmfd, td->td_ucred, 856 FFLAGS(flags & O_ACCMODE)); 857 } 858 859 /* 860 * Truncate the file back to zero length if 861 * O_TRUNC was specified and the object was 862 * opened with read/write. 863 */ 864 if (error == 0 && 865 (flags & (O_ACCMODE | O_TRUNC)) == 866 (O_RDWR | O_TRUNC)) { 867 VM_OBJECT_WLOCK(shmfd->shm_object); 868 #ifdef MAC 869 error = mac_posixshm_check_truncate( 870 td->td_ucred, fp->f_cred, shmfd); 871 if (error == 0) 872 #endif 873 error = shm_dotruncate_locked(shmfd, 0, 874 rl_cookie); 875 VM_OBJECT_WUNLOCK(shmfd->shm_object); 876 } 877 if (error == 0) { 878 /* 879 * Currently we only allow F_SEAL_SEAL to be 880 * set initially. As noted above, this would 881 * need to be reworked should that change. 882 */ 883 shmfd->shm_seals |= initial_seals; 884 shm_hold(shmfd); 885 } 886 rangelock_unlock(&shmfd->shm_rl, rl_cookie, 887 &shmfd->shm_mtx); 888 } 889 sx_xunlock(&shm_dict_lock); 890 891 if (error) { 892 fdclose(td, fp, fd); 893 fdrop(fp, td); 894 return (error); 895 } 896 } 897 898 finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops); 899 900 td->td_retval[0] = fd; 901 fdrop(fp, td); 902 903 return (0); 904 } 905 906 /* System calls. */ 907 #ifdef COMPAT_FREEBSD12 908 int 909 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap) 910 { 911 912 return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, uap->mode, 913 NULL, F_SEAL_SEAL)); 914 } 915 #endif 916 917 int 918 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap) 919 { 920 char *path; 921 const char *pr_path; 922 size_t pr_pathlen; 923 Fnv32_t fnv; 924 int error; 925 926 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 927 pr_path = td->td_ucred->cr_prison->pr_path; 928 pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 929 : strlcpy(path, pr_path, MAXPATHLEN); 930 error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen, 931 NULL); 932 if (error) { 933 free(path, M_TEMP); 934 return (error); 935 } 936 #ifdef KTRACE 937 if (KTRPOINT(curthread, KTR_NAMEI)) 938 ktrnamei(path); 939 #endif 940 AUDIT_ARG_UPATH1_CANON(path); 941 fnv = fnv_32_str(path, FNV1_32_INIT); 942 sx_xlock(&shm_dict_lock); 943 error = shm_remove(path, fnv, td->td_ucred); 944 sx_xunlock(&shm_dict_lock); 945 free(path, M_TEMP); 946 947 return (error); 948 } 949 950 int 951 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize, 952 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, 953 vm_ooffset_t foff, struct thread *td) 954 { 955 struct shmfd *shmfd; 956 vm_prot_t maxprot; 957 int error; 958 bool writecnt; 959 void *rl_cookie; 960 961 shmfd = fp->f_data; 962 maxprot = VM_PROT_NONE; 963 964 rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize, 965 &shmfd->shm_mtx); 966 /* FREAD should always be set. */ 967 if ((fp->f_flag & FREAD) != 0) 968 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ; 969 if ((fp->f_flag & FWRITE) != 0) 970 maxprot |= VM_PROT_WRITE; 971 972 writecnt = (flags & MAP_SHARED) != 0 && (prot & VM_PROT_WRITE) != 0; 973 974 if (writecnt && (shmfd->shm_seals & F_SEAL_WRITE) != 0) { 975 error = EPERM; 976 goto out; 977 } 978 979 /* Don't permit shared writable mappings on read-only descriptors. */ 980 if (writecnt && (maxprot & VM_PROT_WRITE) == 0) { 981 error = EACCES; 982 goto out; 983 } 984 maxprot &= cap_maxprot; 985 986 /* See comment in vn_mmap(). */ 987 if ( 988 #ifdef _LP64 989 objsize > OFF_MAX || 990 #endif 991 foff < 0 || foff > OFF_MAX - objsize) { 992 error = EINVAL; 993 goto out; 994 } 995 996 #ifdef MAC 997 error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags); 998 if (error != 0) 999 goto out; 1000 #endif 1001 1002 mtx_lock(&shm_timestamp_lock); 1003 vfs_timestamp(&shmfd->shm_atime); 1004 mtx_unlock(&shm_timestamp_lock); 1005 vm_object_reference(shmfd->shm_object); 1006 1007 if (writecnt) 1008 vm_pager_update_writecount(shmfd->shm_object, 0, objsize); 1009 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags, 1010 shmfd->shm_object, foff, writecnt, td); 1011 if (error != 0) { 1012 if (writecnt) 1013 vm_pager_release_writecount(shmfd->shm_object, 0, 1014 objsize); 1015 vm_object_deallocate(shmfd->shm_object); 1016 } 1017 out: 1018 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 1019 return (error); 1020 } 1021 1022 static int 1023 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, 1024 struct thread *td) 1025 { 1026 struct shmfd *shmfd; 1027 int error; 1028 1029 error = 0; 1030 shmfd = fp->f_data; 1031 mtx_lock(&shm_timestamp_lock); 1032 /* 1033 * SUSv4 says that x bits of permission need not be affected. 1034 * Be consistent with our shm_open there. 1035 */ 1036 #ifdef MAC 1037 error = mac_posixshm_check_setmode(active_cred, shmfd, mode); 1038 if (error != 0) 1039 goto out; 1040 #endif 1041 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, 1042 shmfd->shm_gid, VADMIN, active_cred, NULL); 1043 if (error != 0) 1044 goto out; 1045 shmfd->shm_mode = mode & ACCESSPERMS; 1046 out: 1047 mtx_unlock(&shm_timestamp_lock); 1048 return (error); 1049 } 1050 1051 static int 1052 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, 1053 struct thread *td) 1054 { 1055 struct shmfd *shmfd; 1056 int error; 1057 1058 error = 0; 1059 shmfd = fp->f_data; 1060 mtx_lock(&shm_timestamp_lock); 1061 #ifdef MAC 1062 error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid); 1063 if (error != 0) 1064 goto out; 1065 #endif 1066 if (uid == (uid_t)-1) 1067 uid = shmfd->shm_uid; 1068 if (gid == (gid_t)-1) 1069 gid = shmfd->shm_gid; 1070 if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) || 1071 (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) && 1072 (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN))) 1073 goto out; 1074 shmfd->shm_uid = uid; 1075 shmfd->shm_gid = gid; 1076 out: 1077 mtx_unlock(&shm_timestamp_lock); 1078 return (error); 1079 } 1080 1081 /* 1082 * Helper routines to allow the backing object of a shared memory file 1083 * descriptor to be mapped in the kernel. 1084 */ 1085 int 1086 shm_map(struct file *fp, size_t size, off_t offset, void **memp) 1087 { 1088 struct shmfd *shmfd; 1089 vm_offset_t kva, ofs; 1090 vm_object_t obj; 1091 int rv; 1092 1093 if (fp->f_type != DTYPE_SHM) 1094 return (EINVAL); 1095 shmfd = fp->f_data; 1096 obj = shmfd->shm_object; 1097 VM_OBJECT_WLOCK(obj); 1098 /* 1099 * XXXRW: This validation is probably insufficient, and subject to 1100 * sign errors. It should be fixed. 1101 */ 1102 if (offset >= shmfd->shm_size || 1103 offset + size > round_page(shmfd->shm_size)) { 1104 VM_OBJECT_WUNLOCK(obj); 1105 return (EINVAL); 1106 } 1107 1108 shmfd->shm_kmappings++; 1109 vm_object_reference_locked(obj); 1110 VM_OBJECT_WUNLOCK(obj); 1111 1112 /* Map the object into the kernel_map and wire it. */ 1113 kva = vm_map_min(kernel_map); 1114 ofs = offset & PAGE_MASK; 1115 offset = trunc_page(offset); 1116 size = round_page(size + ofs); 1117 rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0, 1118 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE, 1119 VM_PROT_READ | VM_PROT_WRITE, 0); 1120 if (rv == KERN_SUCCESS) { 1121 rv = vm_map_wire(kernel_map, kva, kva + size, 1122 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); 1123 if (rv == KERN_SUCCESS) { 1124 *memp = (void *)(kva + ofs); 1125 return (0); 1126 } 1127 vm_map_remove(kernel_map, kva, kva + size); 1128 } else 1129 vm_object_deallocate(obj); 1130 1131 /* On failure, drop our mapping reference. */ 1132 VM_OBJECT_WLOCK(obj); 1133 shmfd->shm_kmappings--; 1134 VM_OBJECT_WUNLOCK(obj); 1135 1136 return (vm_mmap_to_errno(rv)); 1137 } 1138 1139 /* 1140 * We require the caller to unmap the entire entry. This allows us to 1141 * safely decrement shm_kmappings when a mapping is removed. 1142 */ 1143 int 1144 shm_unmap(struct file *fp, void *mem, size_t size) 1145 { 1146 struct shmfd *shmfd; 1147 vm_map_entry_t entry; 1148 vm_offset_t kva, ofs; 1149 vm_object_t obj; 1150 vm_pindex_t pindex; 1151 vm_prot_t prot; 1152 boolean_t wired; 1153 vm_map_t map; 1154 int rv; 1155 1156 if (fp->f_type != DTYPE_SHM) 1157 return (EINVAL); 1158 shmfd = fp->f_data; 1159 kva = (vm_offset_t)mem; 1160 ofs = kva & PAGE_MASK; 1161 kva = trunc_page(kva); 1162 size = round_page(size + ofs); 1163 map = kernel_map; 1164 rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry, 1165 &obj, &pindex, &prot, &wired); 1166 if (rv != KERN_SUCCESS) 1167 return (EINVAL); 1168 if (entry->start != kva || entry->end != kva + size) { 1169 vm_map_lookup_done(map, entry); 1170 return (EINVAL); 1171 } 1172 vm_map_lookup_done(map, entry); 1173 if (obj != shmfd->shm_object) 1174 return (EINVAL); 1175 vm_map_remove(map, kva, kva + size); 1176 VM_OBJECT_WLOCK(obj); 1177 KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped")); 1178 shmfd->shm_kmappings--; 1179 VM_OBJECT_WUNLOCK(obj); 1180 return (0); 1181 } 1182 1183 static int 1184 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list) 1185 { 1186 const char *path, *pr_path; 1187 size_t pr_pathlen; 1188 bool visible; 1189 1190 sx_assert(&shm_dict_lock, SA_LOCKED); 1191 kif->kf_type = KF_TYPE_SHM; 1192 kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode; 1193 kif->kf_un.kf_file.kf_file_size = shmfd->shm_size; 1194 if (shmfd->shm_path != NULL) { 1195 if (shmfd->shm_path != NULL) { 1196 path = shmfd->shm_path; 1197 pr_path = curthread->td_ucred->cr_prison->pr_path; 1198 if (strcmp(pr_path, "/") != 0) { 1199 /* Return the jail-rooted pathname. */ 1200 pr_pathlen = strlen(pr_path); 1201 visible = strncmp(path, pr_path, pr_pathlen) 1202 == 0 && path[pr_pathlen] == '/'; 1203 if (list && !visible) 1204 return (EPERM); 1205 if (visible) 1206 path += pr_pathlen; 1207 } 1208 strlcpy(kif->kf_path, path, sizeof(kif->kf_path)); 1209 } 1210 } 1211 return (0); 1212 } 1213 1214 static int 1215 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1216 struct filedesc *fdp __unused) 1217 { 1218 int res; 1219 1220 sx_slock(&shm_dict_lock); 1221 res = shm_fill_kinfo_locked(fp->f_data, kif, false); 1222 sx_sunlock(&shm_dict_lock); 1223 return (res); 1224 } 1225 1226 static int 1227 shm_add_seals(struct file *fp, int seals) 1228 { 1229 struct shmfd *shmfd; 1230 void *rl_cookie; 1231 vm_ooffset_t writemappings; 1232 int error, nseals; 1233 1234 error = 0; 1235 shmfd = fp->f_data; 1236 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 1237 &shmfd->shm_mtx); 1238 1239 /* Even already-set seals should result in EPERM. */ 1240 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) { 1241 error = EPERM; 1242 goto out; 1243 } 1244 nseals = seals & ~shmfd->shm_seals; 1245 if ((nseals & F_SEAL_WRITE) != 0) { 1246 /* 1247 * The rangelock above prevents writable mappings from being 1248 * added after we've started applying seals. The RLOCK here 1249 * is to avoid torn reads on ILP32 arches as unmapping/reducing 1250 * writemappings will be done without a rangelock. 1251 */ 1252 VM_OBJECT_RLOCK(shmfd->shm_object); 1253 writemappings = shmfd->shm_object->un_pager.swp.writemappings; 1254 VM_OBJECT_RUNLOCK(shmfd->shm_object); 1255 /* kmappings are also writable */ 1256 if (writemappings > 0) { 1257 error = EBUSY; 1258 goto out; 1259 } 1260 } 1261 shmfd->shm_seals |= nseals; 1262 out: 1263 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 1264 return (error); 1265 } 1266 1267 static int 1268 shm_get_seals(struct file *fp, int *seals) 1269 { 1270 struct shmfd *shmfd; 1271 1272 shmfd = fp->f_data; 1273 *seals = shmfd->shm_seals; 1274 return (0); 1275 } 1276 1277 static int 1278 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS) 1279 { 1280 struct shm_mapping *shmm; 1281 struct sbuf sb; 1282 struct kinfo_file kif; 1283 u_long i; 1284 ssize_t curlen; 1285 int error, error2; 1286 1287 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req); 1288 sbuf_clear_flags(&sb, SBUF_INCLUDENUL); 1289 curlen = 0; 1290 error = 0; 1291 sx_slock(&shm_dict_lock); 1292 for (i = 0; i < shm_hash + 1; i++) { 1293 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) { 1294 error = shm_fill_kinfo_locked(shmm->sm_shmfd, 1295 &kif, true); 1296 if (error == EPERM) 1297 continue; 1298 if (error != 0) 1299 break; 1300 pack_kinfo(&kif); 1301 if (req->oldptr != NULL && 1302 kif.kf_structsize + curlen > req->oldlen) 1303 break; 1304 error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ? 1305 0 : ENOMEM; 1306 if (error != 0) 1307 break; 1308 curlen += kif.kf_structsize; 1309 } 1310 } 1311 sx_sunlock(&shm_dict_lock); 1312 error2 = sbuf_finish(&sb); 1313 sbuf_delete(&sb); 1314 return (error != 0 ? error : error2); 1315 } 1316 1317 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list, 1318 CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE, 1319 NULL, 0, sysctl_posix_shm_list, "", 1320 "POSIX SHM list"); 1321 1322 int 1323 kern_shm_open2(struct thread *td, const char *path, int flags, mode_t mode, 1324 int shmflags, const char *name __unused) 1325 { 1326 int initial_seals; 1327 1328 if ((shmflags & ~SHM_ALLOW_SEALING) != 0) 1329 return (EINVAL); 1330 1331 initial_seals = F_SEAL_SEAL; 1332 if ((shmflags & SHM_ALLOW_SEALING) != 0) 1333 initial_seals &= ~F_SEAL_SEAL; 1334 return (kern_shm_open(td, path, flags, 0, NULL, initial_seals)); 1335 } 1336 1337 /* 1338 * This version of the shm_open() interface leaves CLOEXEC behavior up to the 1339 * caller, and libc will enforce it for the traditional shm_open() call. This 1340 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC. This 1341 * interface also includes a 'name' argument that is currently unused, but could 1342 * potentially be exported later via some interface for debugging purposes. 1343 * From the kernel's perspective, it is optional. Individual consumers like 1344 * memfd_create() may require it in order to be compatible with other systems 1345 * implementing the same function. 1346 */ 1347 int 1348 sys_shm_open2(struct thread *td, struct shm_open2_args *uap) 1349 { 1350 1351 return (kern_shm_open2(td, uap->path, uap->flags, uap->mode, 1352 uap->shmflags, uap->name)); 1353 } 1354