1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson 5 * All rights reserved. 6 * 7 * Portions of this software were developed by BAE Systems, the University of 8 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL 9 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent 10 * Computing (TC) research program. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Support for shared swap-backed anonymous memory objects via 36 * shm_open(2), shm_rename(2), and shm_unlink(2). 37 * While most of the implementation is here, vm_mmap.c contains 38 * mapping logic changes. 39 * 40 * posixshmcontrol(1) allows users to inspect the state of the memory 41 * objects. Per-uid swap resource limit controls total amount of 42 * memory that user can consume for anonymous objects, including 43 * shared. 44 */ 45 46 #include <sys/cdefs.h> 47 __FBSDID("$FreeBSD$"); 48 49 #include "opt_capsicum.h" 50 #include "opt_ktrace.h" 51 52 #include <sys/param.h> 53 #include <sys/capsicum.h> 54 #include <sys/conf.h> 55 #include <sys/fcntl.h> 56 #include <sys/file.h> 57 #include <sys/filedesc.h> 58 #include <sys/filio.h> 59 #include <sys/fnv_hash.h> 60 #include <sys/kernel.h> 61 #include <sys/limits.h> 62 #include <sys/uio.h> 63 #include <sys/signal.h> 64 #include <sys/jail.h> 65 #include <sys/ktrace.h> 66 #include <sys/lock.h> 67 #include <sys/malloc.h> 68 #include <sys/mman.h> 69 #include <sys/mutex.h> 70 #include <sys/priv.h> 71 #include <sys/proc.h> 72 #include <sys/refcount.h> 73 #include <sys/resourcevar.h> 74 #include <sys/rwlock.h> 75 #include <sys/sbuf.h> 76 #include <sys/stat.h> 77 #include <sys/syscallsubr.h> 78 #include <sys/sysctl.h> 79 #include <sys/sysproto.h> 80 #include <sys/systm.h> 81 #include <sys/sx.h> 82 #include <sys/time.h> 83 #include <sys/vnode.h> 84 #include <sys/unistd.h> 85 #include <sys/user.h> 86 87 #include <security/audit/audit.h> 88 #include <security/mac/mac_framework.h> 89 90 #include <vm/vm.h> 91 #include <vm/vm_param.h> 92 #include <vm/pmap.h> 93 #include <vm/vm_extern.h> 94 #include <vm/vm_map.h> 95 #include <vm/vm_kern.h> 96 #include <vm/vm_object.h> 97 #include <vm/vm_page.h> 98 #include <vm/vm_pageout.h> 99 #include <vm/vm_pager.h> 100 #include <vm/swap_pager.h> 101 102 struct shm_mapping { 103 char *sm_path; 104 Fnv32_t sm_fnv; 105 struct shmfd *sm_shmfd; 106 LIST_ENTRY(shm_mapping) sm_link; 107 }; 108 109 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor"); 110 static LIST_HEAD(, shm_mapping) *shm_dictionary; 111 static struct sx shm_dict_lock; 112 static struct mtx shm_timestamp_lock; 113 static u_long shm_hash; 114 static struct unrhdr64 shm_ino_unr; 115 static dev_t shm_dev_ino; 116 117 #define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash]) 118 119 static void shm_init(void *arg); 120 static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd); 121 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv); 122 static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred); 123 static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length, 124 void *rl_cookie); 125 static int shm_copyin_path(struct thread *td, const char *userpath_in, 126 char **path_out); 127 128 static fo_rdwr_t shm_read; 129 static fo_rdwr_t shm_write; 130 static fo_truncate_t shm_truncate; 131 static fo_ioctl_t shm_ioctl; 132 static fo_stat_t shm_stat; 133 static fo_close_t shm_close; 134 static fo_chmod_t shm_chmod; 135 static fo_chown_t shm_chown; 136 static fo_seek_t shm_seek; 137 static fo_fill_kinfo_t shm_fill_kinfo; 138 static fo_mmap_t shm_mmap; 139 static fo_get_seals_t shm_get_seals; 140 static fo_add_seals_t shm_add_seals; 141 142 /* File descriptor operations. */ 143 struct fileops shm_ops = { 144 .fo_read = shm_read, 145 .fo_write = shm_write, 146 .fo_truncate = shm_truncate, 147 .fo_ioctl = shm_ioctl, 148 .fo_poll = invfo_poll, 149 .fo_kqfilter = invfo_kqfilter, 150 .fo_stat = shm_stat, 151 .fo_close = shm_close, 152 .fo_chmod = shm_chmod, 153 .fo_chown = shm_chown, 154 .fo_sendfile = vn_sendfile, 155 .fo_seek = shm_seek, 156 .fo_fill_kinfo = shm_fill_kinfo, 157 .fo_mmap = shm_mmap, 158 .fo_get_seals = shm_get_seals, 159 .fo_add_seals = shm_add_seals, 160 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE 161 }; 162 163 FEATURE(posix_shm, "POSIX shared memory"); 164 165 static int 166 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio) 167 { 168 vm_page_t m; 169 vm_pindex_t idx; 170 size_t tlen; 171 int error, offset, rv; 172 173 idx = OFF_TO_IDX(uio->uio_offset); 174 offset = uio->uio_offset & PAGE_MASK; 175 tlen = MIN(PAGE_SIZE - offset, len); 176 177 VM_OBJECT_WLOCK(obj); 178 179 /* 180 * Read I/O without either a corresponding resident page or swap 181 * page: use zero_region. This is intended to avoid instantiating 182 * pages on read from a sparse region. 183 */ 184 if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL && 185 !vm_pager_has_page(obj, idx, NULL, NULL)) { 186 VM_OBJECT_WUNLOCK(obj); 187 return (uiomove(__DECONST(void *, zero_region), tlen, uio)); 188 } 189 190 /* 191 * Parallel reads of the page content from disk are prevented 192 * by exclusive busy. 193 * 194 * Although the tmpfs vnode lock is held here, it is 195 * nonetheless safe to sleep waiting for a free page. The 196 * pageout daemon does not need to acquire the tmpfs vnode 197 * lock to page out tobj's pages because tobj is a OBJT_SWAP 198 * type object. 199 */ 200 rv = vm_page_grab_valid(&m, obj, idx, 201 VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY); 202 if (rv != VM_PAGER_OK) { 203 VM_OBJECT_WUNLOCK(obj); 204 printf("uiomove_object: vm_obj %p idx %jd pager error %d\n", 205 obj, idx, rv); 206 return (EIO); 207 } 208 VM_OBJECT_WUNLOCK(obj); 209 error = uiomove_fromphys(&m, offset, tlen, uio); 210 if (uio->uio_rw == UIO_WRITE && error == 0) 211 vm_page_set_dirty(m); 212 vm_page_lock(m); 213 vm_page_activate(m); 214 vm_page_unlock(m); 215 vm_page_sunbusy(m); 216 217 return (error); 218 } 219 220 int 221 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio) 222 { 223 ssize_t resid; 224 size_t len; 225 int error; 226 227 error = 0; 228 while ((resid = uio->uio_resid) > 0) { 229 if (obj_size <= uio->uio_offset) 230 break; 231 len = MIN(obj_size - uio->uio_offset, resid); 232 if (len == 0) 233 break; 234 error = uiomove_object_page(obj, len, uio); 235 if (error != 0 || resid == uio->uio_resid) 236 break; 237 } 238 return (error); 239 } 240 241 static int 242 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td) 243 { 244 struct shmfd *shmfd; 245 off_t foffset; 246 int error; 247 248 shmfd = fp->f_data; 249 foffset = foffset_lock(fp, 0); 250 error = 0; 251 switch (whence) { 252 case L_INCR: 253 if (foffset < 0 || 254 (offset > 0 && foffset > OFF_MAX - offset)) { 255 error = EOVERFLOW; 256 break; 257 } 258 offset += foffset; 259 break; 260 case L_XTND: 261 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) { 262 error = EOVERFLOW; 263 break; 264 } 265 offset += shmfd->shm_size; 266 break; 267 case L_SET: 268 break; 269 default: 270 error = EINVAL; 271 } 272 if (error == 0) { 273 if (offset < 0 || offset > shmfd->shm_size) 274 error = EINVAL; 275 else 276 td->td_uretoff.tdu_off = offset; 277 } 278 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0); 279 return (error); 280 } 281 282 static int 283 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred, 284 int flags, struct thread *td) 285 { 286 struct shmfd *shmfd; 287 void *rl_cookie; 288 int error; 289 290 shmfd = fp->f_data; 291 #ifdef MAC 292 error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd); 293 if (error) 294 return (error); 295 #endif 296 foffset_lock_uio(fp, uio, flags); 297 rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset, 298 uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx); 299 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio); 300 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 301 foffset_unlock_uio(fp, uio, flags); 302 return (error); 303 } 304 305 static int 306 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred, 307 int flags, struct thread *td) 308 { 309 struct shmfd *shmfd; 310 void *rl_cookie; 311 int error; 312 313 shmfd = fp->f_data; 314 #ifdef MAC 315 error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd); 316 if (error) 317 return (error); 318 #endif 319 foffset_lock_uio(fp, uio, flags); 320 if ((flags & FOF_OFFSET) == 0) { 321 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 322 &shmfd->shm_mtx); 323 } else { 324 rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset, 325 uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx); 326 } 327 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) 328 error = EPERM; 329 else 330 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio); 331 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 332 foffset_unlock_uio(fp, uio, flags); 333 return (error); 334 } 335 336 static int 337 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred, 338 struct thread *td) 339 { 340 struct shmfd *shmfd; 341 #ifdef MAC 342 int error; 343 #endif 344 345 shmfd = fp->f_data; 346 #ifdef MAC 347 error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd); 348 if (error) 349 return (error); 350 #endif 351 return (shm_dotruncate(shmfd, length)); 352 } 353 354 int 355 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred, 356 struct thread *td) 357 { 358 359 switch (com) { 360 case FIONBIO: 361 case FIOASYNC: 362 /* 363 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, 364 * just like it would on an unlinked regular file 365 */ 366 return (0); 367 default: 368 return (ENOTTY); 369 } 370 } 371 372 static int 373 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 374 struct thread *td) 375 { 376 struct shmfd *shmfd; 377 #ifdef MAC 378 int error; 379 #endif 380 381 shmfd = fp->f_data; 382 383 #ifdef MAC 384 error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd); 385 if (error) 386 return (error); 387 #endif 388 389 /* 390 * Attempt to return sanish values for fstat() on a memory file 391 * descriptor. 392 */ 393 bzero(sb, sizeof(*sb)); 394 sb->st_blksize = PAGE_SIZE; 395 sb->st_size = shmfd->shm_size; 396 sb->st_blocks = howmany(sb->st_size, sb->st_blksize); 397 mtx_lock(&shm_timestamp_lock); 398 sb->st_atim = shmfd->shm_atime; 399 sb->st_ctim = shmfd->shm_ctime; 400 sb->st_mtim = shmfd->shm_mtime; 401 sb->st_birthtim = shmfd->shm_birthtime; 402 sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */ 403 sb->st_uid = shmfd->shm_uid; 404 sb->st_gid = shmfd->shm_gid; 405 mtx_unlock(&shm_timestamp_lock); 406 sb->st_dev = shm_dev_ino; 407 sb->st_ino = shmfd->shm_ino; 408 sb->st_nlink = shmfd->shm_object->ref_count; 409 410 return (0); 411 } 412 413 static int 414 shm_close(struct file *fp, struct thread *td) 415 { 416 struct shmfd *shmfd; 417 418 shmfd = fp->f_data; 419 fp->f_data = NULL; 420 shm_drop(shmfd); 421 422 return (0); 423 } 424 425 static int 426 shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out) { 427 int error; 428 char *path; 429 const char *pr_path; 430 size_t pr_pathlen; 431 432 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK); 433 pr_path = td->td_ucred->cr_prison->pr_path; 434 435 /* Construct a full pathname for jailed callers. */ 436 pr_pathlen = strcmp(pr_path, "/") == 437 0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN); 438 error = copyinstr(userpath_in, path + pr_pathlen, 439 MAXPATHLEN - pr_pathlen, NULL); 440 if (error != 0) 441 goto out; 442 443 #ifdef KTRACE 444 if (KTRPOINT(curthread, KTR_NAMEI)) 445 ktrnamei(path); 446 #endif 447 448 /* Require paths to start with a '/' character. */ 449 if (path[pr_pathlen] != '/') { 450 error = EINVAL; 451 goto out; 452 } 453 454 *path_out = path; 455 456 out: 457 if (error != 0) 458 free(path, M_SHMFD); 459 460 return (error); 461 } 462 463 static int 464 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie) 465 { 466 vm_object_t object; 467 vm_page_t m; 468 vm_pindex_t idx, nobjsize; 469 vm_ooffset_t delta; 470 int base, rv; 471 472 KASSERT(length >= 0, ("shm_dotruncate: length < 0")); 473 object = shmfd->shm_object; 474 VM_OBJECT_ASSERT_WLOCKED(object); 475 rangelock_cookie_assert(rl_cookie, RA_WLOCKED); 476 if (length == shmfd->shm_size) 477 return (0); 478 nobjsize = OFF_TO_IDX(length + PAGE_MASK); 479 480 /* Are we shrinking? If so, trim the end. */ 481 if (length < shmfd->shm_size) { 482 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0) 483 return (EPERM); 484 485 /* 486 * Disallow any requests to shrink the size if this 487 * object is mapped into the kernel. 488 */ 489 if (shmfd->shm_kmappings > 0) 490 return (EBUSY); 491 492 /* 493 * Zero the truncated part of the last page. 494 */ 495 base = length & PAGE_MASK; 496 if (base != 0) { 497 idx = OFF_TO_IDX(length); 498 retry: 499 m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT); 500 if (m != NULL) { 501 MPASS(vm_page_all_valid(m)); 502 } else if (vm_pager_has_page(object, idx, NULL, NULL)) { 503 m = vm_page_alloc(object, idx, 504 VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL); 505 if (m == NULL) 506 goto retry; 507 rv = vm_pager_get_pages(object, &m, 1, NULL, 508 NULL); 509 if (rv == VM_PAGER_OK) { 510 /* 511 * Since the page was not resident, 512 * and therefore not recently 513 * accessed, immediately enqueue it 514 * for asynchronous laundering. The 515 * current operation is not regarded 516 * as an access. 517 */ 518 vm_page_launder(m); 519 } else { 520 vm_page_free(m); 521 VM_OBJECT_WUNLOCK(object); 522 return (EIO); 523 } 524 } 525 if (m != NULL) { 526 pmap_zero_page_area(m, base, PAGE_SIZE - base); 527 KASSERT(vm_page_all_valid(m), 528 ("shm_dotruncate: page %p is invalid", m)); 529 vm_page_set_dirty(m); 530 vm_page_xunbusy(m); 531 } 532 } 533 delta = IDX_TO_OFF(object->size - nobjsize); 534 535 /* Toss in memory pages. */ 536 if (nobjsize < object->size) 537 vm_object_page_remove(object, nobjsize, object->size, 538 0); 539 540 /* Toss pages from swap. */ 541 if (object->type == OBJT_SWAP) 542 swap_pager_freespace(object, nobjsize, delta); 543 544 /* Free the swap accounted for shm */ 545 swap_release_by_cred(delta, object->cred); 546 object->charge -= delta; 547 } else { 548 if ((shmfd->shm_seals & F_SEAL_GROW) != 0) 549 return (EPERM); 550 551 /* Try to reserve additional swap space. */ 552 delta = IDX_TO_OFF(nobjsize - object->size); 553 if (!swap_reserve_by_cred(delta, object->cred)) 554 return (ENOMEM); 555 object->charge += delta; 556 } 557 shmfd->shm_size = length; 558 mtx_lock(&shm_timestamp_lock); 559 vfs_timestamp(&shmfd->shm_ctime); 560 shmfd->shm_mtime = shmfd->shm_ctime; 561 mtx_unlock(&shm_timestamp_lock); 562 object->size = nobjsize; 563 return (0); 564 } 565 566 int 567 shm_dotruncate(struct shmfd *shmfd, off_t length) 568 { 569 void *rl_cookie; 570 int error; 571 572 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 573 &shmfd->shm_mtx); 574 VM_OBJECT_WLOCK(shmfd->shm_object); 575 error = shm_dotruncate_locked(shmfd, length, rl_cookie); 576 VM_OBJECT_WUNLOCK(shmfd->shm_object); 577 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 578 return (error); 579 } 580 581 /* 582 * shmfd object management including creation and reference counting 583 * routines. 584 */ 585 struct shmfd * 586 shm_alloc(struct ucred *ucred, mode_t mode) 587 { 588 struct shmfd *shmfd; 589 590 shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO); 591 shmfd->shm_size = 0; 592 shmfd->shm_uid = ucred->cr_uid; 593 shmfd->shm_gid = ucred->cr_gid; 594 shmfd->shm_mode = mode; 595 shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL, 596 shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred); 597 KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate")); 598 vfs_timestamp(&shmfd->shm_birthtime); 599 shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime = 600 shmfd->shm_birthtime; 601 shmfd->shm_ino = alloc_unr64(&shm_ino_unr); 602 refcount_init(&shmfd->shm_refs, 1); 603 mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF); 604 rangelock_init(&shmfd->shm_rl); 605 #ifdef MAC 606 mac_posixshm_init(shmfd); 607 mac_posixshm_create(ucred, shmfd); 608 #endif 609 610 return (shmfd); 611 } 612 613 struct shmfd * 614 shm_hold(struct shmfd *shmfd) 615 { 616 617 refcount_acquire(&shmfd->shm_refs); 618 return (shmfd); 619 } 620 621 void 622 shm_drop(struct shmfd *shmfd) 623 { 624 625 if (refcount_release(&shmfd->shm_refs)) { 626 #ifdef MAC 627 mac_posixshm_destroy(shmfd); 628 #endif 629 rangelock_destroy(&shmfd->shm_rl); 630 mtx_destroy(&shmfd->shm_mtx); 631 vm_object_deallocate(shmfd->shm_object); 632 free(shmfd, M_SHMFD); 633 } 634 } 635 636 /* 637 * Determine if the credentials have sufficient permissions for a 638 * specified combination of FREAD and FWRITE. 639 */ 640 int 641 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags) 642 { 643 accmode_t accmode; 644 int error; 645 646 accmode = 0; 647 if (flags & FREAD) 648 accmode |= VREAD; 649 if (flags & FWRITE) 650 accmode |= VWRITE; 651 mtx_lock(&shm_timestamp_lock); 652 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, 653 accmode, ucred, NULL); 654 mtx_unlock(&shm_timestamp_lock); 655 return (error); 656 } 657 658 /* 659 * Dictionary management. We maintain an in-kernel dictionary to map 660 * paths to shmfd objects. We use the FNV hash on the path to store 661 * the mappings in a hash table. 662 */ 663 static void 664 shm_init(void *arg) 665 { 666 667 mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF); 668 sx_init(&shm_dict_lock, "shm dictionary"); 669 shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash); 670 new_unrhdr64(&shm_ino_unr, 1); 671 shm_dev_ino = devfs_alloc_cdp_inode(); 672 KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized")); 673 } 674 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL); 675 676 static struct shmfd * 677 shm_lookup(char *path, Fnv32_t fnv) 678 { 679 struct shm_mapping *map; 680 681 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { 682 if (map->sm_fnv != fnv) 683 continue; 684 if (strcmp(map->sm_path, path) == 0) 685 return (map->sm_shmfd); 686 } 687 688 return (NULL); 689 } 690 691 static void 692 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd) 693 { 694 struct shm_mapping *map; 695 696 map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK); 697 map->sm_path = path; 698 map->sm_fnv = fnv; 699 map->sm_shmfd = shm_hold(shmfd); 700 shmfd->shm_path = path; 701 LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link); 702 } 703 704 static int 705 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred) 706 { 707 struct shm_mapping *map; 708 int error; 709 710 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { 711 if (map->sm_fnv != fnv) 712 continue; 713 if (strcmp(map->sm_path, path) == 0) { 714 #ifdef MAC 715 error = mac_posixshm_check_unlink(ucred, map->sm_shmfd); 716 if (error) 717 return (error); 718 #endif 719 error = shm_access(map->sm_shmfd, ucred, 720 FREAD | FWRITE); 721 if (error) 722 return (error); 723 map->sm_shmfd->shm_path = NULL; 724 LIST_REMOVE(map, sm_link); 725 shm_drop(map->sm_shmfd); 726 free(map->sm_path, M_SHMFD); 727 free(map, M_SHMFD); 728 return (0); 729 } 730 } 731 732 return (ENOENT); 733 } 734 735 int 736 kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode, 737 struct filecaps *fcaps, int initial_seals) 738 { 739 struct filedesc *fdp; 740 struct shmfd *shmfd; 741 struct file *fp; 742 char *path; 743 void *rl_cookie; 744 Fnv32_t fnv; 745 mode_t cmode; 746 int fd, error; 747 748 #ifdef CAPABILITY_MODE 749 /* 750 * shm_open(2) is only allowed for anonymous objects. 751 */ 752 if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON)) 753 return (ECAPMODE); 754 #endif 755 756 AUDIT_ARG_FFLAGS(flags); 757 AUDIT_ARG_MODE(mode); 758 759 if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR) 760 return (EINVAL); 761 762 if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0) 763 return (EINVAL); 764 765 /* 766 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd. 767 * If the decision is made later to allow additional seals, care must be 768 * taken below to ensure that the seals are properly set if the shmfd 769 * already existed -- this currently assumes that only F_SEAL_SEAL can 770 * be set and doesn't take further precautions to ensure the validity of 771 * the seals being added with respect to current mappings. 772 */ 773 if ((initial_seals & ~F_SEAL_SEAL) != 0) 774 return (EINVAL); 775 776 fdp = td->td_proc->p_fd; 777 cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS; 778 779 /* 780 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated 781 * by POSIX. We allow it to be unset here so that an in-kernel 782 * interface may be written as a thin layer around shm, optionally not 783 * setting CLOEXEC. For shm_open(2), O_CLOEXEC is set unconditionally 784 * in sys_shm_open() to keep this implementation compliant. 785 */ 786 error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps); 787 if (error) 788 return (error); 789 790 /* A SHM_ANON path pointer creates an anonymous object. */ 791 if (userpath == SHM_ANON) { 792 /* A read-only anonymous object is pointless. */ 793 if ((flags & O_ACCMODE) == O_RDONLY) { 794 fdclose(td, fp, fd); 795 fdrop(fp, td); 796 return (EINVAL); 797 } 798 shmfd = shm_alloc(td->td_ucred, cmode); 799 shmfd->shm_seals = initial_seals; 800 } else { 801 error = shm_copyin_path(td, userpath, &path); 802 if (error != 0) { 803 fdclose(td, fp, fd); 804 fdrop(fp, td); 805 return (error); 806 } 807 808 AUDIT_ARG_UPATH1_CANON(path); 809 fnv = fnv_32_str(path, FNV1_32_INIT); 810 sx_xlock(&shm_dict_lock); 811 shmfd = shm_lookup(path, fnv); 812 if (shmfd == NULL) { 813 /* Object does not yet exist, create it if requested. */ 814 if (flags & O_CREAT) { 815 #ifdef MAC 816 error = mac_posixshm_check_create(td->td_ucred, 817 path); 818 if (error == 0) { 819 #endif 820 shmfd = shm_alloc(td->td_ucred, cmode); 821 shmfd->shm_seals = initial_seals; 822 shm_insert(path, fnv, shmfd); 823 #ifdef MAC 824 } 825 #endif 826 } else { 827 free(path, M_SHMFD); 828 error = ENOENT; 829 } 830 } else { 831 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 832 &shmfd->shm_mtx); 833 834 /* 835 * kern_shm_open() likely shouldn't ever error out on 836 * trying to set a seal that already exists, unlike 837 * F_ADD_SEALS. This would break terribly as 838 * shm_open(2) actually sets F_SEAL_SEAL to maintain 839 * historical behavior where the underlying file could 840 * not be sealed. 841 */ 842 initial_seals &= ~shmfd->shm_seals; 843 844 /* 845 * Object already exists, obtain a new 846 * reference if requested and permitted. 847 */ 848 free(path, M_SHMFD); 849 850 /* 851 * initial_seals can't set additional seals if we've 852 * already been set F_SEAL_SEAL. If F_SEAL_SEAL is set, 853 * then we've already removed that one from 854 * initial_seals. This is currently redundant as we 855 * only allow setting F_SEAL_SEAL at creation time, but 856 * it's cheap to check and decreases the effort required 857 * to allow additional seals. 858 */ 859 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 && 860 initial_seals != 0) 861 error = EPERM; 862 else if ((flags & (O_CREAT | O_EXCL)) == 863 (O_CREAT | O_EXCL)) 864 error = EEXIST; 865 else { 866 #ifdef MAC 867 error = mac_posixshm_check_open(td->td_ucred, 868 shmfd, FFLAGS(flags & O_ACCMODE)); 869 if (error == 0) 870 #endif 871 error = shm_access(shmfd, td->td_ucred, 872 FFLAGS(flags & O_ACCMODE)); 873 } 874 875 /* 876 * Truncate the file back to zero length if 877 * O_TRUNC was specified and the object was 878 * opened with read/write. 879 */ 880 if (error == 0 && 881 (flags & (O_ACCMODE | O_TRUNC)) == 882 (O_RDWR | O_TRUNC)) { 883 VM_OBJECT_WLOCK(shmfd->shm_object); 884 #ifdef MAC 885 error = mac_posixshm_check_truncate( 886 td->td_ucred, fp->f_cred, shmfd); 887 if (error == 0) 888 #endif 889 error = shm_dotruncate_locked(shmfd, 0, 890 rl_cookie); 891 VM_OBJECT_WUNLOCK(shmfd->shm_object); 892 } 893 if (error == 0) { 894 /* 895 * Currently we only allow F_SEAL_SEAL to be 896 * set initially. As noted above, this would 897 * need to be reworked should that change. 898 */ 899 shmfd->shm_seals |= initial_seals; 900 shm_hold(shmfd); 901 } 902 rangelock_unlock(&shmfd->shm_rl, rl_cookie, 903 &shmfd->shm_mtx); 904 } 905 sx_xunlock(&shm_dict_lock); 906 907 if (error) { 908 fdclose(td, fp, fd); 909 fdrop(fp, td); 910 return (error); 911 } 912 } 913 914 finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops); 915 916 td->td_retval[0] = fd; 917 fdrop(fp, td); 918 919 return (0); 920 } 921 922 /* System calls. */ 923 #ifdef COMPAT_FREEBSD12 924 int 925 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap) 926 { 927 928 return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, uap->mode, 929 NULL, F_SEAL_SEAL)); 930 } 931 #endif 932 933 int 934 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap) 935 { 936 char *path; 937 Fnv32_t fnv; 938 int error; 939 940 error = shm_copyin_path(td, uap->path, &path); 941 if (error != 0) 942 return (error); 943 944 AUDIT_ARG_UPATH1_CANON(path); 945 fnv = fnv_32_str(path, FNV1_32_INIT); 946 sx_xlock(&shm_dict_lock); 947 error = shm_remove(path, fnv, td->td_ucred); 948 sx_xunlock(&shm_dict_lock); 949 free(path, M_TEMP); 950 951 return (error); 952 } 953 954 int 955 sys_shm_rename(struct thread *td, struct shm_rename_args *uap) 956 { 957 char *path_from = NULL, *path_to = NULL; 958 Fnv32_t fnv_from, fnv_to; 959 struct shmfd *fd_from; 960 struct shmfd *fd_to; 961 int error; 962 int flags; 963 964 flags = uap->flags; 965 AUDIT_ARG_FFLAGS(flags); 966 967 /* 968 * Make sure the user passed only valid flags. 969 * If you add a new flag, please add a new term here. 970 */ 971 if ((flags & ~( 972 SHM_RENAME_NOREPLACE | 973 SHM_RENAME_EXCHANGE 974 )) != 0) { 975 error = EINVAL; 976 goto out; 977 } 978 979 /* 980 * EXCHANGE and NOREPLACE don't quite make sense together. Let's 981 * force the user to choose one or the other. 982 */ 983 if ((flags & SHM_RENAME_NOREPLACE) != 0 && 984 (flags & SHM_RENAME_EXCHANGE) != 0) { 985 error = EINVAL; 986 goto out; 987 } 988 989 /* Renaming to or from anonymous makes no sense */ 990 if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) { 991 error = EINVAL; 992 goto out; 993 } 994 995 error = shm_copyin_path(td, uap->path_from, &path_from); 996 if (error != 0) 997 goto out; 998 999 error = shm_copyin_path(td, uap->path_to, &path_to); 1000 if (error != 0) 1001 goto out; 1002 1003 AUDIT_ARG_UPATH1_CANON(path_from); 1004 AUDIT_ARG_UPATH2_CANON(path_to); 1005 1006 /* Rename with from/to equal is a no-op */ 1007 if (strcmp(path_from, path_to) == 0) 1008 goto out; 1009 1010 fnv_from = fnv_32_str(path_from, FNV1_32_INIT); 1011 fnv_to = fnv_32_str(path_to, FNV1_32_INIT); 1012 1013 sx_xlock(&shm_dict_lock); 1014 1015 fd_from = shm_lookup(path_from, fnv_from); 1016 if (fd_from == NULL) { 1017 error = ENOENT; 1018 goto out_locked; 1019 } 1020 1021 fd_to = shm_lookup(path_to, fnv_to); 1022 if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) { 1023 error = EEXIST; 1024 goto out_locked; 1025 } 1026 1027 /* 1028 * Unconditionally prevents shm_remove from invalidating the 'from' 1029 * shm's state. 1030 */ 1031 shm_hold(fd_from); 1032 error = shm_remove(path_from, fnv_from, td->td_ucred); 1033 1034 /* 1035 * One of my assumptions failed if ENOENT (e.g. locking didn't 1036 * protect us) 1037 */ 1038 KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s", 1039 path_from)); 1040 if (error != 0) { 1041 shm_drop(fd_from); 1042 goto out_locked; 1043 } 1044 1045 /* 1046 * If we are exchanging, we need to ensure the shm_remove below 1047 * doesn't invalidate the dest shm's state. 1048 */ 1049 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) 1050 shm_hold(fd_to); 1051 1052 /* 1053 * NOTE: if path_to is not already in the hash, c'est la vie; 1054 * it simply means we have nothing already at path_to to unlink. 1055 * That is the ENOENT case. 1056 * 1057 * If we somehow don't have access to unlink this guy, but 1058 * did for the shm at path_from, then relink the shm to path_from 1059 * and abort with EACCES. 1060 * 1061 * All other errors: that is weird; let's relink and abort the 1062 * operation. 1063 */ 1064 error = shm_remove(path_to, fnv_to, td->td_ucred); 1065 if (error != 0 && error != ENOENT) { 1066 shm_insert(path_from, fnv_from, fd_from); 1067 shm_drop(fd_from); 1068 /* Don't free path_from now, since the hash references it */ 1069 path_from = NULL; 1070 goto out_locked; 1071 } 1072 1073 error = 0; 1074 1075 shm_insert(path_to, fnv_to, fd_from); 1076 1077 /* Don't free path_to now, since the hash references it */ 1078 path_to = NULL; 1079 1080 /* We kept a ref when we removed, and incremented again in insert */ 1081 shm_drop(fd_from); 1082 KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n", 1083 fd_from->shm_refs)); 1084 1085 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) { 1086 shm_insert(path_from, fnv_from, fd_to); 1087 path_from = NULL; 1088 shm_drop(fd_to); 1089 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n", 1090 fd_to->shm_refs)); 1091 } 1092 1093 out_locked: 1094 sx_xunlock(&shm_dict_lock); 1095 1096 out: 1097 free(path_from, M_SHMFD); 1098 free(path_to, M_SHMFD); 1099 return (error); 1100 } 1101 1102 int 1103 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize, 1104 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, 1105 vm_ooffset_t foff, struct thread *td) 1106 { 1107 struct shmfd *shmfd; 1108 vm_prot_t maxprot; 1109 int error; 1110 bool writecnt; 1111 void *rl_cookie; 1112 1113 shmfd = fp->f_data; 1114 maxprot = VM_PROT_NONE; 1115 1116 rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize, 1117 &shmfd->shm_mtx); 1118 /* FREAD should always be set. */ 1119 if ((fp->f_flag & FREAD) != 0) 1120 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ; 1121 if ((fp->f_flag & FWRITE) != 0) 1122 maxprot |= VM_PROT_WRITE; 1123 1124 writecnt = (flags & MAP_SHARED) != 0 && (prot & VM_PROT_WRITE) != 0; 1125 1126 if (writecnt && (shmfd->shm_seals & F_SEAL_WRITE) != 0) { 1127 error = EPERM; 1128 goto out; 1129 } 1130 1131 /* Don't permit shared writable mappings on read-only descriptors. */ 1132 if (writecnt && (maxprot & VM_PROT_WRITE) == 0) { 1133 error = EACCES; 1134 goto out; 1135 } 1136 maxprot &= cap_maxprot; 1137 1138 /* See comment in vn_mmap(). */ 1139 if ( 1140 #ifdef _LP64 1141 objsize > OFF_MAX || 1142 #endif 1143 foff < 0 || foff > OFF_MAX - objsize) { 1144 error = EINVAL; 1145 goto out; 1146 } 1147 1148 #ifdef MAC 1149 error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags); 1150 if (error != 0) 1151 goto out; 1152 #endif 1153 1154 mtx_lock(&shm_timestamp_lock); 1155 vfs_timestamp(&shmfd->shm_atime); 1156 mtx_unlock(&shm_timestamp_lock); 1157 vm_object_reference(shmfd->shm_object); 1158 1159 if (writecnt) 1160 vm_pager_update_writecount(shmfd->shm_object, 0, objsize); 1161 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags, 1162 shmfd->shm_object, foff, writecnt, td); 1163 if (error != 0) { 1164 if (writecnt) 1165 vm_pager_release_writecount(shmfd->shm_object, 0, 1166 objsize); 1167 vm_object_deallocate(shmfd->shm_object); 1168 } 1169 out: 1170 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 1171 return (error); 1172 } 1173 1174 static int 1175 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, 1176 struct thread *td) 1177 { 1178 struct shmfd *shmfd; 1179 int error; 1180 1181 error = 0; 1182 shmfd = fp->f_data; 1183 mtx_lock(&shm_timestamp_lock); 1184 /* 1185 * SUSv4 says that x bits of permission need not be affected. 1186 * Be consistent with our shm_open there. 1187 */ 1188 #ifdef MAC 1189 error = mac_posixshm_check_setmode(active_cred, shmfd, mode); 1190 if (error != 0) 1191 goto out; 1192 #endif 1193 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, 1194 shmfd->shm_gid, VADMIN, active_cred, NULL); 1195 if (error != 0) 1196 goto out; 1197 shmfd->shm_mode = mode & ACCESSPERMS; 1198 out: 1199 mtx_unlock(&shm_timestamp_lock); 1200 return (error); 1201 } 1202 1203 static int 1204 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, 1205 struct thread *td) 1206 { 1207 struct shmfd *shmfd; 1208 int error; 1209 1210 error = 0; 1211 shmfd = fp->f_data; 1212 mtx_lock(&shm_timestamp_lock); 1213 #ifdef MAC 1214 error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid); 1215 if (error != 0) 1216 goto out; 1217 #endif 1218 if (uid == (uid_t)-1) 1219 uid = shmfd->shm_uid; 1220 if (gid == (gid_t)-1) 1221 gid = shmfd->shm_gid; 1222 if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) || 1223 (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) && 1224 (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN))) 1225 goto out; 1226 shmfd->shm_uid = uid; 1227 shmfd->shm_gid = gid; 1228 out: 1229 mtx_unlock(&shm_timestamp_lock); 1230 return (error); 1231 } 1232 1233 /* 1234 * Helper routines to allow the backing object of a shared memory file 1235 * descriptor to be mapped in the kernel. 1236 */ 1237 int 1238 shm_map(struct file *fp, size_t size, off_t offset, void **memp) 1239 { 1240 struct shmfd *shmfd; 1241 vm_offset_t kva, ofs; 1242 vm_object_t obj; 1243 int rv; 1244 1245 if (fp->f_type != DTYPE_SHM) 1246 return (EINVAL); 1247 shmfd = fp->f_data; 1248 obj = shmfd->shm_object; 1249 VM_OBJECT_WLOCK(obj); 1250 /* 1251 * XXXRW: This validation is probably insufficient, and subject to 1252 * sign errors. It should be fixed. 1253 */ 1254 if (offset >= shmfd->shm_size || 1255 offset + size > round_page(shmfd->shm_size)) { 1256 VM_OBJECT_WUNLOCK(obj); 1257 return (EINVAL); 1258 } 1259 1260 shmfd->shm_kmappings++; 1261 vm_object_reference_locked(obj); 1262 VM_OBJECT_WUNLOCK(obj); 1263 1264 /* Map the object into the kernel_map and wire it. */ 1265 kva = vm_map_min(kernel_map); 1266 ofs = offset & PAGE_MASK; 1267 offset = trunc_page(offset); 1268 size = round_page(size + ofs); 1269 rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0, 1270 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE, 1271 VM_PROT_READ | VM_PROT_WRITE, 0); 1272 if (rv == KERN_SUCCESS) { 1273 rv = vm_map_wire(kernel_map, kva, kva + size, 1274 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); 1275 if (rv == KERN_SUCCESS) { 1276 *memp = (void *)(kva + ofs); 1277 return (0); 1278 } 1279 vm_map_remove(kernel_map, kva, kva + size); 1280 } else 1281 vm_object_deallocate(obj); 1282 1283 /* On failure, drop our mapping reference. */ 1284 VM_OBJECT_WLOCK(obj); 1285 shmfd->shm_kmappings--; 1286 VM_OBJECT_WUNLOCK(obj); 1287 1288 return (vm_mmap_to_errno(rv)); 1289 } 1290 1291 /* 1292 * We require the caller to unmap the entire entry. This allows us to 1293 * safely decrement shm_kmappings when a mapping is removed. 1294 */ 1295 int 1296 shm_unmap(struct file *fp, void *mem, size_t size) 1297 { 1298 struct shmfd *shmfd; 1299 vm_map_entry_t entry; 1300 vm_offset_t kva, ofs; 1301 vm_object_t obj; 1302 vm_pindex_t pindex; 1303 vm_prot_t prot; 1304 boolean_t wired; 1305 vm_map_t map; 1306 int rv; 1307 1308 if (fp->f_type != DTYPE_SHM) 1309 return (EINVAL); 1310 shmfd = fp->f_data; 1311 kva = (vm_offset_t)mem; 1312 ofs = kva & PAGE_MASK; 1313 kva = trunc_page(kva); 1314 size = round_page(size + ofs); 1315 map = kernel_map; 1316 rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry, 1317 &obj, &pindex, &prot, &wired); 1318 if (rv != KERN_SUCCESS) 1319 return (EINVAL); 1320 if (entry->start != kva || entry->end != kva + size) { 1321 vm_map_lookup_done(map, entry); 1322 return (EINVAL); 1323 } 1324 vm_map_lookup_done(map, entry); 1325 if (obj != shmfd->shm_object) 1326 return (EINVAL); 1327 vm_map_remove(map, kva, kva + size); 1328 VM_OBJECT_WLOCK(obj); 1329 KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped")); 1330 shmfd->shm_kmappings--; 1331 VM_OBJECT_WUNLOCK(obj); 1332 return (0); 1333 } 1334 1335 static int 1336 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list) 1337 { 1338 const char *path, *pr_path; 1339 size_t pr_pathlen; 1340 bool visible; 1341 1342 sx_assert(&shm_dict_lock, SA_LOCKED); 1343 kif->kf_type = KF_TYPE_SHM; 1344 kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode; 1345 kif->kf_un.kf_file.kf_file_size = shmfd->shm_size; 1346 if (shmfd->shm_path != NULL) { 1347 if (shmfd->shm_path != NULL) { 1348 path = shmfd->shm_path; 1349 pr_path = curthread->td_ucred->cr_prison->pr_path; 1350 if (strcmp(pr_path, "/") != 0) { 1351 /* Return the jail-rooted pathname. */ 1352 pr_pathlen = strlen(pr_path); 1353 visible = strncmp(path, pr_path, pr_pathlen) 1354 == 0 && path[pr_pathlen] == '/'; 1355 if (list && !visible) 1356 return (EPERM); 1357 if (visible) 1358 path += pr_pathlen; 1359 } 1360 strlcpy(kif->kf_path, path, sizeof(kif->kf_path)); 1361 } 1362 } 1363 return (0); 1364 } 1365 1366 static int 1367 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1368 struct filedesc *fdp __unused) 1369 { 1370 int res; 1371 1372 sx_slock(&shm_dict_lock); 1373 res = shm_fill_kinfo_locked(fp->f_data, kif, false); 1374 sx_sunlock(&shm_dict_lock); 1375 return (res); 1376 } 1377 1378 static int 1379 shm_add_seals(struct file *fp, int seals) 1380 { 1381 struct shmfd *shmfd; 1382 void *rl_cookie; 1383 vm_ooffset_t writemappings; 1384 int error, nseals; 1385 1386 error = 0; 1387 shmfd = fp->f_data; 1388 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, 1389 &shmfd->shm_mtx); 1390 1391 /* Even already-set seals should result in EPERM. */ 1392 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) { 1393 error = EPERM; 1394 goto out; 1395 } 1396 nseals = seals & ~shmfd->shm_seals; 1397 if ((nseals & F_SEAL_WRITE) != 0) { 1398 /* 1399 * The rangelock above prevents writable mappings from being 1400 * added after we've started applying seals. The RLOCK here 1401 * is to avoid torn reads on ILP32 arches as unmapping/reducing 1402 * writemappings will be done without a rangelock. 1403 */ 1404 VM_OBJECT_RLOCK(shmfd->shm_object); 1405 writemappings = shmfd->shm_object->un_pager.swp.writemappings; 1406 VM_OBJECT_RUNLOCK(shmfd->shm_object); 1407 /* kmappings are also writable */ 1408 if (writemappings > 0) { 1409 error = EBUSY; 1410 goto out; 1411 } 1412 } 1413 shmfd->shm_seals |= nseals; 1414 out: 1415 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); 1416 return (error); 1417 } 1418 1419 static int 1420 shm_get_seals(struct file *fp, int *seals) 1421 { 1422 struct shmfd *shmfd; 1423 1424 shmfd = fp->f_data; 1425 *seals = shmfd->shm_seals; 1426 return (0); 1427 } 1428 1429 static int 1430 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS) 1431 { 1432 struct shm_mapping *shmm; 1433 struct sbuf sb; 1434 struct kinfo_file kif; 1435 u_long i; 1436 ssize_t curlen; 1437 int error, error2; 1438 1439 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req); 1440 sbuf_clear_flags(&sb, SBUF_INCLUDENUL); 1441 curlen = 0; 1442 error = 0; 1443 sx_slock(&shm_dict_lock); 1444 for (i = 0; i < shm_hash + 1; i++) { 1445 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) { 1446 error = shm_fill_kinfo_locked(shmm->sm_shmfd, 1447 &kif, true); 1448 if (error == EPERM) 1449 continue; 1450 if (error != 0) 1451 break; 1452 pack_kinfo(&kif); 1453 if (req->oldptr != NULL && 1454 kif.kf_structsize + curlen > req->oldlen) 1455 break; 1456 error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ? 1457 0 : ENOMEM; 1458 if (error != 0) 1459 break; 1460 curlen += kif.kf_structsize; 1461 } 1462 } 1463 sx_sunlock(&shm_dict_lock); 1464 error2 = sbuf_finish(&sb); 1465 sbuf_delete(&sb); 1466 return (error != 0 ? error : error2); 1467 } 1468 1469 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list, 1470 CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE, 1471 NULL, 0, sysctl_posix_shm_list, "", 1472 "POSIX SHM list"); 1473 1474 int 1475 kern_shm_open2(struct thread *td, const char *path, int flags, mode_t mode, 1476 int shmflags, const char *name __unused) 1477 { 1478 int initial_seals; 1479 1480 if ((shmflags & ~SHM_ALLOW_SEALING) != 0) 1481 return (EINVAL); 1482 1483 initial_seals = F_SEAL_SEAL; 1484 if ((shmflags & SHM_ALLOW_SEALING) != 0) 1485 initial_seals &= ~F_SEAL_SEAL; 1486 return (kern_shm_open(td, path, flags, mode, NULL, initial_seals)); 1487 } 1488 1489 /* 1490 * This version of the shm_open() interface leaves CLOEXEC behavior up to the 1491 * caller, and libc will enforce it for the traditional shm_open() call. This 1492 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC. This 1493 * interface also includes a 'name' argument that is currently unused, but could 1494 * potentially be exported later via some interface for debugging purposes. 1495 * From the kernel's perspective, it is optional. Individual consumers like 1496 * memfd_create() may require it in order to be compatible with other systems 1497 * implementing the same function. 1498 */ 1499 int 1500 sys_shm_open2(struct thread *td, struct shm_open2_args *uap) 1501 { 1502 1503 return (kern_shm_open2(td, uap->path, uap->flags, uap->mode, 1504 uap->shmflags, uap->name)); 1505 } 1506