/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).
 * While most of the implementation is here, vm_mmap.c contains
 * mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */
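
/*
 * Illustrative sketch (userspace, not part of this file): typical use of
 * the facilities implemented below.  A hypothetical consumer creates a
 * named object, sizes it with ftruncate(2), and maps it; "/example" is an
 * arbitrary name chosen for the example.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	example(void)
 *	{
 *		int fd;
 *		void *p;
 *
 *		fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *		if (fd < 0)
 *			return (-1);
 *		if (ftruncate(fd, 4096) != 0 ||
 *		    (p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0)) == MAP_FAILED) {
 *			close(fd);
 *			return (-1);
 *		}
 *		close(fd);
 *		return (0);
 *	}
 *
 * The mapping in p remains valid after close(2); the name persists until
 * shm_unlink(2).
 */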

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int	shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_ioctl_t	shm_ioctl;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;
static fo_get_seals_t	shm_get_seals;
static fo_add_seals_t	shm_add_seals;
static fo_fallocate_t	shm_fallocate;

/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_get_seals = shm_get_seals,
	.fo_add_seals = shm_add_seals,
	.fo_fallocate = shm_fallocate,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
	if (rv == VM_PAGER_OK)
		goto found;

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	VM_OBJECT_WLOCK(obj);
	m = vm_page_lookup(obj, idx);
	if (uio->uio_rw == UIO_READ && m == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out tobj's pages because tobj is a OBJT_SWAP
	 * type object.
	 */
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
		    obj, idx, rv);
		return (EIO);
	}
	VM_OBJECT_WUNLOCK(obj);

found:
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0)
		vm_page_set_dirty(m);
	vm_page_activate(m);
	vm_page_sunbusy(m);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}
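
/*
 * Illustrative sketch (userspace, hypothetical): the zero_region path in
 * uiomove_object_page() above means that read(2) from a hole in a sparse
 * object returns zeroes without instantiating backing pages, e.g.:
 *
 *	char buf[4096];
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0);
 *
 *	ftruncate(fd, 1024 * 1024);
 *	read(fd, buf, sizeof(buf));
 *
 * The ftruncate(2) call creates a sparse megabyte; the subsequent read
 * copies from zero_region rather than grabbing new pages.
 */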

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}
	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0)
		error = EPERM;
	else
		error = uiomove_object(shmfd->shm_object, shmfd->shm_size,
		    uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{

	switch (com) {
	case FIONBIO:
	case FIOASYNC:
		/*
		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
		 * just like it would on an unlinked regular file.
		 */
		return (0);
	default:
		return (ENOTTY);
	}
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sanish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;
	sb->st_nlink = shmfd->shm_object->ref_count;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
	int error;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;

	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;

	/* Construct a full pathname for jailed callers. */
	pr_pathlen = strcmp(pr_path, "/") ==
	    0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(userpath_in, path + pr_pathlen,
	    MAXPATHLEN - pr_pathlen, NULL);
	if (error != 0)
		goto out;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif

	/* Require paths to start with a '/' character. */
	if (path[pr_pathlen] != '/') {
		error = EINVAL;
		goto out;
	}

	*path_out = path;

out:
	if (error != 0)
		free(path, M_SHMFD);

	return (error);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
	if (length == shmfd->shm_size)
		return (0);
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);

		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
			if (m != NULL) {
				MPASS(vm_page_all_valid(m));
			} else if (vm_pager_has_page(object, idx, NULL,
			    NULL)) {
				m = vm_page_alloc(object, idx,
				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
				if (m == NULL)
					goto retry;
				vm_object_pip_add(object, 1);
				VM_OBJECT_WUNLOCK(object);
				rv = vm_pager_get_pages(object, &m, 1, NULL,
				    NULL);
				VM_OBJECT_WLOCK(object);
				vm_object_pip_wakeup(object);
				if (rv == VM_PAGER_OK) {
					/*
					 * Since the page was not resident,
					 * and therefore not recently
					 * accessed, immediately enqueue it
					 * for asynchronous laundering.  The
					 * current operation is not regarded
					 * as an access.
					 */
					vm_page_launder(m);
				} else {
					vm_page_free(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base,
				    PAGE_SIZE - base);
				KASSERT(vm_page_all_valid(m),
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_set_dirty(m);
				vm_page_xunbusy(m);
			}
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
			return (EPERM);

		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred))
			return (ENOMEM);
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	return (0);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	void *rl_cookie;
	int error;

	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);
	VM_OBJECT_WLOCK(shmfd->shm_object);
	error = shm_dotruncate_locked(shmfd, length, rl_cookie);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}
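
/*
 * Illustrative sketch (userspace, hypothetical): shm_dotruncate() above
 * backs ftruncate(2) on a shm descriptor, and a resize is refused with
 * EPERM once the corresponding seal is in place.  The example assumes a
 * sealable object, as created by the memfd_create(3) wrapper with
 * MFD_ALLOW_SEALING:
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *
 *	ftruncate(fd, 8192);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK);
 *	ftruncate(fd, 4096);
 *
 * The final call fails with EPERM because F_SEAL_SHRINK is set; growing
 * the object remains permitted until F_SEAL_GROW is added as well.
 */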

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	new_unrhdr64(&shm_ino_unr, 1);
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred,
			    map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	void *rl_cookie;
	Fnv32_t fnv;
	mode_t cmode;
	int error, fd, initial_seals;

	if ((shmflags & ~SHM_ALLOW_SEALING) != 0)
		return (EINVAL);

	initial_seals = F_SEAL_SEAL;
	if ((shmflags & SHM_ALLOW_SEALING) != 0)
		initial_seals &= ~F_SEAL_SEAL;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	AUDIT_ARG_FFLAGS(flags);
	AUDIT_ARG_MODE(mode);

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC |
	    O_CLOEXEC)) != 0)
		return (EINVAL);

	/*
	 * Currently only F_SEAL_SEAL may be set when creating or opening
	 * shmfd.  If the decision is made later to allow additional seals,
	 * care must be taken below to ensure that the seals are properly
	 * set if the shmfd already existed -- this currently assumes that
	 * only F_SEAL_SEAL can be set and doesn't take further precautions
	 * to ensure the validity of the seals being added with respect to
	 * current mappings.
	 */
	if ((initial_seals & ~F_SEAL_SEAL) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

	/*
	 * shm_open(2) created shm should always have O_CLOEXEC set, as
	 * mandated by POSIX.  We allow it to be unset here so that an
	 * in-kernel interface may be written as a thin layer around shm,
	 * optionally not setting CLOEXEC.  For shm_open(2), O_CLOEXEC is
	 * set unconditionally in sys_shm_open() to keep this implementation
	 * compliant.
	 */
	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
		shmfd->shm_seals = initial_seals;
	} else {
		error = shm_copyin_path(td, userpath, &path);
		if (error != 0) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}

		AUDIT_ARG_UPATH1_CANON(path);
		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shmfd->shm_seals = initial_seals;
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
			    &shmfd->shm_mtx);

			/*
			 * kern_shm_open() likely shouldn't ever error out on
			 * trying to set a seal that already exists, unlike
			 * F_ADD_SEALS.  This would break terribly as
			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
			 * historical behavior where the underlying file could
			 * not be sealed.
			 */
			initial_seals &= ~shmfd->shm_seals;

			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);

			/*
			 * initial_seals can't set additional seals if we've
			 * already been set F_SEAL_SEAL.  If F_SEAL_SEAL is
			 * set, then we've already removed that one from
			 * initial_seals.  This is currently redundant as we
			 * only allow setting F_SEAL_SEAL at creation time,
			 * but it's cheap to check and decreases the effort
			 * required to allow additional seals.
			 */
			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
			    initial_seals != 0)
				error = EPERM;
			else if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
				VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
				error = mac_posixshm_check_truncate(
				    td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					error = shm_dotruncate_locked(shmfd, 0,
					    rl_cookie);
				VM_OBJECT_WUNLOCK(shmfd->shm_object);
			}
			if (error == 0) {
				/*
				 * Currently we only allow F_SEAL_SEAL to be
				 * set initially.  As noted above, this would
				 * need to be reworked should that change.
				 */
				shmfd->shm_seals |= initial_seals;
				shm_hold(shmfd);
			}
			rangelock_unlock(&shmfd->shm_rl, rl_cookie,
			    &shmfd->shm_mtx);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
	    uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	error = shm_copyin_path(td, uap->path, &path);
	if (error != 0)
		return (error);

	AUDIT_ARG_UPATH1_CANON(path);
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_SHMFD);

	return (error);
}
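
/*
 * Illustrative sketch (userspace, hypothetical): kern_shm_open2() above
 * defaults to F_SEAL_SEAL so that descriptors from plain shm_open(2) can
 * never be sealed; only a consumer passing SHM_ALLOW_SEALING through
 * shm_open2(2) (as the memfd_create(3) wrapper does for
 * MFD_ALLOW_SEALING) gets a sealable object.  "/example" is arbitrary.
 *
 *	int fd1 = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	int fd2 = memfd_create("example", MFD_ALLOW_SEALING);
 *
 *	fcntl(fd1, F_ADD_SEALS, F_SEAL_GROW);
 *	fcntl(fd2, F_ADD_SEALS, F_SEAL_GROW);
 *
 * The first fcntl(2) fails with EPERM because F_SEAL_SEAL was applied at
 * creation; the second succeeds.
 */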

int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
	char *path_from = NULL, *path_to = NULL;
	Fnv32_t fnv_from, fnv_to;
	struct shmfd *fd_from;
	struct shmfd *fd_to;
	int error;
	int flags;

	flags = uap->flags;
	AUDIT_ARG_FFLAGS(flags);

	/*
	 * Make sure the user passed only valid flags.
	 * If you add a new flag, please add a new term here.
	 */
	if ((flags & ~(
	    SHM_RENAME_NOREPLACE |
	    SHM_RENAME_EXCHANGE
	    )) != 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * EXCHANGE and NOREPLACE don't quite make sense together.  Let's
	 * force the user to choose one or the other.
	 */
	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
	    (flags & SHM_RENAME_EXCHANGE) != 0) {
		error = EINVAL;
		goto out;
	}

	/* Renaming to or from anonymous makes no sense */
	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
		error = EINVAL;
		goto out;
	}

	error = shm_copyin_path(td, uap->path_from, &path_from);
	if (error != 0)
		goto out;

	error = shm_copyin_path(td, uap->path_to, &path_to);
	if (error != 0)
		goto out;

	AUDIT_ARG_UPATH1_CANON(path_from);
	AUDIT_ARG_UPATH2_CANON(path_to);

	/* Rename with from/to equal is a no-op */
	if (strcmp(path_from, path_to) == 0)
		goto out;

	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

	sx_xlock(&shm_dict_lock);

	fd_from = shm_lookup(path_from, fnv_from);
	if (fd_from == NULL) {
		error = ENOENT;
		goto out_locked;
	}

	fd_to = shm_lookup(path_to, fnv_to);
	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
		error = EEXIST;
		goto out_locked;
	}

	/*
	 * Unconditionally prevents shm_remove from invalidating the 'from'
	 * shm's state.
	 */
	shm_hold(fd_from);
	error = shm_remove(path_from, fnv_from, td->td_ucred);

	/*
	 * One of my assumptions failed if ENOENT (e.g. locking didn't
	 * protect us)
	 */
	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
	    path_from));
	if (error != 0) {
		shm_drop(fd_from);
		goto out_locked;
	}

	/*
	 * If we are exchanging, we need to ensure the shm_remove below
	 * doesn't invalidate the dest shm's state.
	 */
	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
		shm_hold(fd_to);

	/*
	 * NOTE: if path_to is not already in the hash, c'est la vie;
	 * it simply means we have nothing already at path_to to unlink.
	 * That is the ENOENT case.
	 *
	 * If we somehow don't have access to unlink this guy, but
	 * did for the shm at path_from, then relink the shm to path_from
	 * and abort with EACCES.
	 *
	 * All other errors: that is weird; let's relink and abort the
	 * operation.
	 */
	error = shm_remove(path_to, fnv_to, td->td_ucred);
	if (error != 0 && error != ENOENT) {
		shm_insert(path_from, fnv_from, fd_from);
		shm_drop(fd_from);
		/* Don't free path_from now, since the hash references it */
		path_from = NULL;
		goto out_locked;
	}

	error = 0;

	shm_insert(path_to, fnv_to, fd_from);

	/* Don't free path_to now, since the hash references it */
	path_to = NULL;

	/* We kept a ref when we removed, and incremented again in insert */
	shm_drop(fd_from);
	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
	    fd_from->shm_refs));

	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
		shm_insert(path_from, fnv_from, fd_to);
		path_from = NULL;
		shm_drop(fd_to);
		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
		    fd_to->shm_refs));
	}

out_locked:
	sx_xunlock(&shm_dict_lock);

out:
	free(path_from, M_SHMFD);
	free(path_to, M_SHMFD);
	return (error);
}
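
/*
 * Illustrative sketch (userspace, hypothetical): sys_shm_rename() above
 * implements atomic rename of named objects; SHM_RENAME_NOREPLACE refuses
 * to clobber an existing name, while SHM_RENAME_EXCHANGE swaps two names.
 * The names below are arbitrary.
 *
 *	shm_rename("/staging", "/live", 0);
 *	shm_rename("/staging", "/live", SHM_RENAME_NOREPLACE);
 *	shm_rename("/a", "/b", SHM_RENAME_EXCHANGE);
 *
 * The first call unlinks whatever was at "/live" and installs "/staging"
 * there; the second fails with EEXIST if "/live" already exists; the
 * third swaps the objects bound to "/a" and "/b".
 */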

int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;
	bool writecnt;
	void *rl_cookie;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
	    &shmfd->shm_mtx);
	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;

	/*
	 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
	 * mapping with a write seal applied.  Private mappings are always
	 * writeable.
	 */
	if ((flags & MAP_SHARED) == 0) {
		cap_maxprot |= VM_PROT_WRITE;
		maxprot |= VM_PROT_WRITE;
		writecnt = false;
	} else {
		if ((fp->f_flag & FWRITE) != 0 &&
		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
			maxprot |= VM_PROT_WRITE;

		/*
		 * Any mappings from a writable descriptor may be upgraded to
		 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
		 * applied between the open and subsequent mmap(2).  We want
		 * to reject application of a write seal as long as any such
		 * mapping exists so that the seal cannot be trivially
		 * bypassed.
		 */
		writecnt = (maxprot & VM_PROT_WRITE) != 0;
		if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
			error = EACCES;
			goto out;
		}
	}
	maxprot &= cap_maxprot;

	/* See comment in vn_mmap(). */
	if (
#ifdef _LP64
	    objsize > OFF_MAX ||
#endif
	    foff < 0 || foff > OFF_MAX - objsize) {
		error = EINVAL;
		goto out;
	}

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		goto out;
#endif

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	if (writecnt)
		vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
	    shmfd->shm_object, foff, writecnt, td);
	if (error != 0) {
		if (writecnt)
			vm_pager_release_writecount(shmfd->shm_object, 0,
			    objsize);
		vm_object_deallocate(shmfd->shm_object);
	}
out:
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}
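
/*
 * Illustrative sketch (in-kernel, hypothetical consumer): shm_map() and
 * shm_unmap() above let kernel code wire a window of a shm object given
 * its struct file.  Error handling is abbreviated, and the example
 * assumes the object is at least one page long.
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, PAGE_SIZE, 0, &mem);
 *	if (error == 0) {
 *		memset(mem, 0, PAGE_SIZE);
 *		error = shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 *
 * While such a mapping exists, shm_dotruncate() refuses to shrink the
 * object (EBUSY), since shm_kmappings is non-zero.
 */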

static int
shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
{
	const char *path, *pr_path;
	size_t pr_pathlen;
	bool visible;

	sx_assert(&shm_dict_lock, SA_LOCKED);
	kif->kf_type = KF_TYPE_SHM;
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		path = shmfd->shm_path;
		pr_path = curthread->td_ucred->cr_prison->pr_path;
		if (strcmp(pr_path, "/") != 0) {
			/* Return the jail-rooted pathname. */
			pr_pathlen = strlen(pr_path);
			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
			    path[pr_pathlen] == '/';
			if (list && !visible)
				return (EPERM);
			if (visible)
				path += pr_pathlen;
		}
		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
	}
	return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp __unused)
{
	int res;

	sx_slock(&shm_dict_lock);
	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
	sx_sunlock(&shm_dict_lock);
	return (res);
}

static int
shm_add_seals(struct file *fp, int seals)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	vm_ooffset_t writemappings;
	int error, nseals;

	error = 0;
	shmfd = fp->f_data;
	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);

	/* Even already-set seals should result in EPERM. */
	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
		error = EPERM;
		goto out;
	}
	nseals = seals & ~shmfd->shm_seals;
	if ((nseals & F_SEAL_WRITE) != 0) {
		/*
		 * The rangelock above prevents writable mappings from being
		 * added after we've started applying seals.  The RLOCK here
		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
		 * writemappings will be done without a rangelock.
		 */
		VM_OBJECT_RLOCK(shmfd->shm_object);
		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
		VM_OBJECT_RUNLOCK(shmfd->shm_object);
		/* kmappings are also writable */
		if (writemappings > 0) {
			error = EBUSY;
			goto out;
		}
	}
	shmfd->shm_seals |= nseals;
out:
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

static int
shm_get_seals(struct file *fp, int *seals)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	*seals = shmfd->shm_seals;
	return (0);
}

static int
shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
{
	void *rl_cookie;
	struct shmfd *shmfd;
	size_t size;
	int error;

	/* This assumes that the caller already checked for overflow. */
	error = 0;
	shmfd = fp->f_data;
	size = offset + len;

	/*
	 * Just grab the rangelock for the range that we may be attempting to
	 * grow, rather than blocking read/write for regions we won't be
	 * touching while this (potential) resize is in progress.  Other
	 * attempts to resize the shmfd will have to take a write lock from 0
	 * to OFF_MAX, so this being potentially beyond the current usable
	 * range of the shmfd is not necessarily a concern.  If other
	 * mechanisms are added to grow a shmfd, this may need to be
	 * re-evaluated.
	 */
	rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
	    &shmfd->shm_mtx);
	if (size > shmfd->shm_size) {
		VM_OBJECT_WLOCK(shmfd->shm_object);
		error = shm_dotruncate_locked(shmfd, size, rl_cookie);
		VM_OBJECT_WUNLOCK(shmfd->shm_object);
	}
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	/* Translate to posix_fallocate(2) return value as needed. */
	if (error == ENOMEM)
		error = ENOSPC;
	return (error);
}

static int
sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
{
	struct shm_mapping *shmm;
	struct sbuf sb;
	struct kinfo_file kif;
	u_long i;
	ssize_t curlen;
	int error, error2;

	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
	curlen = 0;
	error = 0;
	sx_slock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
			    &kif, true);
			if (error == EPERM)
				continue;
			if (error != 0)
				break;
			pack_kinfo(&kif);
			if (req->oldptr != NULL &&
			    kif.kf_structsize + curlen > req->oldlen)
				break;
			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
			    0 : ENOMEM;
			if (error != 0)
				break;
			curlen += kif.kf_structsize;
		}
	}
	sx_sunlock(&shm_dict_lock);
	error2 = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error != 0 ? error : error2);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
    CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
    NULL, 0, sysctl_posix_shm_list, "",
    "POSIX SHM list");

int
kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
    struct filecaps *caps)
{

	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
}

/*
 * This version of the shm_open() interface leaves CLOEXEC behavior up to
 * the caller, and libc will enforce it for the traditional shm_open()
 * call.  This allows other consumers, like memfd_create(), to opt-in for
 * CLOEXEC.  This interface also includes a 'name' argument that is
 * currently unused, but could potentially be exported later via some
 * interface for debugging purposes.  From the kernel's perspective, it is
 * optional.  Individual consumers like memfd_create() may require it in
 * order to be compatible with other systems implementing the same
 * function.
 */
int
sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
{

	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
	    uap->shmflags, NULL, uap->name));
}
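
/*
 * Illustrative sketch (userspace, hypothetical): shm_add_seals() and
 * shm_get_seals() above back fcntl(2)'s F_ADD_SEALS/F_GET_SEALS on shm
 * descriptors.  A shared writable mapping blocks F_SEAL_WRITE until it is
 * unmapped:
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	void *p;
 *
 *	ftruncate(fd, 4096);
 *	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);
 *	munmap(p, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);
 *
 * The first F_ADD_SEALS fails with EBUSY because writemappings is
 * non-zero; after the munmap(2) the seal can be applied, and subsequent
 * write(2) calls fail with EPERM.
 */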