/*-
 * Copyright (c) 2006, 2011 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * TODO:
 *
 * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
 *     and ipcrm(1) be expanded or should new tools to manage both POSIX
 *     kernel semaphores and POSIX shared memory be written?
 *
 * (2) Add support for this file type to fstat(1).
 *
 * (3) Resource limits?  Does this need its own resource limits or are the
 *     existing limits in mmap(2) sufficient?
 */
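
/*
 * For orientation, a minimal userland sketch of the interface implemented
 * below (illustrative only, not part of this file); error handling omitted:
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 1;
 *	shm_unlink("/example");
 */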

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr *shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static int	shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags);
static struct shmfd *shm_alloc(struct ucred *ucred, mode_t mode);
static void	shm_init(void *arg);
static void	shm_drop(struct shmfd *shmfd);
static struct shmfd *shm_hold(struct shmfd *shmfd);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int	shm_dotruncate(struct shmfd *shmfd, off_t length);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;

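/*
 * A note on the table below: operations that have no meaning for a shared
 * memory object (ioctl, poll, kqfilter) are routed to the generic invfo_*
 * handlers, which simply fail the request.  DFLAG_PASSABLE marks the
 * descriptor as passable over unix(4) domain sockets and DFLAG_SEEKABLE
 * marks it as having a seekable offset, implemented by shm_seek() below.
 */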
/* File descriptor operations. */
static struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = invfo_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although the tmpfs vnode lock may be held here (this helper
	 * is also used by tmpfs), it is nonetheless safe to sleep
	 * waiting for a free page.  The pageout daemon does not need
	 * to acquire that lock to page out this object's pages because
	 * the object is of OBJT_SWAP type.
	 */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL);
	if (m->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &m, 1, 0);
			m = vm_page_lookup(obj, idx);
			if (m == NULL) {
				printf(
		    "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
				    obj, idx, rv);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
			if (rv != VM_PAGER_OK) {
				printf(
		    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
				    obj, idx, m->valid, rv);
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
		} else
			vm_page_zero_invalid(m, TRUE);
	}
	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_hold(m);
	if (m->queue == PQ_NONE) {
		vm_page_deactivate(m);
	} else {
		/* Requeue to maintain LRU ordering. */
		vm_page_requeue(m);
	}
	vm_page_unlock(m);
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		vm_pager_page_unswapped(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	/*
	 * Perform the MAC check before any locks are taken, as shm_write()
	 * does, so that an access denial does not leak the file offset
	 * lock and the range lock.
	 */
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}

	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

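/*
 * fo_truncate handler: ftruncate(2) on a shm descriptor lands here and,
 * after the optional MAC check, the real work is done by shm_dotruncate()
 * below.
 */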
static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sanish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m, ma[1];
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	object = shmfd->shm_object;
	VM_OBJECT_WLOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_WUNLOCK(object);
			return (EBUSY);
		}

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if (vm_page_sleep_if_busy(m, "shmtrc"))
					goto retry;
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
				if (m == NULL) {
					VM_OBJECT_WUNLOCK(object);
					VM_WAIT;
					VM_OBJECT_WLOCK(object);
					goto retry;
				} else if (m->valid != VM_PAGE_BITS_ALL) {
					ma[0] = m;
					rv = vm_pager_get_pages(object, ma, 1,
					    0);
					m = vm_page_lookup(object, idx);
				} else
					/* A cached page was reactivated. */
					rv = VM_PAGER_OK;
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					vm_page_deactivate(m);
					vm_page_unlock(m);
					vm_page_xunbusy(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = ptoa(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);
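
		/*
		 * A concrete example with 4KB pages: shrinking a 16384-byte
		 * object to 9000 bytes leaves nobjsize at 3, zeroes bytes
		 * 808..4095 of page 2 and discards resident page 3 above;
		 * below, any swapped copy of page 3 is freed and the
		 * ptoa(1) == 4096 bytes of swap reservation charged for it
		 * are returned to the creating credential.
		 */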

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		/* Attempt to reserve the swap */
		delta = ptoa(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_WUNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
static struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;
	int ino;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
	VM_OBJECT_WLOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	ino = alloc_unr(shm_ino_unr);
	if (ino == -1)
		shmfd->shm_ino = 0;
	else
		shmfd->shm_ino = ino;
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

static struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

static void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		if (shmfd->shm_ino != 0)
			free_unr(shm_ino_unr, shmfd->shm_ino);
		free(shmfd, M_SHMFD);
	}
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
static int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
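/*
 * shm_dict_lock (an sx lock) serializes all lookups and updates of the
 * dictionary; the per-object mode, ownership and timestamps are protected
 * separately by shm_timestamp_lock.
 */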
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	shm_ino_unr = new_unrhdr(1, INT32_MAX, NULL);
	KASSERT(shm_ino_unr != NULL, ("shm fake inodes not initialized"));
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

/* System calls. */
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (uap->path != SHM_ANON))
		return (ECAPMODE);
#endif

	if ((uap->flags & O_ACCMODE) != O_RDONLY &&
	    (uap->flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((uap->flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC |
	    O_CLOEXEC)) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (uap->mode & ~fdp->fd_cmask) & ACCESSPERMS;

	error = falloc(td, &fp, &fd, O_CLOEXEC);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (uap->path == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((uap->flags & O_ACCMODE) == O_RDONLY) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
#ifdef KTRACE
		if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(path);
#endif
		/* Require paths to start with a '/' character. */
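		/*
		 * Note that beyond the leading '/' the name is not treated
		 * as a filesystem path; it is used verbatim as a key into
		 * the in-kernel dictionary, hashed with FNV-1 below.
		 */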
		if (error == 0 && path[0] != '/')
			error = EINVAL;
		if (error) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (uap->flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);
			if ((uap->flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(uap->flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(uap->flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (uap->flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
#ifdef MAC
				error = mac_posixshm_check_truncate(
				    td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					shm_dotruncate(shmfd, 0);
			}
			if (error == 0)
				shm_hold(shmfd);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(uap->flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
	if (error) {
		free(path, M_TEMP);
		return (error);
	}
#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_TEMP);

	return (error);
}

/*
 * mmap() helper to validate mmap() requests against shm object state
 * and give mmap() the vm_object to use for the mapping.
 */
int
shm_mmap(struct shmfd *shmfd, vm_size_t objsize, vm_ooffset_t foff,
    vm_object_t *obj)
{

	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (foff >= shmfd->shm_size ||
	    foff + objsize > round_page(shmfd->shm_size))
		return (EINVAL);

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);
	*obj = shmfd->shm_object;
	return (0);
}

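/*
 * fchmod(2)/fchown(2) handlers.  Mode and ownership changes are validated
 * against the caller's credential (and any MAC policy) and applied under
 * shm_timestamp_lock, which protects these fields.
 */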
static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}

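/*
 * Sketch of the intended in-kernel use of shm_map()/shm_unmap()
 * (illustrative only; assumes "fp" is a held DTYPE_SHM file and "len"
 * bytes at offset "off" fit within the object):
 *
 *	void *p;
 *	if (shm_map(fp, len, off, &p) == 0) {
 *		... access len bytes at p ...
 *		shm_unmap(fp, p, len);
 *	}
 */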
/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	struct shmfd *shmfd;

	kif->kf_type = KF_TYPE_SHM;
	shmfd = fp->f_data;

	mtx_lock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	mtx_unlock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		sx_slock(&shm_dict_lock);
		if (shmfd->shm_path != NULL)
			strlcpy(kif->kf_path, shmfd->shm_path,
			    sizeof(kif->kf_path));
		sx_sunlock(&shm_dict_lock);
	}
	return (0);
}