/*-
 * Copyright (c) 2006, 2011 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * TODO:
 *
 * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
 *     and ipcrm(1) be expanded or should new tools to manage both POSIX
 *     kernel semaphores and POSIX shared memory be written?
 *
 * (2) Add support for this file type to fstat(1).
 *
 * (3) Resource limits?  Does this need its own resource limits or are the
 *     existing limits in mmap(2) sufficient?
 */
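
/*
 * Illustrative userland usage of the interface implemented below
 * (editorial sketch, not part of this file; the name "/myshm" is an
 * arbitrary example): create an object, size it, and map it.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/myshm", O_CREAT | O_RDWR, 0600);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0) {
 *		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *		...
 *	}
 *	close(fd);
 *	shm_unlink("/myshm");
 */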

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr *shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;

/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = invfo_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out obj's pages because obj is an OBJT_SWAP
	 * type object.
	 */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(m);
		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
			if (rv != VM_PAGER_OK) {
				printf(
	    "uiomove_object: vm_obj %p idx %ju valid %x pager error %d\n",
				    obj, (uintmax_t)idx, m->valid, rv);
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
		} else
			vm_page_zero_invalid(m, TRUE);
		vm_page_xunbusy(m);
	}
	vm_page_lock(m);
	vm_page_hold(m);
	if (m->queue == PQ_NONE) {
		vm_page_deactivate(m);
	} else {
		/* Requeue to maintain LRU ordering. */
		vm_page_requeue(m);
	}
	vm_page_unlock(m);
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		vm_pager_page_unswapped(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}
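
/*
 * Usage note (editorial): uiomove_object() is the copy path between a
 * uio and a swap-backed object, e.g. from shm_read()/shm_write() below:
 *
 *	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
 *
 * The tmpfs references in the comments above reflect that tmpfs regular
 * files are believed to share this helper for their backing objects.  A
 * UIO_READ of a never-written range copies from zero_region and leaves
 * the object's resident page count unchanged.
 */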

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}

	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sane values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}
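
/*
 * Worked example for shm_dotruncate() below (editorial; assumes 4 KiB
 * pages): shrinking a 10000-byte object (3 pages) to length 5000 gives
 * base = 5000 & PAGE_MASK = 904, so bytes 5000..8191 (the tail of the
 * last remaining page) are zeroed, pages from the new object size
 * nobjsize = 2 onward are discarded along with their swap space, and
 * the released pages are credited back against the owner's swap
 * reservation.  Growing instead reserves swap up front, failing with
 * ENOMEM if the credential's limit would be exceeded.
 */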
int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	object = shmfd->shm_object;
	VM_OBJECT_WLOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_WUNLOCK(object);
			return (EBUSY);
		}

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if (vm_page_sleep_if_busy(m, "shmtrc"))
					goto retry;
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
				if (m == NULL) {
					VM_OBJECT_WUNLOCK(object);
					VM_WAIT;
					VM_OBJECT_WLOCK(object);
					goto retry;
				} else if (m->valid != VM_PAGE_BITS_ALL)
					rv = vm_pager_get_pages(object, &m, 1,
					    NULL, NULL);
				else
					/* A cached page was reactivated. */
					rv = VM_PAGER_OK;
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					vm_page_deactivate(m);
					vm_page_unlock(m);
					vm_page_xunbusy(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = ptoa(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		/* Attempt to reserve the swap */
		delta = ptoa(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_WUNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;
	int ino;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
	shmfd->shm_object->pg_color = 0;
	VM_OBJECT_WLOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	ino = alloc_unr(shm_ino_unr);
	if (ino == -1)
		shmfd->shm_ino = 0;
	else
		shmfd->shm_ino = ino;
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		if (shmfd->shm_ino != 0)
			free_unr(shm_ino_unr, shmfd->shm_ino);
		free(shmfd, M_SHMFD);
	}
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
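
/*
 * For example (editorial sketch), resolving a name to its shmfd
 * proceeds as in kern_shm_open() below:
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);
 *	...
 *	sx_xunlock(&shm_dict_lock);
 *
 * SHM_HASH(fnv) only selects a bucket; shm_lookup() compares both the
 * stored fnv and the full path, so hash collisions are handled.
 */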
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	shm_ino_unr = new_unrhdr(1, INT32_MAX, NULL);
	KASSERT(shm_ino_unr != NULL, ("shm fake inodes not initialized"));
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

int
kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
    struct filecaps *fcaps)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

	error = falloc_caps(td, &fp, &fd, O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		pr_path = td->td_ucred->cr_prison->pr_path;

		/* Construct a full pathname for jailed callers. */
		pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
		    : strlcpy(path, pr_path, MAXPATHLEN);
		error = copyinstr(userpath, path + pr_pathlen,
		    MAXPATHLEN - pr_pathlen, NULL);
#ifdef KTRACE
		if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(path);
#endif
		/* Require paths to start with a '/' character. */
		if (error == 0 && path[pr_pathlen] != '/')
			error = EINVAL;
		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);
			if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
#ifdef MAC
				error = mac_posixshm_check_truncate(
				    td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					shm_dotruncate(shmfd, 0);
			}
			if (error == 0)
				shm_hold(shmfd);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

/* System calls. */
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags, uap->mode, NULL));
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	const char *pr_path;
	size_t pr_pathlen;
	Fnv32_t fnv;
	int error;

	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
	    : strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen,
	    NULL);
	if (error) {
		free(path, M_TEMP);
		return (error);
	}
#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_TEMP);

	return (error);
}

int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
	if ((fp->f_flag & FWRITE) != 0)
		maxprot |= VM_PROT_WRITE;

	/* Don't permit shared writable mappings on read-only descriptors. */
	if ((flags & MAP_SHARED) != 0 &&
	    (maxprot & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	maxprot &= cap_maxprot;

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		return (error);
#endif

	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (foff >= shmfd->shm_size ||
	    foff + objsize > round_page(shmfd->shm_size))
		return (EINVAL);

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
	    shmfd->shm_object, foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(shmfd->shm_object);
	return (error);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
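
/*
 * Illustrative in-kernel usage (editorial sketch; "fp" is assumed to be
 * a DTYPE_SHM file held by the caller):
 *
 *	void *mem;
 *	error = shm_map(fp, len, off, &mem);
 *	if (error == 0) {
 *		... access len bytes at mem; the pages stay wired ...
 *		error = shm_unmap(fp, mem, len);
 *	}
 *
 * shm_unmap() must be given the same mem/len pair so that the entire
 * map entry is removed and shm_kmappings stays balanced.
 */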
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	const char *path, *pr_path;
	struct shmfd *shmfd;
	size_t pr_pathlen;

	kif->kf_type = KF_TYPE_SHM;
	shmfd = fp->f_data;

	mtx_lock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	mtx_unlock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		sx_slock(&shm_dict_lock);
		if (shmfd->shm_path != NULL) {
			path = shmfd->shm_path;
			pr_path = curthread->td_ucred->cr_prison->pr_path;
			if (strcmp(pr_path, "/") != 0) {
				/* Return the jail-rooted pathname. */
				pr_pathlen = strlen(pr_path);
				if (strncmp(path, pr_path, pr_pathlen) == 0 &&
				    path[pr_pathlen] == '/')
					path += pr_pathlen;
			}
			strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
		}
		sx_sunlock(&shm_dict_lock);
	}
	return (0);
}