1 /*- 2 * Copyright (c) 2006, 2011 Robert N. M. Watson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 /* 28 * Support for shared swap-backed anonymous memory objects via 29 * shm_open(2) and shm_unlink(2). While most of the implementation is 30 * here, vm_mmap.c contains mapping logic changes. 31 * 32 * TODO: 33 * 34 * (1) Need to export data to a userland tool via a sysctl. Should ipcs(1) 35 * and ipcrm(1) be expanded or should new tools to manage both POSIX 36 * kernel semaphores and POSIX shared memory be written? 37 * 38 * (2) Add support for this file type to fstat(1). 39 * 40 * (3) Resource limits? Does this need its own resource limits or are the 41 * existing limits in mmap(2) sufficient? 42 * 43 * (4) Partial page truncation. vnode_pager_setsize() will zero any parts 44 * of a partially mapped page as a result of ftruncate(2)/truncate(2). 45 * We can do the same (with the same pmap evil), but do we need to 46 * worry about the bits on disk if the page is swapped out or will the 47 * swapper zero the parts of a page that are invalid if the page is 48 * swapped back in for us? 49 */ 50 51 #include <sys/cdefs.h> 52 __FBSDID("$FreeBSD$"); 53 54 #include "opt_capsicum.h" 55 56 #include <sys/param.h> 57 #include <sys/capability.h> 58 #include <sys/fcntl.h> 59 #include <sys/file.h> 60 #include <sys/filedesc.h> 61 #include <sys/fnv_hash.h> 62 #include <sys/kernel.h> 63 #include <sys/lock.h> 64 #include <sys/malloc.h> 65 #include <sys/mman.h> 66 #include <sys/mutex.h> 67 #include <sys/priv.h> 68 #include <sys/proc.h> 69 #include <sys/refcount.h> 70 #include <sys/resourcevar.h> 71 #include <sys/stat.h> 72 #include <sys/sysctl.h> 73 #include <sys/sysproto.h> 74 #include <sys/systm.h> 75 #include <sys/sx.h> 76 #include <sys/time.h> 77 #include <sys/vnode.h> 78 79 #include <security/mac/mac_framework.h> 80 81 #include <vm/vm.h> 82 #include <vm/vm_param.h> 83 #include <vm/pmap.h> 84 #include <vm/vm_extern.h> 85 #include <vm/vm_map.h> 86 #include <vm/vm_kern.h> 87 #include <vm/vm_object.h> 88 #include <vm/vm_page.h> 89 #include <vm/vm_pager.h> 90 #include <vm/swap_pager.h> 91 92 struct shm_mapping { 93 char *sm_path; 94 Fnv32_t sm_fnv; 95 struct shmfd *sm_shmfd; 96 LIST_ENTRY(shm_mapping) sm_link; 97 }; 98 99 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor"); 100 static LIST_HEAD(, shm_mapping) *shm_dictionary; 101 static struct sx shm_dict_lock; 102 static struct mtx shm_timestamp_lock; 103 static u_long shm_hash; 104 105 #define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash]) 106 107 static int shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags); 108 static struct shmfd *shm_alloc(struct ucred *ucred, mode_t mode); 109 static void shm_dict_init(void *arg); 110 static void shm_drop(struct shmfd *shmfd); 111 static struct shmfd *shm_hold(struct shmfd *shmfd); 112 static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd); 113 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv); 114 static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred); 115 static int shm_dotruncate(struct shmfd *shmfd, off_t length); 116 117 static fo_rdwr_t shm_read; 118 static fo_rdwr_t shm_write; 119 static fo_truncate_t shm_truncate; 120 static fo_ioctl_t shm_ioctl; 121 static fo_poll_t shm_poll; 122 static fo_kqfilter_t shm_kqfilter; 123 static fo_stat_t shm_stat; 124 static fo_close_t shm_close; 125 static fo_chmod_t shm_chmod; 126 static fo_chown_t shm_chown; 127 128 /* File descriptor operations. */ 129 static struct fileops shm_ops = { 130 .fo_read = shm_read, 131 .fo_write = shm_write, 132 .fo_truncate = shm_truncate, 133 .fo_ioctl = shm_ioctl, 134 .fo_poll = shm_poll, 135 .fo_kqfilter = shm_kqfilter, 136 .fo_stat = shm_stat, 137 .fo_close = shm_close, 138 .fo_chmod = shm_chmod, 139 .fo_chown = shm_chown, 140 .fo_flags = DFLAG_PASSABLE 141 }; 142 143 FEATURE(posix_shm, "POSIX shared memory"); 144 145 static int 146 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred, 147 int flags, struct thread *td) 148 { 149 150 return (EOPNOTSUPP); 151 } 152 153 static int 154 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred, 155 int flags, struct thread *td) 156 { 157 158 return (EOPNOTSUPP); 159 } 160 161 static int 162 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred, 163 struct thread *td) 164 { 165 struct shmfd *shmfd; 166 #ifdef MAC 167 int error; 168 #endif 169 170 shmfd = fp->f_data; 171 #ifdef MAC 172 error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd); 173 if (error) 174 return (error); 175 #endif 176 return (shm_dotruncate(shmfd, length)); 177 } 178 179 static int 180 shm_ioctl(struct file *fp, u_long com, void *data, 181 struct ucred *active_cred, struct thread *td) 182 { 183 184 return (EOPNOTSUPP); 185 } 186 187 static int 188 shm_poll(struct file *fp, int events, struct ucred *active_cred, 189 struct thread *td) 190 { 191 192 return (EOPNOTSUPP); 193 } 194 195 static int 196 shm_kqfilter(struct file *fp, struct knote *kn) 197 { 198 199 return (EOPNOTSUPP); 200 } 201 202 static int 203 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 204 struct thread *td) 205 { 206 struct shmfd *shmfd; 207 #ifdef MAC 208 int error; 209 #endif 210 211 shmfd = fp->f_data; 212 213 #ifdef MAC 214 error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd); 215 if (error) 216 return (error); 217 #endif 218 219 /* 220 * Attempt to return sanish values for fstat() on a memory file 221 * descriptor. 222 */ 223 bzero(sb, sizeof(*sb)); 224 sb->st_blksize = PAGE_SIZE; 225 sb->st_size = shmfd->shm_size; 226 sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize; 227 mtx_lock(&shm_timestamp_lock); 228 sb->st_atim = shmfd->shm_atime; 229 sb->st_ctim = shmfd->shm_ctime; 230 sb->st_mtim = shmfd->shm_mtime; 231 sb->st_birthtim = shmfd->shm_birthtime; 232 sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */ 233 sb->st_uid = shmfd->shm_uid; 234 sb->st_gid = shmfd->shm_gid; 235 mtx_unlock(&shm_timestamp_lock); 236 237 return (0); 238 } 239 240 static int 241 shm_close(struct file *fp, struct thread *td) 242 { 243 struct shmfd *shmfd; 244 245 shmfd = fp->f_data; 246 fp->f_data = NULL; 247 shm_drop(shmfd); 248 249 return (0); 250 } 251 252 static int 253 shm_dotruncate(struct shmfd *shmfd, off_t length) 254 { 255 vm_object_t object; 256 vm_page_t m; 257 vm_pindex_t nobjsize; 258 vm_ooffset_t delta; 259 260 object = shmfd->shm_object; 261 VM_OBJECT_LOCK(object); 262 if (length == shmfd->shm_size) { 263 VM_OBJECT_UNLOCK(object); 264 return (0); 265 } 266 nobjsize = OFF_TO_IDX(length + PAGE_MASK); 267 268 /* Are we shrinking? If so, trim the end. */ 269 if (length < shmfd->shm_size) { 270 /* 271 * Disallow any requests to shrink the size if this 272 * object is mapped into the kernel. 273 */ 274 if (shmfd->shm_kmappings > 0) { 275 VM_OBJECT_UNLOCK(object); 276 return (EBUSY); 277 } 278 delta = ptoa(object->size - nobjsize); 279 280 /* Toss in memory pages. */ 281 if (nobjsize < object->size) 282 vm_object_page_remove(object, nobjsize, object->size, 283 0); 284 285 /* Toss pages from swap. */ 286 if (object->type == OBJT_SWAP) 287 swap_pager_freespace(object, nobjsize, delta); 288 289 /* Free the swap accounted for shm */ 290 swap_release_by_cred(delta, object->cred); 291 object->charge -= delta; 292 293 /* 294 * If the last page is partially mapped, then zero out 295 * the garbage at the end of the page. See comments 296 * in vnode_pager_setsize() for more details. 297 * 298 * XXXJHB: This handles in memory pages, but what about 299 * a page swapped out to disk? 300 */ 301 if ((length & PAGE_MASK) && 302 (m = vm_page_lookup(object, OFF_TO_IDX(length))) != NULL && 303 m->valid != 0) { 304 int base = (int)length & PAGE_MASK; 305 int size = PAGE_SIZE - base; 306 307 pmap_zero_page_area(m, base, size); 308 309 /* 310 * Update the valid bits to reflect the blocks that 311 * have been zeroed. Some of these valid bits may 312 * have already been set. 313 */ 314 vm_page_set_valid_range(m, base, size); 315 316 /* 317 * Round "base" to the next block boundary so that the 318 * dirty bit for a partially zeroed block is not 319 * cleared. 320 */ 321 base = roundup2(base, DEV_BSIZE); 322 323 vm_page_clear_dirty(m, base, PAGE_SIZE - base); 324 } else if ((length & PAGE_MASK) && 325 __predict_false(object->cache != NULL)) { 326 vm_page_cache_free(object, OFF_TO_IDX(length), 327 nobjsize); 328 } 329 } else { 330 331 /* Attempt to reserve the swap */ 332 delta = ptoa(nobjsize - object->size); 333 if (!swap_reserve_by_cred(delta, object->cred)) { 334 VM_OBJECT_UNLOCK(object); 335 return (ENOMEM); 336 } 337 object->charge += delta; 338 } 339 shmfd->shm_size = length; 340 mtx_lock(&shm_timestamp_lock); 341 vfs_timestamp(&shmfd->shm_ctime); 342 shmfd->shm_mtime = shmfd->shm_ctime; 343 mtx_unlock(&shm_timestamp_lock); 344 object->size = nobjsize; 345 VM_OBJECT_UNLOCK(object); 346 return (0); 347 } 348 349 /* 350 * shmfd object management including creation and reference counting 351 * routines. 352 */ 353 static struct shmfd * 354 shm_alloc(struct ucred *ucred, mode_t mode) 355 { 356 struct shmfd *shmfd; 357 358 shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO); 359 shmfd->shm_size = 0; 360 shmfd->shm_uid = ucred->cr_uid; 361 shmfd->shm_gid = ucred->cr_gid; 362 shmfd->shm_mode = mode; 363 shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL, 364 shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred); 365 KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate")); 366 VM_OBJECT_LOCK(shmfd->shm_object); 367 vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING); 368 vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT); 369 VM_OBJECT_UNLOCK(shmfd->shm_object); 370 vfs_timestamp(&shmfd->shm_birthtime); 371 shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime = 372 shmfd->shm_birthtime; 373 refcount_init(&shmfd->shm_refs, 1); 374 #ifdef MAC 375 mac_posixshm_init(shmfd); 376 mac_posixshm_create(ucred, shmfd); 377 #endif 378 379 return (shmfd); 380 } 381 382 static struct shmfd * 383 shm_hold(struct shmfd *shmfd) 384 { 385 386 refcount_acquire(&shmfd->shm_refs); 387 return (shmfd); 388 } 389 390 static void 391 shm_drop(struct shmfd *shmfd) 392 { 393 394 if (refcount_release(&shmfd->shm_refs)) { 395 #ifdef MAC 396 mac_posixshm_destroy(shmfd); 397 #endif 398 vm_object_deallocate(shmfd->shm_object); 399 free(shmfd, M_SHMFD); 400 } 401 } 402 403 /* 404 * Determine if the credentials have sufficient permissions for a 405 * specified combination of FREAD and FWRITE. 406 */ 407 static int 408 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags) 409 { 410 accmode_t accmode; 411 int error; 412 413 accmode = 0; 414 if (flags & FREAD) 415 accmode |= VREAD; 416 if (flags & FWRITE) 417 accmode |= VWRITE; 418 mtx_lock(&shm_timestamp_lock); 419 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, 420 accmode, ucred, NULL); 421 mtx_unlock(&shm_timestamp_lock); 422 return (error); 423 } 424 425 /* 426 * Dictionary management. We maintain an in-kernel dictionary to map 427 * paths to shmfd objects. We use the FNV hash on the path to store 428 * the mappings in a hash table. 429 */ 430 static void 431 shm_dict_init(void *arg) 432 { 433 434 mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF); 435 sx_init(&shm_dict_lock, "shm dictionary"); 436 shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash); 437 } 438 SYSINIT(shm_dict_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_dict_init, NULL); 439 440 static struct shmfd * 441 shm_lookup(char *path, Fnv32_t fnv) 442 { 443 struct shm_mapping *map; 444 445 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { 446 if (map->sm_fnv != fnv) 447 continue; 448 if (strcmp(map->sm_path, path) == 0) 449 return (map->sm_shmfd); 450 } 451 452 return (NULL); 453 } 454 455 static void 456 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd) 457 { 458 struct shm_mapping *map; 459 460 map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK); 461 map->sm_path = path; 462 map->sm_fnv = fnv; 463 map->sm_shmfd = shm_hold(shmfd); 464 LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link); 465 } 466 467 static int 468 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred) 469 { 470 struct shm_mapping *map; 471 int error; 472 473 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { 474 if (map->sm_fnv != fnv) 475 continue; 476 if (strcmp(map->sm_path, path) == 0) { 477 #ifdef MAC 478 error = mac_posixshm_check_unlink(ucred, map->sm_shmfd); 479 if (error) 480 return (error); 481 #endif 482 error = shm_access(map->sm_shmfd, ucred, 483 FREAD | FWRITE); 484 if (error) 485 return (error); 486 LIST_REMOVE(map, sm_link); 487 shm_drop(map->sm_shmfd); 488 free(map->sm_path, M_SHMFD); 489 free(map, M_SHMFD); 490 return (0); 491 } 492 } 493 494 return (ENOENT); 495 } 496 497 /* System calls. */ 498 int 499 sys_shm_open(struct thread *td, struct shm_open_args *uap) 500 { 501 struct filedesc *fdp; 502 struct shmfd *shmfd; 503 struct file *fp; 504 char *path; 505 Fnv32_t fnv; 506 mode_t cmode; 507 int fd, error; 508 509 #ifdef CAPABILITY_MODE 510 /* 511 * shm_open(2) is only allowed for anonymous objects. 512 */ 513 if (IN_CAPABILITY_MODE(td) && (uap->path != SHM_ANON)) 514 return (ECAPMODE); 515 #endif 516 517 if ((uap->flags & O_ACCMODE) != O_RDONLY && 518 (uap->flags & O_ACCMODE) != O_RDWR) 519 return (EINVAL); 520 521 if ((uap->flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC)) != 0) 522 return (EINVAL); 523 524 fdp = td->td_proc->p_fd; 525 cmode = (uap->mode & ~fdp->fd_cmask) & ACCESSPERMS; 526 527 error = falloc(td, &fp, &fd, 0); 528 if (error) 529 return (error); 530 531 /* A SHM_ANON path pointer creates an anonymous object. */ 532 if (uap->path == SHM_ANON) { 533 /* A read-only anonymous object is pointless. */ 534 if ((uap->flags & O_ACCMODE) == O_RDONLY) { 535 fdclose(fdp, fp, fd, td); 536 fdrop(fp, td); 537 return (EINVAL); 538 } 539 shmfd = shm_alloc(td->td_ucred, cmode); 540 } else { 541 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK); 542 error = copyinstr(uap->path, path, MAXPATHLEN, NULL); 543 544 /* Require paths to start with a '/' character. */ 545 if (error == 0 && path[0] != '/') 546 error = EINVAL; 547 if (error) { 548 fdclose(fdp, fp, fd, td); 549 fdrop(fp, td); 550 free(path, M_SHMFD); 551 return (error); 552 } 553 554 fnv = fnv_32_str(path, FNV1_32_INIT); 555 sx_xlock(&shm_dict_lock); 556 shmfd = shm_lookup(path, fnv); 557 if (shmfd == NULL) { 558 /* Object does not yet exist, create it if requested. */ 559 if (uap->flags & O_CREAT) { 560 #ifdef MAC 561 error = mac_posixshm_check_create(td->td_ucred, 562 path); 563 if (error == 0) { 564 #endif 565 shmfd = shm_alloc(td->td_ucred, cmode); 566 shm_insert(path, fnv, shmfd); 567 #ifdef MAC 568 } 569 #endif 570 } else { 571 free(path, M_SHMFD); 572 error = ENOENT; 573 } 574 } else { 575 /* 576 * Object already exists, obtain a new 577 * reference if requested and permitted. 578 */ 579 free(path, M_SHMFD); 580 if ((uap->flags & (O_CREAT | O_EXCL)) == 581 (O_CREAT | O_EXCL)) 582 error = EEXIST; 583 else { 584 #ifdef MAC 585 error = mac_posixshm_check_open(td->td_ucred, 586 shmfd, FFLAGS(uap->flags & O_ACCMODE)); 587 if (error == 0) 588 #endif 589 error = shm_access(shmfd, td->td_ucred, 590 FFLAGS(uap->flags & O_ACCMODE)); 591 } 592 593 /* 594 * Truncate the file back to zero length if 595 * O_TRUNC was specified and the object was 596 * opened with read/write. 597 */ 598 if (error == 0 && 599 (uap->flags & (O_ACCMODE | O_TRUNC)) == 600 (O_RDWR | O_TRUNC)) { 601 #ifdef MAC 602 error = mac_posixshm_check_truncate( 603 td->td_ucred, fp->f_cred, shmfd); 604 if (error == 0) 605 #endif 606 shm_dotruncate(shmfd, 0); 607 } 608 if (error == 0) 609 shm_hold(shmfd); 610 } 611 sx_xunlock(&shm_dict_lock); 612 613 if (error) { 614 fdclose(fdp, fp, fd, td); 615 fdrop(fp, td); 616 return (error); 617 } 618 } 619 620 finit(fp, FFLAGS(uap->flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops); 621 622 FILEDESC_XLOCK(fdp); 623 if (fdp->fd_ofiles[fd] == fp) 624 fdp->fd_ofileflags[fd] |= UF_EXCLOSE; 625 FILEDESC_XUNLOCK(fdp); 626 td->td_retval[0] = fd; 627 fdrop(fp, td); 628 629 return (0); 630 } 631 632 int 633 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap) 634 { 635 char *path; 636 Fnv32_t fnv; 637 int error; 638 639 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 640 error = copyinstr(uap->path, path, MAXPATHLEN, NULL); 641 if (error) { 642 free(path, M_TEMP); 643 return (error); 644 } 645 646 fnv = fnv_32_str(path, FNV1_32_INIT); 647 sx_xlock(&shm_dict_lock); 648 error = shm_remove(path, fnv, td->td_ucred); 649 sx_xunlock(&shm_dict_lock); 650 free(path, M_TEMP); 651 652 return (error); 653 } 654 655 /* 656 * mmap() helper to validate mmap() requests against shm object state 657 * and give mmap() the vm_object to use for the mapping. 658 */ 659 int 660 shm_mmap(struct shmfd *shmfd, vm_size_t objsize, vm_ooffset_t foff, 661 vm_object_t *obj) 662 { 663 664 /* 665 * XXXRW: This validation is probably insufficient, and subject to 666 * sign errors. It should be fixed. 667 */ 668 if (foff >= shmfd->shm_size || 669 foff + objsize > round_page(shmfd->shm_size)) 670 return (EINVAL); 671 672 mtx_lock(&shm_timestamp_lock); 673 vfs_timestamp(&shmfd->shm_atime); 674 mtx_unlock(&shm_timestamp_lock); 675 vm_object_reference(shmfd->shm_object); 676 *obj = shmfd->shm_object; 677 return (0); 678 } 679 680 static int 681 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, 682 struct thread *td) 683 { 684 struct shmfd *shmfd; 685 int error; 686 687 error = 0; 688 shmfd = fp->f_data; 689 mtx_lock(&shm_timestamp_lock); 690 /* 691 * SUSv4 says that x bits of permission need not be affected. 692 * Be consistent with our shm_open there. 693 */ 694 #ifdef MAC 695 error = mac_posixshm_check_setmode(active_cred, shmfd, mode); 696 if (error != 0) 697 goto out; 698 #endif 699 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, 700 shmfd->shm_gid, VADMIN, active_cred, NULL); 701 if (error != 0) 702 goto out; 703 shmfd->shm_mode = mode & ACCESSPERMS; 704 out: 705 mtx_unlock(&shm_timestamp_lock); 706 return (error); 707 } 708 709 static int 710 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, 711 struct thread *td) 712 { 713 struct shmfd *shmfd; 714 int error; 715 716 error = 0; 717 shmfd = fp->f_data; 718 mtx_lock(&shm_timestamp_lock); 719 #ifdef MAC 720 error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid); 721 if (error != 0) 722 goto out; 723 #endif 724 if (uid == (uid_t)-1) 725 uid = shmfd->shm_uid; 726 if (gid == (gid_t)-1) 727 gid = shmfd->shm_gid; 728 if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) || 729 (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) && 730 (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0))) 731 goto out; 732 shmfd->shm_uid = uid; 733 shmfd->shm_gid = gid; 734 out: 735 mtx_unlock(&shm_timestamp_lock); 736 return (error); 737 } 738 739 /* 740 * Helper routines to allow the backing object of a shared memory file 741 * descriptor to be mapped in the kernel. 742 */ 743 int 744 shm_map(struct file *fp, size_t size, off_t offset, void **memp) 745 { 746 struct shmfd *shmfd; 747 vm_offset_t kva, ofs; 748 vm_object_t obj; 749 int rv; 750 751 if (fp->f_type != DTYPE_SHM) 752 return (EINVAL); 753 shmfd = fp->f_data; 754 obj = shmfd->shm_object; 755 VM_OBJECT_LOCK(obj); 756 /* 757 * XXXRW: This validation is probably insufficient, and subject to 758 * sign errors. It should be fixed. 759 */ 760 if (offset >= shmfd->shm_size || 761 offset + size > round_page(shmfd->shm_size)) { 762 VM_OBJECT_UNLOCK(obj); 763 return (EINVAL); 764 } 765 766 shmfd->shm_kmappings++; 767 vm_object_reference_locked(obj); 768 VM_OBJECT_UNLOCK(obj); 769 770 /* Map the object into the kernel_map and wire it. */ 771 kva = vm_map_min(kernel_map); 772 ofs = offset & PAGE_MASK; 773 offset = trunc_page(offset); 774 size = round_page(size + ofs); 775 rv = vm_map_find(kernel_map, obj, offset, &kva, size, 776 VMFS_ALIGNED_SPACE, VM_PROT_READ | VM_PROT_WRITE, 777 VM_PROT_READ | VM_PROT_WRITE, 0); 778 if (rv == KERN_SUCCESS) { 779 rv = vm_map_wire(kernel_map, kva, kva + size, 780 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); 781 if (rv == KERN_SUCCESS) { 782 *memp = (void *)(kva + ofs); 783 return (0); 784 } 785 vm_map_remove(kernel_map, kva, kva + size); 786 } else 787 vm_object_deallocate(obj); 788 789 /* On failure, drop our mapping reference. */ 790 VM_OBJECT_LOCK(obj); 791 shmfd->shm_kmappings--; 792 VM_OBJECT_UNLOCK(obj); 793 794 return (vm_mmap_to_errno(rv)); 795 } 796 797 /* 798 * We require the caller to unmap the entire entry. This allows us to 799 * safely decrement shm_kmappings when a mapping is removed. 800 */ 801 int 802 shm_unmap(struct file *fp, void *mem, size_t size) 803 { 804 struct shmfd *shmfd; 805 vm_map_entry_t entry; 806 vm_offset_t kva, ofs; 807 vm_object_t obj; 808 vm_pindex_t pindex; 809 vm_prot_t prot; 810 boolean_t wired; 811 vm_map_t map; 812 int rv; 813 814 if (fp->f_type != DTYPE_SHM) 815 return (EINVAL); 816 shmfd = fp->f_data; 817 kva = (vm_offset_t)mem; 818 ofs = kva & PAGE_MASK; 819 kva = trunc_page(kva); 820 size = round_page(size + ofs); 821 map = kernel_map; 822 rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry, 823 &obj, &pindex, &prot, &wired); 824 if (rv != KERN_SUCCESS) 825 return (EINVAL); 826 if (entry->start != kva || entry->end != kva + size) { 827 vm_map_lookup_done(map, entry); 828 return (EINVAL); 829 } 830 vm_map_lookup_done(map, entry); 831 if (obj != shmfd->shm_object) 832 return (EINVAL); 833 vm_map_remove(map, kva, kva + size); 834 VM_OBJECT_LOCK(obj); 835 KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped")); 836 shmfd->shm_kmappings--; 837 VM_OBJECT_UNLOCK(obj); 838 return (0); 839 } 840