1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * System call I/F to doors (outside of vnodes I/F) and misc support 31 * routines 32 */ 33 #include <sys/types.h> 34 #include <sys/systm.h> 35 #include <sys/door.h> 36 #include <sys/door_data.h> 37 #include <sys/proc.h> 38 #include <sys/thread.h> 39 #include <sys/class.h> 40 #include <sys/cred.h> 41 #include <sys/kmem.h> 42 #include <sys/cmn_err.h> 43 #include <sys/stack.h> 44 #include <sys/debug.h> 45 #include <sys/cpuvar.h> 46 #include <sys/file.h> 47 #include <sys/fcntl.h> 48 #include <sys/vnode.h> 49 #include <sys/vfs.h> 50 #include <sys/vfs_opreg.h> 51 #include <sys/sobject.h> 52 #include <sys/schedctl.h> 53 #include <sys/callb.h> 54 #include <sys/ucred.h> 55 56 #include <sys/mman.h> 57 #include <sys/sysmacros.h> 58 #include <sys/vmsystm.h> 59 #include <vm/as.h> 60 #include <vm/hat.h> 61 #include <vm/page.h> 62 #include <vm/seg.h> 63 #include <vm/seg_vn.h> 64 #include <vm/seg_vn.h> 65 66 #include <sys/modctl.h> 67 #include <sys/syscall.h> 68 #include 
<sys/pathname.h> 69 #include <sys/rctl.h> 70 71 /* 72 * The maximum amount of data (in bytes) that will be transferred using 73 * an intermediate kernel buffer. For sizes greater than this we map 74 * in the destination pages and perform a 1-copy transfer. 75 */ 76 size_t door_max_arg = 16 * 1024; 77 78 /* 79 * Maximum amount of data that will be transferred in a reply to a 80 * door_upcall. Need to guard against a process returning huge amounts 81 * of data and getting the kernel stuck in kmem_alloc. 82 */ 83 size_t door_max_upcall_reply = 1024 * 1024; 84 85 /* 86 * Maximum number of descriptors allowed to be passed in a single 87 * door_call or door_return. We need to allocate kernel memory 88 * for all of them at once, so we can't let it scale without limit. 89 */ 90 uint_t door_max_desc = 1024; 91 92 /* 93 * Definition of a door handle, used by other kernel subsystems when 94 * calling door functions. This is really a file structure but we 95 * want to hide that fact. 96 */ 97 struct __door_handle { 98 file_t dh_file; 99 }; 100 101 #define DHTOF(dh) ((file_t *)(dh)) 102 #define FTODH(fp) ((door_handle_t)(fp)) 103 104 static int doorfs(long, long, long, long, long, long); 105 106 static struct sysent door_sysent = { 107 6, 108 SE_ARGC | SE_NOUNLOAD, 109 (int (*)())doorfs, 110 }; 111 112 static struct modlsys modlsys = { 113 &mod_syscallops, "doors", &door_sysent 114 }; 115 116 #ifdef _SYSCALL32_IMPL 117 118 static int 119 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4, 120 int32_t arg5, int32_t subcode); 121 122 static struct sysent door_sysent32 = { 123 6, 124 SE_ARGC | SE_NOUNLOAD, 125 (int (*)())doorfs32, 126 }; 127 128 static struct modlsys modlsys32 = { 129 &mod_syscallops32, 130 "32-bit door syscalls", 131 &door_sysent32 132 }; 133 #endif 134 135 static struct modlinkage modlinkage = { 136 MODREV_1, 137 &modlsys, 138 #ifdef _SYSCALL32_IMPL 139 &modlsys32, 140 #endif 141 NULL 142 }; 143 144 dev_t doordev; 145 146 extern struct vfs door_vfs; 
extern	struct vnodeops *door_vnodeops;

/*
 * Module initialization: set up the global door lock, allocate a device
 * number for the dummy door vfs, and register the door vnode operations
 * and the doors system call.
 */
int
_init(void)
{
	static const fs_operation_def_t door_vfsops_template[] = {
		NULL, NULL
	};
	extern const fs_operation_def_t door_vnodeops_template[];
	vfsops_t *door_vfsops;
	major_t major;
	int error;

	mutex_init(&door_knob, NULL, MUTEX_DEFAULT, NULL);
	if ((major = getudev()) == (major_t)-1)
		return (ENXIO);
	doordev = makedevice(major, 0);

	/* Create a dummy vfs */
	error = vfs_makefsops(door_vfsops_template, &door_vfsops);
	if (error != 0) {
		cmn_err(CE_WARN, "door init: bad vfs ops");
		return (error);
	}
	VFS_INIT(&door_vfs, door_vfsops, NULL);
	door_vfs.vfs_flag = VFS_RDONLY;
	door_vfs.vfs_dev = doordev;
	vfs_make_fsid(&(door_vfs.vfs_fsid), doordev, 0);

	error = vn_make_ops("doorfs", door_vnodeops_template, &door_vnodeops);
	if (error != 0) {
		vfs_freevfsops(door_vfsops);
		cmn_err(CE_WARN, "door init: bad vnode ops");
		return (error);
	}
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* system call functions */
static int door_call(int, void *);
static int door_return(caddr_t, size_t, door_desc_t *, uint_t, caddr_t, size_t);
static int door_create(void (*pc_cookie)(void *, char *, size_t, door_desc_t *,
    uint_t), void *data_cookie, uint_t);
static int door_revoke(int);
static int door_info(int, struct door_info *);
static int door_ucred(struct ucred_s *);
static int door_bind(int);
static int door_unbind(void);
static int door_unref(void);
static int door_getparam(int, int, size_t *);
static int door_setparam(int, int, size_t);

#define	DOOR_RETURN_OLD	4		/* historic value, for s10 */

/*
 * System call wrapper for all door related system calls
 */
static int
doorfs(long arg1, long arg2, long arg3, long arg4, long arg5, long subcode)
{
	switch (subcode) {
	case DOOR_CALL:
		return (door_call(arg1, (void *)arg2));
	case DOOR_RETURN: {
		door_return_desc_t *drdp = (door_return_desc_t *)arg3;

		if (drdp != NULL) {
			door_return_desc_t drd;
			if (copyin(drdp, &drd, sizeof (drd)))
				return (EFAULT);
			return (door_return((caddr_t)arg1, arg2, drd.desc_ptr,
			    drd.desc_num, (caddr_t)arg4, arg5));
		}
		return (door_return((caddr_t)arg1, arg2, NULL,
		    0, (caddr_t)arg4, arg5));
	}
	case DOOR_RETURN_OLD:
		/*
		 * In order to support the S10 runtime environment, we
		 * still respond to the old syscall subcode for door_return.
		 * We treat it as having no stack limits.  This code should
		 * be removed when such support is no longer needed.
		 */
		return (door_return((caddr_t)arg1, arg2, (door_desc_t *)arg3,
		    arg4, (caddr_t)arg5, 0));
	case DOOR_CREATE:
		return (door_create((void (*)())arg1, (void *)arg2, arg3));
	case DOOR_REVOKE:
		return (door_revoke(arg1));
	case DOOR_INFO:
		return (door_info(arg1, (struct door_info *)arg2));
	case DOOR_BIND:
		return (door_bind(arg1));
	case DOOR_UNBIND:
		return (door_unbind());
	case DOOR_UNREFSYS:
		return (door_unref());
	case DOOR_UCRED:
		return (door_ucred((struct ucred_s *)arg1));
	case DOOR_GETPARAM:
		return (door_getparam(arg1, arg2, (size_t *)arg3));
	case DOOR_SETPARAM:
		return (door_setparam(arg1, arg2, arg3));
	default:
		return (set_errno(EINVAL));
	}
}

#ifdef _SYSCALL32_IMPL
/*
 * System call wrapper for all door related system calls from 32-bit programs.
 * Needed at the moment because of the casts - they undo some damage
 * that truss causes (sign-extending the stack pointer) when truss'ing
 * a 32-bit program using doors.
 */
static int
doorfs32(int32_t arg1, int32_t arg2, int32_t arg3,
    int32_t arg4, int32_t arg5, int32_t subcode)
{
	switch (subcode) {
	case DOOR_CALL:
		return (door_call(arg1, (void *)(uintptr_t)(caddr32_t)arg2));
	case DOOR_RETURN: {
		door_return_desc32_t *drdp =
		    (door_return_desc32_t *)(uintptr_t)(caddr32_t)arg3;
		if (drdp != NULL) {
			door_return_desc32_t drd;
			if (copyin(drdp, &drd, sizeof (drd)))
				return (EFAULT);
			return (door_return(
			    (caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
			    (door_desc_t *)(uintptr_t)drd.desc_ptr,
			    drd.desc_num, (caddr_t)(uintptr_t)(caddr32_t)arg4,
			    (size_t)(uintptr_t)(size32_t)arg5));
		}
		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1,
		    arg2, NULL, 0, (caddr_t)(uintptr_t)(caddr32_t)arg4,
		    (size_t)(uintptr_t)(size32_t)arg5));
	}
	case DOOR_RETURN_OLD:
		/*
		 * In order to support the S10 runtime environment, we
		 * still respond to the old syscall subcode for door_return.
		 * We treat it as having no stack limits.  This code should
		 * be removed when such support is no longer needed.
		 */
		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
		    (door_desc_t *)(uintptr_t)(caddr32_t)arg3, arg4,
		    (caddr_t)(uintptr_t)(caddr32_t)arg5, 0));
	case DOOR_CREATE:
		return (door_create((void (*)())(uintptr_t)(caddr32_t)arg1,
		    (void *)(uintptr_t)(caddr32_t)arg2, arg3));
	case DOOR_REVOKE:
		return (door_revoke(arg1));
	case DOOR_INFO:
		return (door_info(arg1,
		    (struct door_info *)(uintptr_t)(caddr32_t)arg2));
	case DOOR_BIND:
		return (door_bind(arg1));
	case DOOR_UNBIND:
		return (door_unbind());
	case DOOR_UNREFSYS:
		return (door_unref());
	case DOOR_UCRED:
		return (door_ucred(
		    (struct ucred_s *)(uintptr_t)(caddr32_t)arg1));
	case DOOR_GETPARAM:
		return (door_getparam(arg1, arg2,
		    (size_t *)(uintptr_t)(caddr32_t)arg3));
	case DOOR_SETPARAM:
		return (door_setparam(arg1, arg2, (size_t)(size32_t)arg3));

	default:
		return (set_errno(EINVAL));
	}
}
#endif

void shuttle_resume(kthread_t *, kmutex_t *);
void shuttle_swtch(kmutex_t *);
void shuttle_sleep(kthread_t *);

/*
 * Support routines
 */
static int door_create_common(void (*)(), void *, uint_t, int, int *,
    file_t **);
static int door_overflow(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
static int door_args(kthread_t *, int);
static int door_results(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
static int door_copy(struct as *, caddr_t, caddr_t, uint_t);
static void door_server_exit(proc_t *, kthread_t *);
static void door_release_server(door_node_t *, kthread_t *);
static kthread_t *door_get_server(door_node_t *);
static door_node_t *door_lookup(int, file_t **);
static int door_translate_in(void);
static int door_translate_out(void);
static void door_fd_rele(door_desc_t *, uint_t, int);
static void door_list_insert(door_node_t *);
static void door_info_common(door_node_t *, door_info_t *, file_t *);
static int door_release_fds(door_desc_t *, uint_t);
static void door_fd_close(door_desc_t *, uint_t);
static void door_fp_close(struct file **, uint_t);

/*
 * Return the current thread's door_data_t, optionally allocating
 * (zeroed) one on first use.
 */
static door_data_t *
door_my_data(int create_if_missing)
{
	door_data_t *ddp;

	ddp = curthread->t_door;
	if (create_if_missing && ddp == NULL)
		ddp = curthread->t_door = kmem_zalloc(sizeof (*ddp), KM_SLEEP);

	return (ddp);
}

/* Server-side view of the current thread's door data (NULL if absent). */
static door_server_t *
door_my_server(int create_if_missing)
{
	door_data_t *ddp = door_my_data(create_if_missing);

	return ((ddp != NULL)? DOOR_SERVER(ddp) : NULL);
}

/* Client-side view of the current thread's door data (NULL if absent). */
static door_client_t *
door_my_client(int create_if_missing)
{
	door_data_t *ddp = door_my_data(create_if_missing);

	return ((ddp != NULL)? DOOR_CLIENT(ddp) : NULL);
}

/*
 * System call to create a door
 */
int
door_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes)
{
	int fd;
	int err;

	/*
	 * Reject unknown attribute bits, and the mutually exclusive
	 * combination of DOOR_UNREF and DOOR_UNREF_MULTI.
	 */
	if ((attributes & ~DOOR_CREATE_MASK) ||
	    ((attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
	    (DOOR_UNREF | DOOR_UNREF_MULTI)))
		return (set_errno(EINVAL));

	if ((err = door_create_common(pc_cookie, data_cookie, attributes, 0,
	    &fd, NULL)) != 0)
		return (set_errno(err));

	f_setfd(fd, FD_CLOEXEC);
	return (fd);
}

/*
 * Common code for creating user and kernel doors.  If a door was
 * created, stores a file structure pointer in the location pointed
 * to by fpp (if fpp is non-NULL) and returns 0.  Also, if a non-NULL
 * pointer to a file descriptor is passed in as fdp, allocates a file
 * descriptor representing the door.  If a door could not be created,
 * returns an error.
 */
static int
door_create_common(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
    int from_kernel, int *fdp, file_t **fpp)
{
	door_node_t	*dp;
	vnode_t		*vp;
	struct file	*fp;
	static door_id_t index = 0;
	proc_t		*p = (from_kernel)? &p0 : curproc;

	dp = kmem_zalloc(sizeof (door_node_t), KM_SLEEP);

	dp->door_vnode = vn_alloc(KM_SLEEP);
	dp->door_target = p;
	dp->door_data = data_cookie;
	dp->door_pc = pc_cookie;
	dp->door_flags = attributes;
#ifdef _SYSCALL32_IMPL
	/* 32-bit clients can't address more than 4G of reply data */
	if (!from_kernel && get_udatamodel() != DATAMODEL_NATIVE)
		dp->door_data_max = UINT32_MAX;
	else
#endif
		dp->door_data_max = SIZE_MAX;
	dp->door_data_min = 0UL;
	dp->door_desc_max = (attributes & DOOR_REFUSE_DESC)? 0 : INT_MAX;

	vp = DTOV(dp);
	vn_setops(vp, door_vnodeops);
	vp->v_type = VDOOR;
	vp->v_vfsp = &door_vfs;
	vp->v_data = (caddr_t)dp;
	mutex_enter(&door_knob);
	dp->door_index = index++;
	/* add to per-process door list */
	door_list_insert(dp);
	mutex_exit(&door_knob);

	if (falloc(vp, FREAD | FWRITE, &fp, fdp)) {
		/*
		 * If the file table is full, remove the door from the
		 * per-process list, free the door, and return NULL.
		 */
		mutex_enter(&door_knob);
		door_list_delete(dp);
		mutex_exit(&door_knob);
		vn_free(vp);
		kmem_free(dp, sizeof (door_node_t));
		return (EMFILE);
	}
	vn_exists(vp);
	if (fdp != NULL)
		setf(*fdp, fp);
	mutex_exit(&fp->f_tlock);

	if (fpp != NULL)
		*fpp = fp;
	return (0);
}

/*
 * Validate a call's argument sizes and descriptor count against the
 * door's configured limits.  Caller must hold door_knob.
 */
static int
door_check_limits(door_node_t *dp, door_arg_t *da, int upcall)
{
	ASSERT(MUTEX_HELD(&door_knob));

	/* we allow unref upcalls through, despite any minimum */
	if (da->data_size < dp->door_data_min &&
	    !(upcall && da->data_ptr == DOOR_UNREF_DATA))
		return (ENOBUFS);

	if (da->data_size > dp->door_data_max)
		return (ENOBUFS);

	if (da->desc_num > 0 && (dp->door_flags & DOOR_REFUSE_DESC))
		return (ENOTSUP);

	if (da->desc_num > dp->door_desc_max)
		return (ENFILE);

	return (0);
}

/*
 * Door invocation.
 */
int
door_call(int did, void *args)
{
	/* Locals */
	door_node_t	*dp;
	kthread_t	*server_thread;
	int		error = 0;
	klwp_t		*lwp;
	door_client_t	*ct;		/* curthread door_data */
	door_server_t	*st;		/* server thread door_data */
	door_desc_t	*start = NULL;
	uint_t		ncopied = 0;
	size_t		dsize;
	/* destructor for data returned by a kernel server */
	void		(*destfn)() = NULL;
	void		*destarg;
	model_t		datamodel;
	int		gotresults = 0;
	int		cancel_pending;

	lwp = ttolwp(curthread);
	datamodel = lwp_getdatamodel(lwp);

	ct = door_my_client(1);

	/*
	 * Get the arguments
	 */
	if (args) {
		if (datamodel == DATAMODEL_NATIVE) {
			if (copyin(args, &ct->d_args, sizeof (door_arg_t)) != 0)
				return (set_errno(EFAULT));
		} else {
			door_arg32_t	da32;

			if (copyin(args, &da32, sizeof (door_arg32_t)) != 0)
				return (set_errno(EFAULT));
			ct->d_args.data_ptr =
			    (char *)(uintptr_t)da32.data_ptr;
			ct->d_args.data_size = da32.data_size;
			ct->d_args.desc_ptr =
			    (door_desc_t *)(uintptr_t)da32.desc_ptr;
			ct->d_args.desc_num = da32.desc_num;
			ct->d_args.rbuf =
			    (char *)(uintptr_t)da32.rbuf;
			ct->d_args.rsize = da32.rsize;
		}
	} else {
		/* No arguments, and no results allowed */
		ct->d_noresults = 1;
		ct->d_args.data_size = 0;
		ct->d_args.desc_num = 0;
		ct->d_args.rsize = 0;
	}

	if ((dp = door_lookup(did, NULL)) == NULL)
		return (set_errno(EBADF));

	mutex_enter(&door_knob);
	if (DOOR_INVALID(dp)) {
		mutex_exit(&door_knob);
		error = EBADF;
		goto out;
	}

	/*
	 * before we do anything, check that we are not overflowing the
	 * required limits.
	 */
	error = door_check_limits(dp, &ct->d_args, 0);
	if (error != 0) {
		mutex_exit(&door_knob);
		goto out;
	}

	/*
	 * Check for in-kernel door server.
	 */
	if (dp->door_target == &p0) {
		caddr_t rbuf = ct->d_args.rbuf;
		size_t rsize = ct->d_args.rsize;

		dp->door_active++;
		ct->d_kernel = 1;
		ct->d_error = DOOR_WAIT;
		mutex_exit(&door_knob);
		/* translate file descriptors to vnodes */
		if (ct->d_args.desc_num) {
			error = door_translate_in();
			if (error)
				goto out;
		}
		/*
		 * Call kernel door server.  Arguments are passed and
		 * returned as a door_arg pointer.  When called, data_ptr
		 * points to user data and desc_ptr points to a kernel list
		 * of door descriptors that have been converted to file
		 * structure pointers.  It's the server function's
		 * responsibility to copyin the data pointed to by data_ptr
		 * (this avoids extra copying in some cases).  On return,
		 * data_ptr points to a user buffer of data, and desc_ptr
		 * points to a kernel list of door descriptors representing
		 * files.  When a reference is passed to a kernel server,
		 * it is the server's responsibility to release the reference
		 * (by calling closef).  When the server includes a
		 * reference in its reply, it is released as part of the
		 * the call (the server must duplicate the reference if
		 * it wants to retain a copy).  The destfn, if set to
		 * non-NULL, is a destructor to be called when the returned
		 * kernel data (if any) is no longer needed (has all been
		 * translated and copied to user level).
		 */
		(*(dp->door_pc))(dp->door_data, &ct->d_args,
		    &destfn, &destarg, &error);
		mutex_enter(&door_knob);
		/* not implemented yet */
		if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
			door_deliver_unref(dp);
		mutex_exit(&door_knob);
		if (error)
			goto out;

		/* translate vnodes to files */
		if (ct->d_args.desc_num) {
			error = door_translate_out();
			if (error)
				goto out;
		}
		ct->d_buf = ct->d_args.rbuf;
		ct->d_bufsize = ct->d_args.rsize;
		if (rsize < (ct->d_args.data_size +
		    (ct->d_args.desc_num * sizeof (door_desc_t)))) {
			/* handle overflow */
			error = door_overflow(curthread, ct->d_args.data_ptr,
			    ct->d_args.data_size, ct->d_args.desc_ptr,
			    ct->d_args.desc_num);
			if (error)
				goto out;
			/* door_overflow sets d_args rbuf and rsize */
		} else {
			ct->d_args.rbuf = rbuf;
			ct->d_args.rsize = rsize;
		}
		goto results;
	}

	/*
	 * Get a server thread from the target domain
	 */
	if ((server_thread = door_get_server(dp)) == NULL) {
		if (DOOR_INVALID(dp))
			error = EBADF;
		else
			error = EAGAIN;
		mutex_exit(&door_knob);
		goto out;
	}

	st = DOOR_SERVER(server_thread->t_door);
	if (ct->d_args.desc_num || ct->d_args.data_size) {
		int is_private = (dp->door_flags & DOOR_PRIVATE);
		/*
		 * Move data from client to server
		 */
		DOOR_T_HOLD(st);
		mutex_exit(&door_knob);
		error = door_args(server_thread, is_private);
		mutex_enter(&door_knob);
		DOOR_T_RELEASE(st);
		if (error) {
			/*
			 * We're not going to resume this thread after all
			 */
			door_release_server(dp, server_thread);
			shuttle_sleep(server_thread);
			mutex_exit(&door_knob);
			goto out;
		}
	}

	dp->door_active++;
	ct->d_error = DOOR_WAIT;
	st->d_caller = curthread;
	st->d_active = dp;

	/* hand the cpu directly to the server thread (shuttle switch) */
	shuttle_resume(server_thread, &door_knob);

	mutex_enter(&door_knob);
shuttle_return:
	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
		/*
		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
		 */
		mutex_exit(&door_knob);		/* May block in ISSIG */
		cancel_pending = 0;
		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
		    MUSTRETURN(curproc, curthread) ||
		    (cancel_pending = schedctl_cancel_pending()) != 0) {
			/* Signal, forkall, ... */
			lwp->lwp_sysabort = 0;
			if (cancel_pending)
				schedctl_cancel_eintr();
			mutex_enter(&door_knob);
			error = EINTR;
			/*
			 * If the server has finished processing our call,
			 * or exited (calling door_slam()), then d_error
			 * will have changed.  If the server hasn't finished
			 * yet, d_error will still be DOOR_WAIT, and we
			 * let it know we are not interested in any
			 * results by sending a SIGCANCEL, unless the door
			 * is marked with DOOR_NO_CANCEL.
			 */
			if (ct->d_error == DOOR_WAIT &&
			    st->d_caller == curthread) {
				proc_t	*p = ttoproc(server_thread);

				st->d_active = NULL;
				st->d_caller = NULL;

				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
					DOOR_T_HOLD(st);
					mutex_exit(&door_knob);

					mutex_enter(&p->p_lock);
					sigtoproc(p, server_thread, SIGCANCEL);
					mutex_exit(&p->p_lock);

					mutex_enter(&door_knob);
					DOOR_T_RELEASE(st);
				}
			}
		} else {
			/*
			 * Return from stop(), server exit...
			 *
			 * Note that the server could have done a
			 * door_return while the client was in stop state
			 * (ISSIG), in which case the error condition
			 * is updated by the server.
			 */
			mutex_enter(&door_knob);
			if (ct->d_error == DOOR_WAIT) {
				/* Still waiting for a reply */
				shuttle_swtch(&door_knob);
				mutex_enter(&door_knob);
				lwp->lwp_asleep = 0;
				goto	shuttle_return;
			} else if (ct->d_error == DOOR_EXIT) {
				/* Server exit */
				error = EINTR;
			} else {
				/* Server did a door_return during ISSIG */
				error = ct->d_error;
			}
		}
		/*
		 * Can't exit if the server is currently copying
		 * results for me.
		 */
		while (DOOR_T_HELD(ct))
			cv_wait(&ct->d_cv, &door_knob);

		/*
		 * Find out if results were successfully copied.
		 */
		if (ct->d_error == 0)
			gotresults = 1;
	}
	lwp->lwp_asleep = 0;		/* /proc */
	lwp->lwp_sysabort = 0;		/* /proc */
	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
		door_deliver_unref(dp);
	mutex_exit(&door_knob);

results:
	/*
	 * Move the results to userland (if any)
	 */

	if (ct->d_noresults)
		goto out;

	if (error) {
		/*
		 * If server returned results successfully, then we've
		 * been interrupted and may need to clean up.
		 */
		if (gotresults) {
			ASSERT(error == EINTR);
			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
		}
		goto out;
	}

	/*
	 * Copy back data if we haven't caused an overflow (already
	 * handled) and we are using a 2 copy transfer, or we are
	 * returning data from a kernel server.
	 */
	if (ct->d_args.data_size) {
		ct->d_args.data_ptr = ct->d_args.rbuf;
		if (ct->d_kernel || (!ct->d_overflow &&
		    ct->d_args.data_size <= door_max_arg)) {
			if (copyout(ct->d_buf, ct->d_args.rbuf,
			    ct->d_args.data_size)) {
				door_fp_close(ct->d_fpp, ct->d_args.desc_num);
				error = EFAULT;
				goto out;
			}
		}
	}

	/*
	 * stuff returned doors into our proc, copyout the descriptors
	 */
	if (ct->d_args.desc_num) {
		struct file	**fpp;
		door_desc_t	*didpp;
		uint_t		n = ct->d_args.desc_num;

		dsize = n * sizeof (door_desc_t);
		start = didpp = kmem_alloc(dsize, KM_SLEEP);
		fpp = ct->d_fpp;

		while (n--) {
			if (door_insert(*fpp, didpp) == -1) {
				/* Close remaining files */
				door_fp_close(fpp, n + 1);
				error = EMFILE;
				goto out;
			}
			fpp++; didpp++; ncopied++;
		}

		ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));

		if (copyout(start, ct->d_args.desc_ptr, dsize)) {
			error = EFAULT;
			goto out;
		}
	}

	/*
	 * Return the results
	 */
	if (datamodel == DATAMODEL_NATIVE) {
		if (copyout(&ct->d_args, args, sizeof (door_arg_t)) != 0)
			error = EFAULT;
	} else {
		door_arg32_t    da32;

		da32.data_ptr = (caddr32_t)(uintptr_t)ct->d_args.data_ptr;
		da32.data_size = ct->d_args.data_size;
		da32.desc_ptr = (caddr32_t)(uintptr_t)ct->d_args.desc_ptr;
		da32.desc_num = ct->d_args.desc_num;
		da32.rbuf = (caddr32_t)(uintptr_t)ct->d_args.rbuf;
		da32.rsize = ct->d_args.rsize;
		if (copyout(&da32, args, sizeof (door_arg32_t)) != 0) {
			error = EFAULT;
		}
	}

out:
	ct->d_noresults = 0;

	/* clean up the overflow buffer if an error occurred */
	if (error != 0 && ct->d_overflow) {
		(void) as_unmap(curproc->p_as, ct->d_args.rbuf,
		    ct->d_args.rsize);
	}
	ct->d_overflow = 0;

	/* call destructor */
	if (destfn) {
		ASSERT(ct->d_kernel);
		(*destfn)(dp->door_data, destarg);
		ct->d_buf = NULL;
		ct->d_bufsize = 0;
	}

	if (dp)
		releasef(did);

	if (ct->d_buf) {
		ASSERT(!ct->d_kernel);
		kmem_free(ct->d_buf, ct->d_bufsize);
		ct->d_buf = NULL;
		ct->d_bufsize = 0;
	}
	ct->d_kernel = 0;

	/* clean up the descriptor copyout buffer */
	if (start != NULL) {
		if (error != 0)
			door_fd_close(start, ncopied);
		kmem_free(start, dsize);
	}

	if (ct->d_fpp) {
		kmem_free(ct->d_fpp, ct->d_fpp_size);
		ct->d_fpp = NULL;
		ct->d_fpp_size = 0;
	}

	if (error)
		return (set_errno(error));

	return (0);
}

/*
 * Apply a parameter change to a door.  Fails unless the door belongs
 * to the expected owner (kernel doors for door_ki_setparam(), doors
 * attached to the current process for door_setparam()).
 */
static int
door_setparam_common(door_node_t *dp, int from_kernel, int type, size_t val)
{
	int error = 0;

	mutex_enter(&door_knob);

	if (DOOR_INVALID(dp)) {
		mutex_exit(&door_knob);
		return (EBADF);
	}

	/*
	 * door_ki_setparam() can only affect kernel doors.
	 * door_setparam() can only affect doors attached to the current
	 * process.
	 */
	if ((from_kernel && dp->door_target != &p0) ||
	    (!from_kernel && dp->door_target != curproc)) {
		mutex_exit(&door_knob);
		return (EPERM);
	}

	switch (type) {
	case DOOR_PARAM_DESC_MAX:
		if (val > INT_MAX)
			error = ERANGE;
		else if ((dp->door_flags & DOOR_REFUSE_DESC) && val != 0)
			error = ENOTSUP;
		else
			dp->door_desc_max = (uint_t)val;
		break;

	case DOOR_PARAM_DATA_MIN:
		if (val > dp->door_data_max)
			error = EINVAL;
		else
			dp->door_data_min = val;
		break;

	case DOOR_PARAM_DATA_MAX:
		if (val < dp->door_data_min)
			error = EINVAL;
		else
			dp->door_data_max = val;
		break;

	default:
		error = EINVAL;
		break;
	}

	mutex_exit(&door_knob);
	return (error);
}

/* Read one of a door's limit parameters under door_knob. */
static int
door_getparam_common(door_node_t *dp, int type, size_t *out)
{
	int error = 0;

	mutex_enter(&door_knob);
	switch (type) {
	case DOOR_PARAM_DESC_MAX:
		*out = (size_t)dp->door_desc_max;
		break;
	case DOOR_PARAM_DATA_MIN:
		*out = dp->door_data_min;
		break;
	case DOOR_PARAM_DATA_MAX:
		*out = dp->door_data_max;
		break;
	default:
		error = EINVAL;
		break;
	}
	mutex_exit(&door_knob);
	return (error);
}

/* System call entry point: set a door parameter. */
int
door_setparam(int did, int type, size_t val)
{
	door_node_t *dp;
	int error = 0;

	if ((dp = door_lookup(did, NULL)) == NULL)
		return (set_errno(EBADF));

	error = door_setparam_common(dp, 0, type, val);

	releasef(did);

	if (error)
		return (set_errno(error));

	return (0);
}

/* System call entry point: read a door parameter and copy it out. */
int
door_getparam(int did, int type, size_t *out)
{
	door_node_t *dp;
	size_t val = 0;
	int error = 0;

	if ((dp = door_lookup(did, NULL)) == NULL)
		return (set_errno(EBADF));

	error = door_getparam_common(dp, type, &val);

	releasef(did);

	if (error)
		return (set_errno(error));

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyout(&val, out, sizeof (val)))
			return (set_errno(EFAULT));
#ifdef _SYSCALL32_IMPL
	} else {
		size32_t val32 = (size32_t)val;

		if (val != val32)
			return (set_errno(EOVERFLOW));

		if (copyout(&val32, out, sizeof (val32)))
			return (set_errno(EFAULT));
#endif /* _SYSCALL32_IMPL */
	}

	return (0);
}

/*
 * A copyout() which proceeds from high addresses to low addresses.  This way,
 * stack guard pages are effective.
 */
static int
door_stack_copyout(const void *kaddr, void *uaddr, size_t count)
{
	const char *kbase = (const char *)kaddr;
	uintptr_t ubase = (uintptr_t)uaddr;
	size_t pgsize = PAGESIZE;

	if (count <= pgsize)
		return (copyout(kaddr, uaddr, count));

	while (count > 0) {
		uintptr_t start, end, offset, amount;

		end = ubase + count;
		start = P2ALIGN(end - 1, pgsize);
		if (P2ALIGN(ubase, pgsize) == start)
			start = ubase;

		offset = start - ubase;
		amount = end - start;

		ASSERT(amount > 0 && amount <= count && amount <= pgsize);

		if (copyout(kbase + offset, (void *)start, amount))
			return (1);
		count -= amount;
	}
	return (0);
}

/*
 * Writes the stack layout for door_return() into the door_server_t of the
 * server thread.
1082 */ 1083 static int 1084 door_layout(kthread_t *tp, size_t data_size, uint_t ndesc, int info_needed) 1085 { 1086 door_server_t *st = DOOR_SERVER(tp->t_door); 1087 door_layout_t *out = &st->d_layout; 1088 uintptr_t base_sp = (uintptr_t)st->d_sp; 1089 size_t ssize = st->d_ssize; 1090 size_t descsz; 1091 uintptr_t descp, datap, infop, resultsp, finalsp; 1092 size_t align = STACK_ALIGN; 1093 size_t results_sz = sizeof (struct door_results); 1094 model_t datamodel = lwp_getdatamodel(ttolwp(tp)); 1095 1096 ASSERT(!st->d_layout_done); 1097 1098 #ifndef _STACK_GROWS_DOWNWARD 1099 #error stack does not grow downward, door_layout() must change 1100 #endif 1101 1102 #ifdef _SYSCALL32_IMPL 1103 if (datamodel != DATAMODEL_NATIVE) { 1104 align = STACK_ALIGN32; 1105 results_sz = sizeof (struct door_results32); 1106 } 1107 #endif 1108 1109 descsz = ndesc * sizeof (door_desc_t); 1110 1111 /* 1112 * To speed up the overflow checking, we do an initial check 1113 * that the passed in data size won't cause us to wrap past 1114 * base_sp. Since door_max_desc limits descsz, we can 1115 * safely use it here. 65535 is an arbitrary 'bigger than 1116 * we need, small enough to not cause trouble' constant; 1117 * the only constraint is that it must be > than: 1118 * 1119 * 5 * STACK_ALIGN + 1120 * sizeof (door_info_t) + 1121 * sizeof (door_results_t) + 1122 * (max adjustment from door_final_sp()) 1123 * 1124 * After we compute the layout, we can safely do a "did we wrap 1125 * around" check, followed by a check against the recorded 1126 * stack size. 
1127 */ 1128 if (data_size >= SIZE_MAX - (size_t)65535UL - descsz) 1129 return (E2BIG); /* overflow */ 1130 1131 descp = P2ALIGN(base_sp - descsz, align); 1132 datap = P2ALIGN(descp - data_size, align); 1133 1134 if (info_needed) 1135 infop = P2ALIGN(datap - sizeof (door_info_t), align); 1136 else 1137 infop = datap; 1138 1139 resultsp = P2ALIGN(infop - results_sz, align); 1140 finalsp = door_final_sp(resultsp, align, datamodel); 1141 1142 if (finalsp > base_sp) 1143 return (E2BIG); /* overflow */ 1144 1145 if (ssize != 0 && (base_sp - finalsp) > ssize) 1146 return (E2BIG); /* doesn't fit in stack */ 1147 1148 out->dl_descp = (ndesc != 0)? (caddr_t)descp : 0; 1149 out->dl_datap = (data_size != 0)? (caddr_t)datap : 0; 1150 out->dl_infop = info_needed? (caddr_t)infop : 0; 1151 out->dl_resultsp = (caddr_t)resultsp; 1152 out->dl_sp = (caddr_t)finalsp; 1153 1154 st->d_layout_done = 1; 1155 return (0); 1156 } 1157 1158 static int 1159 door_server_dispatch(door_client_t *ct, door_node_t *dp) 1160 { 1161 door_server_t *st = DOOR_SERVER(curthread->t_door); 1162 door_layout_t *layout = &st->d_layout; 1163 int error = 0; 1164 1165 int is_private = (dp->door_flags & DOOR_PRIVATE); 1166 1167 door_pool_t *pool = (is_private)? &dp->door_servers : 1168 &curproc->p_server_threads; 1169 1170 int empty_pool = (pool->dp_threads == NULL); 1171 1172 caddr_t infop = NULL; 1173 char *datap = NULL; 1174 size_t datasize = 0; 1175 size_t descsize; 1176 1177 file_t **fpp = ct->d_fpp; 1178 door_desc_t *start = NULL; 1179 uint_t ndesc = 0; 1180 uint_t ncopied = 0; 1181 1182 if (ct != NULL) { 1183 datap = ct->d_args.data_ptr; 1184 datasize = ct->d_args.data_size; 1185 ndesc = ct->d_args.desc_num; 1186 } 1187 1188 descsize = ndesc * sizeof (door_desc_t); 1189 1190 /* 1191 * Reset datap to NULL if we aren't passing any data. Be careful 1192 * to let unref notifications through, though. 
	 */
	if (datap == DOOR_UNREF_DATA) {
		if (ct->d_upcall)
			datasize = 0;
		else
			datap = NULL;
	} else if (datasize == 0) {
		datap = NULL;
	}

	/*
	 * Get the stack layout, if it hasn't already been done.
	 */
	if (!st->d_layout_done) {
		error = door_layout(curthread, datasize, ndesc,
		    (is_private && empty_pool));
		if (error != 0)
			goto fail;
	}

	/*
	 * fill out the stack, starting from the top.  Layout was already
	 * filled in by door_args() or door_translate_out().
	 */
	if (layout->dl_descp != NULL) {
		ASSERT(ndesc != 0);
		start = kmem_alloc(descsize, KM_SLEEP);

		/* translate each passed file_t into an fd in this process */
		while (ndesc > 0) {
			if (door_insert(*fpp, &start[ncopied]) == -1) {
				error = EMFILE;
				goto fail;
			}
			ndesc--;
			ncopied++;
			fpp++;
		}
		if (door_stack_copyout(start, layout->dl_descp, descsize)) {
			error = E2BIG;
			goto fail;
		}
	}
	fpp = NULL;			/* finished processing */

	if (layout->dl_datap != NULL) {
		ASSERT(datasize != 0);
		datap = layout->dl_datap;
		/* small args were buffered in d_buf; large ones were 1-copied */
		if (ct->d_upcall || datasize <= door_max_arg) {
			if (door_stack_copyout(ct->d_buf, datap, datasize)) {
				error = E2BIG;
				goto fail;
			}
		}
	}

	/* private pool with no spare threads: pass door_info for bootstrap */
	if (is_private && empty_pool) {
		door_info_t di;

		infop = layout->dl_infop;
		ASSERT(infop != NULL);

		di.di_target = curproc->p_pid;
		di.di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
		di.di_data = (door_ptr_t)(uintptr_t)dp->door_data;
		di.di_uniquifier = dp->door_index;
		di.di_attributes = (dp->door_flags & DOOR_ATTR_MASK) |
		    DOOR_LOCAL;

		if (copyout(&di, infop, sizeof (di))) {
			error = E2BIG;
			goto fail;
		}
	}

	/* finally, the door_results structure the server stub reads */
	if (get_udatamodel() == DATAMODEL_NATIVE) {
		struct door_results dr;

		dr.cookie = dp->door_data;
		dr.data_ptr = datap;
		dr.data_size = datasize;
		dr.desc_ptr = (door_desc_t *)layout->dl_descp;
		dr.desc_num = ncopied;
		dr.pc = dp->door_pc;
		dr.nservers = !empty_pool;
		dr.door_info = (door_info_t *)infop;

		if (copyout(&dr, layout->dl_resultsp, sizeof (dr))) {
			error = E2BIG;
			goto fail;
		}
#ifdef _SYSCALL32_IMPL
	} else {
		struct door_results32 dr32;

		dr32.cookie = (caddr32_t)(uintptr_t)dp->door_data;
		dr32.data_ptr = (caddr32_t)(uintptr_t)datap;
		dr32.data_size = (size32_t)datasize;
		dr32.desc_ptr = (caddr32_t)(uintptr_t)layout->dl_descp;
		dr32.desc_num = ncopied;
		dr32.pc = (caddr32_t)(uintptr_t)dp->door_pc;
		dr32.nservers = !empty_pool;
		dr32.door_info = (caddr32_t)(uintptr_t)infop;

		if (copyout(&dr32, layout->dl_resultsp, sizeof (dr32))) {
			error = E2BIG;
			goto fail;
		}
#endif
	}

	error = door_finish_dispatch(layout->dl_sp);
fail:
	if (start != NULL) {
		/* on error, close the fds we already inserted */
		if (error != 0)
			door_fd_close(start, ncopied);
		kmem_free(start, descsize);
	}
	/* fpp non-NULL here means we bailed before translating all fds */
	if (fpp != NULL)
		door_fp_close(fpp, ndesc);

	return (error);
}

/*
 * Return the results (if any) to the caller (if any) and wait for the
 * next invocation on a door.
 */
int
door_return(caddr_t data_ptr, size_t data_size,
    door_desc_t *desc_ptr, uint_t desc_num, caddr_t sp, size_t ssize)
{
	kthread_t *caller;
	klwp_t *lwp;
	int error = 0;
	door_node_t *dp;
	door_server_t *st;		/* curthread door_data */
	door_client_t *ct;		/* caller door_data */
	int cancel_pending;

	st = door_my_server(1);

	/*
	 * If thread was bound to a door that no longer exists, return
	 * an error.  This can happen if a thread is bound to a door
	 * before the process calls forkall(); in the child, the door
	 * doesn't exist and door_fork() sets the d_invbound flag.
	 */
	if (st->d_invbound)
		return (set_errno(EINVAL));

	st->d_sp = sp;			/* Save base of stack. */
	st->d_ssize = ssize;		/* and its size */

	/*
	 * before we release our stack to the whims of our next caller,
	 * copy in the syscall arguments if we're being traced by /proc.
	 */
	if (curthread->t_post_sys && PTOU(ttoproc(curthread))->u_systrap)
		(void) save_syscall_args();

	/* Make sure the caller hasn't gone away */
	mutex_enter(&door_knob);
	if ((caller = st->d_caller) == NULL || caller->t_door == NULL) {
		if (desc_num != 0) {
			/* close any DOOR_RELEASE descriptors */
			mutex_exit(&door_knob);
			error = door_release_fds(desc_ptr, desc_num);
			if (error)
				return (set_errno(error));
			mutex_enter(&door_knob);
		}
		goto out;
	}
	ct = DOOR_CLIENT(caller->t_door);

	ct->d_args.data_size = data_size;
	ct->d_args.desc_num = desc_num;
	/*
	 * Transfer results, if any, to the client
	 */
	if (data_size != 0 || desc_num != 0) {
		/*
		 * Prevent the client from exiting until we have finished
		 * moving results.
		 */
		DOOR_T_HOLD(ct);
		mutex_exit(&door_knob);
		error = door_results(caller, data_ptr, data_size,
		    desc_ptr, desc_num);
		mutex_enter(&door_knob);
		DOOR_T_RELEASE(ct);
		/*
		 * Pass EOVERFLOW errors back to the client
		 */
		if (error && error != EOVERFLOW) {
			mutex_exit(&door_knob);
			return (set_errno(error));
		}
	}
out:
	/* Put ourselves on the available server thread list */
	door_release_server(st->d_pool, curthread);

	/*
	 * Make sure the caller is still waiting to be resumed
	 */
	if (caller) {
		disp_lock_t *tlp;

		thread_lock(caller);
		ct->d_error = error;		/* Return any errors */
		if (caller->t_state == TS_SLEEP &&
		    SOBJ_TYPE(caller->t_sobj_ops) == SOBJ_SHUTTLE) {
			cpu_t *cp = CPU;

			tlp = caller->t_lockp;
			/*
			 * Setting t_disp_queue prevents erroneous preemptions
			 * if this thread is still in execution on another
			 * processor
			 */
			caller->t_disp_queue = cp->cpu_disp;
			CL_ACTIVE(caller);
			/*
			 * We are calling thread_onproc() instead of
			 * THREAD_ONPROC() because compiler can reorder
			 * the two stores of t_state and t_lockp in
			 * THREAD_ONPROC().
			 */
			thread_onproc(caller, cp);
			disp_lock_exit_high(tlp);
			/* direct handoff of the CPU back to the client */
			shuttle_resume(caller, &door_knob);
		} else {
			/* May have been setrun or in stop state */
			thread_unlock(caller);
			shuttle_swtch(&door_knob);
		}
	} else {
		/* no client to resume; just park on the shuttle */
		shuttle_swtch(&door_knob);
	}

	/*
	 * We've sprung to life. Determine if we are part of a door
	 * invocation, or just interrupted
	 */
	lwp = ttolwp(curthread);
	mutex_enter(&door_knob);
	if ((dp = st->d_active) != NULL) {
		/*
		 * Normal door invocation. Return any error condition
		 * encountered while trying to pass args to the server
		 * thread.
		 */
		lwp->lwp_asleep = 0;
		/*
		 * Prevent the caller from leaving us while we
		 * are copying out the arguments from it's buffer.
		 */
		ASSERT(st->d_caller != NULL);
		ct = DOOR_CLIENT(st->d_caller->t_door);

		DOOR_T_HOLD(ct);
		mutex_exit(&door_knob);
		error = door_server_dispatch(ct, dp);
		mutex_enter(&door_knob);
		DOOR_T_RELEASE(ct);

		if (error) {
			/* dispatch failed: go resume the client with error */
			caller = st->d_caller;
			if (caller)
				ct = DOOR_CLIENT(caller->t_door);
			else
				ct = NULL;
			goto out;
		}
		mutex_exit(&door_knob);
		/* returning to userland resumes execution in the server fn */
		return (0);
	} else {
		/*
		 * We are not involved in a door_invocation.
		 * Check for /proc related activity...
		 */
		st->d_caller = NULL;
		door_server_exit(curproc, curthread);
		mutex_exit(&door_knob);
		cancel_pending = 0;
		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
		    MUSTRETURN(curproc, curthread) ||
		    (cancel_pending = schedctl_cancel_pending()) != 0) {
			if (cancel_pending)
				schedctl_cancel_eintr();
			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			return (set_errno(EINTR));
		}
		/* Go back and wait for another request */
		lwp->lwp_asleep = 0;
		mutex_enter(&door_knob);
		caller = NULL;
		goto out;
	}
}

/*
 * Revoke any future invocations on this door
 */
int
door_revoke(int did)
{
	door_node_t *d;
	int error;

	if ((d = door_lookup(did, NULL)) == NULL)
		return (set_errno(EBADF));

	mutex_enter(&door_knob);
	/* only the process that created the door may revoke it */
	if (d->door_target != curproc) {
		mutex_exit(&door_knob);
		releasef(did);
		return (set_errno(EPERM));
	}
	d->door_flags |= DOOR_REVOKED;
	/* wake any threads blocked waiting to service this door */
	if (d->door_flags & DOOR_PRIVATE)
		cv_broadcast(&d->door_servers.dp_cv);
	else
		cv_broadcast(&curproc->p_server_threads.dp_cv);
	mutex_exit(&door_knob);
	releasef(did);
	/* Invalidate the descriptor */
	if ((error = closeandsetf(did, NULL))
	    != 0)
		return (set_errno(error));
	return (0);
}

/*
 * Return a snapshot of a door's attributes to userland.  did may be
 * DOOR_QUERY, meaning "the door this server thread is bound to".
 */
int
door_info(int did, struct door_info *d_info)
{
	door_node_t *dp;
	door_info_t di;
	door_server_t *st;
	file_t *fp = NULL;

	if (did == DOOR_QUERY) {
		/* Get information on door current thread is bound to */
		if ((st = door_my_server(0)) == NULL ||
		    (dp = st->d_pool) == NULL)
			/* Thread isn't bound to a door */
			return (set_errno(EBADF));
	} else if ((dp = door_lookup(did, &fp)) == NULL) {
		/* Not a door */
		return (set_errno(EBADF));
	}

	door_info_common(dp, &di, fp);

	if (did != DOOR_QUERY)
		releasef(did);

	if (copyout(&di, d_info, sizeof (struct door_info)))
		return (set_errno(EFAULT));
	return (0);
}

/*
 * Common code for getting information about a door either via the
 * door_info system call or the door_ki_info kernel call.
 */
void
door_info_common(door_node_t *dp, struct door_info *dip, file_t *fp)
{
	int unref_count;

	bzero(dip, sizeof (door_info_t));

	mutex_enter(&door_knob);
	/* a NULL door_target means the door has been revoked */
	if (dp->door_target == NULL)
		dip->di_target = -1;
	else
		dip->di_target = dp->door_target->p_pid;

	dip->di_attributes = dp->door_flags & DOOR_ATTR_MASK;
	if (dp->door_target == curproc)
		dip->di_attributes |= DOOR_LOCAL;
	dip->di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
	dip->di_data = (door_ptr_t)(uintptr_t)dp->door_data;
	dip->di_uniquifier = dp->door_index;
	/*
	 * If this door is in the middle of having an unreferenced
	 * notification delivered, don't count the VN_HOLD by
	 * door_deliver_unref in determining if it is unreferenced.
	 * This handles the case where door_info is called from the
	 * thread delivering the unref notification.
	 */
	if (dp->door_flags & DOOR_UNREF_ACTIVE)
		unref_count = 2;
	else
		unref_count = 1;
	mutex_exit(&door_knob);

	if (fp == NULL) {
		/*
		 * If this thread is bound to the door, then we can just
		 * check the vnode; a ref count of 1 (or 2 if this is
		 * handling an unref notification) means that the hold
		 * from the door_bind is the only reference to the door
		 * (no file descriptor refers to it).
		 */
		if (DTOV(dp)->v_count == unref_count)
			dip->di_attributes |= DOOR_IS_UNREF;
	} else {
		/*
		 * If we're working from a file descriptor or door handle
		 * we need to look at the file structure count.  We don't
		 * need to hold the vnode lock since this is just a snapshot.
		 */
		mutex_enter(&fp->f_tlock);
		if (fp->f_count == 1 && DTOV(dp)->v_count == unref_count)
			dip->di_attributes |= DOOR_IS_UNREF;
		mutex_exit(&fp->f_tlock);
	}
}

/*
 * Return credentials of the door caller (if any) for this invocation
 */
int
door_ucred(struct ucred_s *uch)
{
	kthread_t *caller;
	door_server_t *st;
	door_client_t *ct;
	struct proc *p;
	struct ucred_s *res;
	int err;

	mutex_enter(&door_knob);
	/* only meaningful from a server thread with an active caller */
	if ((st = door_my_server(0)) == NULL ||
	    (caller = st->d_caller) == NULL) {
		mutex_exit(&door_knob);
		return (set_errno(EINVAL));
	}

	ASSERT(caller->t_door != NULL);
	ct = DOOR_CLIENT(caller->t_door);

	/* Prevent caller from exiting while we examine the cred */
	DOOR_T_HOLD(ct);
	mutex_exit(&door_knob);

	p = ttoproc(caller);

	/*
	 * If the credentials are not specified by the client, get the one
	 * associated with the calling process.
	 */
	if (ct->d_cred == NULL) {
		res = pgetucred(p);
	} else {
		res = cred2ucred(ct->d_cred, ct->d_upcall ?
		    p0.p_pid : p->p_pid, NULL, CRED());
	}

	mutex_enter(&door_knob);
	DOOR_T_RELEASE(ct);
	mutex_exit(&door_knob);

	err = copyout(res, uch, res->uc_size);

	kmem_free(res, res->uc_size);

	if (err != 0)
		return (set_errno(EFAULT));

	return (0);
}

/*
 * Bind the current lwp to the server thread pool associated with 'did'
 */
int
door_bind(int did)
{
	door_node_t *dp;
	door_server_t *st;

	if ((dp = door_lookup(did, NULL)) == NULL) {
		/* Not a door */
		return (set_errno(EBADF));
	}

	/*
	 * Can't bind to a non-private door, and can't bind to a door
	 * served by another process.
	 */
	if ((dp->door_flags & DOOR_PRIVATE) == 0 ||
	    dp->door_target != curproc) {
		releasef(did);
		return (set_errno(EINVAL));
	}

	st = door_my_server(1);
	/* rebinding implicitly unbinds from the previous pool */
	if (st->d_pool)
		door_unbind_thread(st->d_pool);
	st->d_pool = dp;
	st->d_invbound = 0;
	door_bind_thread(dp);
	releasef(did);

	return (0);
}

/*
 * Unbind the current lwp from it's server thread pool
 */
int
door_unbind(void)
{
	door_server_t *st;

	if ((st = door_my_server(0)) == NULL)
		return (set_errno(EBADF));

	/* clearing an invalid (post-forkall) binding always succeeds */
	if (st->d_invbound) {
		ASSERT(st->d_pool == NULL);
		st->d_invbound = 0;
		return (0);
	}
	if (st->d_pool == NULL)
		return (set_errno(EBADF));
	door_unbind_thread(st->d_pool);
	st->d_pool = NULL;
	return (0);
}

/*
 * Create a descriptor for the associated file and fill in the
 * attributes associated with it.
 *
 * Return 0 for success, -1 otherwise;
 */
int
door_insert(struct file *fp, door_desc_t *dp)
{
	struct vnode *vp;
	int fd;
	door_attr_t attributes = DOOR_DESCRIPTOR;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	if ((fd = ufalloc(0)) == -1)
		return (-1);
	setf(fd, fp);
	dp->d_data.d_desc.d_descriptor = fd;

	/* Fill in the attributes */
	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;
	if (vp && vp->v_type == VDOOR) {
		if (VTOD(vp)->door_target == curproc)
			attributes |= DOOR_LOCAL;
		attributes |= VTOD(vp)->door_flags & DOOR_ATTR_MASK;
		dp->d_data.d_desc.d_id = VTOD(vp)->door_index;
	}
	dp->d_attributes = attributes;
	return (0);
}

/*
 * Return an available thread for this server.  A NULL return value indicates
 * that either:
 *	The door has been revoked, or
 *	a signal was received.
 * The two conditions can be differentiated using DOOR_INVALID(dp).
 *
 * On success the thread is returned thread_lock()ed, removed from its
 * pool, and already marked ONPROC — the caller must shuttle_resume() it.
 */
static kthread_t *
door_get_server(door_node_t *dp)
{
	kthread_t **ktp;
	kthread_t *server_t;
	door_pool_t *pool;
	door_server_t *st;
	int signalled;

	disp_lock_t *tlp;
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&door_knob));

	if (dp->door_flags & DOOR_PRIVATE)
		pool = &dp->door_servers;
	else
		pool = &dp->door_target->p_server_threads;

	for (;;) {
		/*
		 * We search the thread pool, looking for a server thread
		 * ready to take an invocation (i.e. one which is still
		 * sleeping on a shuttle object).  If none are available,
		 * we sleep on the pool's CV, and will be signaled when a
		 * thread is added to the pool.
		 *
		 * This relies on the fact that once a thread in the thread
		 * pool wakes up, it *must* remove and add itself to the pool
		 * before it can receive door calls.
		 */
		if (DOOR_INVALID(dp))
			return (NULL);	/* Target has become invalid */

		for (ktp = &pool->dp_threads;
		    (server_t = *ktp) != NULL;
		    ktp = &st->d_servers) {
			st = DOOR_SERVER(server_t->t_door);

			thread_lock(server_t);
			/* break with the thread still locked (see below) */
			if (server_t->t_state == TS_SLEEP &&
			    SOBJ_TYPE(server_t->t_sobj_ops) == SOBJ_SHUTTLE)
				break;
			thread_unlock(server_t);
		}
		if (server_t != NULL)
			break;		/* we've got a live one! */

		if (!cv_wait_sig_swap_core(&pool->dp_cv, &door_knob,
		    &signalled)) {
			/*
			 * If we were signaled and the door is still
			 * valid, pass the signal on to another waiter.
			 */
			if (signalled && !DOOR_INVALID(dp))
				cv_signal(&pool->dp_cv);
			return (NULL);	/* Got a signal */
		}
	}

	/*
	 * We've got a thread_lock()ed thread which is still on the
	 * shuttle.  Take it off the list of available server threads
	 * and mark it as ONPROC.  We are committed to resuming this
	 * thread now.
	 */
	tlp = server_t->t_lockp;
	cp = CPU;

	*ktp = st->d_servers;
	st->d_servers = NULL;
	/*
	 * Setting t_disp_queue prevents erroneous preemptions
	 * if this thread is still in execution on another processor
	 */
	server_t->t_disp_queue = cp->cpu_disp;
	CL_ACTIVE(server_t);
	/*
	 * We are calling thread_onproc() instead of
	 * THREAD_ONPROC() because compiler can reorder
	 * the two stores of t_state and t_lockp in
	 * THREAD_ONPROC().
	 */
	thread_onproc(server_t, cp);
	disp_lock_exit(tlp);
	return (server_t);
}

/*
 * Put a server thread back in the pool.
 */
static void
door_release_server(door_node_t *dp, kthread_t *t)
{
	door_server_t *st = DOOR_SERVER(t->t_door);
	door_pool_t *pool;

	ASSERT(MUTEX_HELD(&door_knob));
	st->d_active = NULL;
	st->d_caller = NULL;
	st->d_layout_done = 0;
	/* private doors have their own pool; others share the proc pool */
	if (dp && (dp->door_flags & DOOR_PRIVATE)) {
		ASSERT(dp->door_target == NULL ||
		    dp->door_target == ttoproc(t));
		pool = &dp->door_servers;
	} else {
		pool = &ttoproc(t)->p_server_threads;
	}

	/* push t on the front of the pool's singly-linked thread list */
	st->d_servers = pool->dp_threads;
	pool->dp_threads = t;

	/* If someone is waiting for a server thread, wake him up */
	cv_signal(&pool->dp_cv);
}

/*
 * Remove a server thread from the pool if present.
 */
static void
door_server_exit(proc_t *p, kthread_t *t)
{
	door_pool_t *pool;
	kthread_t **next;
	door_server_t *st = DOOR_SERVER(t->t_door);

	ASSERT(MUTEX_HELD(&door_knob));
	if (st->d_pool != NULL) {
		ASSERT(st->d_pool->door_flags & DOOR_PRIVATE);
		pool = &st->d_pool->door_servers;
	} else {
		pool = &p->p_server_threads;
	}

	/* unlink t from the pool list; silently ignore if not present */
	next = &pool->dp_threads;
	while (*next != NULL) {
		if (*next == t) {
			*next = DOOR_SERVER(t->t_door)->d_servers;
			return;
		}
		next = &(DOOR_SERVER((*next)->t_door)->d_servers);
	}
}

/*
 * Lookup the door descriptor. Caller must call releasef when finished
 * with associated door.
 */
static door_node_t *
door_lookup(int did, file_t **fpp)
{
	vnode_t *vp;
	file_t *fp;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	if ((fp = getf(did)) == NULL)
		return (NULL);
	/*
	 * Use the underlying vnode (we may be namefs mounted)
	 */
	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;

	if (vp == NULL || vp->v_type != VDOOR) {
		releasef(did);
		return (NULL);
	}

	/* optionally hand back the file_t so callers can inspect f_count */
	if (fpp)
		*fpp = fp;

	return (VTOD(vp));
}

/*
 * The current thread is exiting, so clean up any pending
 * invocation details
 */
void
door_slam(void)
{
	door_node_t *dp;
	door_data_t *dt;
	door_client_t *ct;
	door_server_t *st;

	/*
	 * If we are an active door server, notify our
	 * client that we are exiting and revoke our door.
	 */
	if ((dt = door_my_data(0)) == NULL)
		return;
	ct = DOOR_CLIENT(dt);
	st = DOOR_SERVER(dt);

	mutex_enter(&door_knob);
	/* wait until no one else holds a transient reference on us */
	for (;;) {
		if (DOOR_T_HELD(ct))
			cv_wait(&ct->d_cv, &door_knob);
		else if (DOOR_T_HELD(st))
			cv_wait(&st->d_cv, &door_knob);
		else
			break;			/* neither flag is set */
	}
	curthread->t_door = NULL;
	if ((dp = st->d_active) != NULL) {
		kthread_t *t = st->d_caller;
		proc_t *p = curproc;

		/* Revoke our door if the process is exiting */
		if (dp->door_target == p && (p->p_flag & SEXITING)) {
			door_list_delete(dp);
			dp->door_target = NULL;
			dp->door_flags |= DOOR_REVOKED;
			if (dp->door_flags & DOOR_PRIVATE)
				cv_broadcast(&dp->door_servers.dp_cv);
			else
				cv_broadcast(&p->p_server_threads.dp_cv);
		}

		if (t != NULL) {
			/*
			 * Let the caller know we are gone
			 */
			DOOR_CLIENT(t->t_door)->d_error = DOOR_EXIT;
			thread_lock(t);
			if (t->t_state == TS_SLEEP &&
			    SOBJ_TYPE(t->t_sobj_ops) == SOBJ_SHUTTLE)
				setrun_locked(t);
			thread_unlock(t);
		}
	}
	mutex_exit(&door_knob);
	if (st->d_pool)
		door_unbind_thread(st->d_pool);	/* Implicit door_unbind */
	kmem_free(dt, sizeof (door_data_t));
}

/*
 * Set DOOR_REVOKED for all doors of the current process. This is called
 * on exit before all lwp's are being terminated so that door calls will
 * return with an error.
 */
void
door_revoke_all()
{
	door_node_t *dp;
	proc_t *p = ttoproc(curthread);

	mutex_enter(&door_knob);
	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
		ASSERT(dp->door_target == p);
		dp->door_flags |= DOOR_REVOKED;
		if (dp->door_flags & DOOR_PRIVATE)
			cv_broadcast(&dp->door_servers.dp_cv);
	}
	cv_broadcast(&p->p_server_threads.dp_cv);
	mutex_exit(&door_knob);
}

/*
 * The process is exiting, and all doors it created need to be revoked.
 */
void
door_exit(void)
{
	door_node_t *dp;
	proc_t *p = ttoproc(curthread);

	/* by now only one lwp remains in the exiting process */
	ASSERT(p->p_lwpcnt == 1);
	/*
	 * Walk the list of active doors created by this process and
	 * revoke them all.
	 */
	mutex_enter(&door_knob);
	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
		dp->door_target = NULL;
		dp->door_flags |= DOOR_REVOKED;
		if (dp->door_flags & DOOR_PRIVATE)
			cv_broadcast(&dp->door_servers.dp_cv);
	}
	cv_broadcast(&p->p_server_threads.dp_cv);
	/* Clear the list */
	p->p_door_list = NULL;

	/* Clean up the unref list */
	while ((dp = p->p_unref_list) != NULL) {
		p->p_unref_list = dp->door_ulist;
		dp->door_ulist = NULL;
		/* drop door_knob around VN_RELE — it may block in inactive */
		mutex_exit(&door_knob);
		VN_RELE(DTOV(dp));
		mutex_enter(&door_knob);
	}
	mutex_exit(&door_knob);
}


/*
 * The process is executing forkall(), and we need to flag threads that
 * are bound to a door in the child.
 * This will make the child threads
 * return an error to door_return unless they call door_unbind first.
 */
void
door_fork(kthread_t *parent, kthread_t *child)
{
	door_data_t *pt = parent->t_door;
	/*
	 * NOTE(review): DOOR_SERVER(pt) is evaluated before the pt != NULL
	 * test below — presumably DOOR_SERVER is a pure pointer cast (no
	 * dereference), so this is safe; confirm against door_data.h.
	 */
	door_server_t *st = DOOR_SERVER(pt);
	door_data_t *dt;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	if (pt != NULL && (st->d_pool != NULL || st->d_invbound)) {
		/* parent thread is bound to a door */
		dt = child->t_door =
		    kmem_zalloc(sizeof (door_data_t), KM_SLEEP);
		DOOR_SERVER(dt)->d_invbound = 1;
	}
}

/*
 * Deliver queued unrefs to appropriate door server.
 */
static int
door_unref(void)
{
	door_node_t *dp;
	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
	proc_t *p = ttoproc(curthread);

	/* make sure there's only one unref thread per process */
	mutex_enter(&door_knob);
	if (p->p_unref_thread) {
		mutex_exit(&door_knob);
		return (set_errno(EALREADY));
	}
	p->p_unref_thread = 1;
	mutex_exit(&door_knob);

	(void) door_my_data(1);			/* create info, if necessary */

	/* loop forever, delivering one queued unref per iteration */
	for (;;) {
		mutex_enter(&door_knob);

		/* Grab a queued request */
		while ((dp = p->p_unref_list) == NULL) {
			if (!cv_wait_sig(&p->p_unref_cv, &door_knob)) {
				/*
				 * Interrupted.
				 * Return so we can finish forkall() or exit().
				 */
				p->p_unref_thread = 0;
				mutex_exit(&door_knob);
				return (set_errno(EINTR));
			}
		}
		p->p_unref_list = dp->door_ulist;
		dp->door_ulist = NULL;
		dp->door_flags |= DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);

		(void) door_upcall(DTOV(dp), &unref_args, NULL);

		mutex_enter(&door_knob);
		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);
		/* drop the hold taken by door_deliver_unref() */
		VN_RELE(DTOV(dp));
	}
}


/*
 * Deliver queued unrefs to kernel door server.
 */
/* ARGSUSED */
static void
door_unref_kernel(caddr_t arg)
{
	door_node_t *dp;
	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
	proc_t *p = ttoproc(curthread);
	callb_cpr_t cprinfo;

	/* should only be one of these */
	mutex_enter(&door_knob);
	if (p->p_unref_thread) {
		mutex_exit(&door_knob);
		return;
	}
	p->p_unref_thread = 1;
	mutex_exit(&door_knob);

	(void) door_my_data(1);		/* make sure we have a door_data_t */

	/* CPR bookkeeping so suspend/resume can safely park this thread */
	CALLB_CPR_INIT(&cprinfo, &door_knob, callb_generic_cpr, "door_unref");
	for (;;) {
		mutex_enter(&door_knob);
		/* Grab a queued request */
		while ((dp = p->p_unref_list) == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&p->p_unref_cv, &door_knob);
			CALLB_CPR_SAFE_END(&cprinfo, &door_knob);
		}
		p->p_unref_list = dp->door_ulist;
		dp->door_ulist = NULL;
		dp->door_flags |= DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);

		/* kernel doors are invoked directly, no upcall machinery */
		(*(dp->door_pc))(dp->door_data, &unref_args, NULL, NULL, NULL);

		mutex_enter(&door_knob);
		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);
		/* drop the hold taken by door_deliver_unref() */
		VN_RELE(DTOV(dp));
	}
}


/*
 * Queue an unref invocation for processing for the current process
 * The door may or may not be revoked at this point.
 */
void
door_deliver_unref(door_node_t *d)
{
	struct proc *server = d->door_target;

	ASSERT(MUTEX_HELD(&door_knob));
	ASSERT(d->door_active == 0);

	if (server == NULL)
		return;
	/*
	 * Create a lwp to deliver unref calls if one isn't already running.
	 *
	 * A separate thread is used to deliver unrefs since the current
	 * thread may be holding resources (e.g. locks) in user land that
	 * may be needed by the unref processing.  This would cause a
	 * deadlock.
	 */
	if (d->door_flags & DOOR_UNREF_MULTI) {
		/* multiple unrefs */
		d->door_flags &= ~DOOR_DELAY;
	} else {
		/* Only 1 unref per door */
		d->door_flags &= ~(DOOR_UNREF|DOOR_DELAY);
	}
	mutex_exit(&door_knob);

	/*
	 * Need to bump the vnode count before putting the door on the
	 * list so it doesn't get prematurely released by door_unref.
	 */
	VN_HOLD(DTOV(d));

	mutex_enter(&door_knob);
	/* is this door already on the unref list? */
	if (d->door_flags & DOOR_UNREF_MULTI) {
		door_node_t *dp;
		for (dp = server->p_unref_list; dp != NULL;
		    dp = dp->door_ulist) {
			if (d == dp) {
				/* already there, don't need to add another */
				mutex_exit(&door_knob);
				VN_RELE(DTOV(d));
				mutex_enter(&door_knob);
				return;
			}
		}
	}
	ASSERT(d->door_ulist == NULL);
	d->door_ulist = server->p_unref_list;
	server->p_unref_list = d;
	cv_broadcast(&server->p_unref_cv);
}

/*
 * The callers buffer isn't big enough for all of the data/fd's.  Allocate
 * space in the callers address space for the results and copy the data
 * there.
 *
 * For EOVERFLOW, we must clean up the server's door descriptors.
 */
static int
door_overflow(
	kthread_t *caller,
	caddr_t data_ptr,		/* data location */
	size_t data_size,		/* data size */
	door_desc_t *desc_ptr,		/* descriptor location */
	uint_t desc_num)		/* descriptor size */
{
	proc_t *callerp = ttoproc(caller);
	struct as *as = callerp->p_as;
	door_client_t *ct = DOOR_CLIENT(caller->t_door);
	caddr_t addr;			/* Resulting address in target */
	size_t rlen;			/* Rounded len */
	size_t len;
	uint_t i;
	size_t ds = desc_num * sizeof (door_desc_t);

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ASSERT(DOOR_T_HELD(ct) || ct->d_kernel);

	/* Do initial overflow check */
	if (!ufcanalloc(callerp, desc_num))
		return (EMFILE);

	/*
	 * Allocate space for this stuff in the callers address space
	 */
	rlen = roundup(data_size + ds, PAGESIZE);
	as_rangelock(as);
	map_addr_proc(&addr, rlen, 0, 1, as->a_userlimit, ttoproc(caller), 0);
	if (addr == NULL ||
	    as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0) {
		/* No virtual memory available, or anon mapping failed */
		as_rangeunlock(as);
		if (!ct->d_kernel && desc_num > 0) {
			/* release any DOOR_RELEASE fds before failing */
			int error = door_release_fds(desc_ptr, desc_num);
			if (error)
				return (error);
		}
		return (EOVERFLOW);
	}
	as_rangeunlock(as);

	/* kernel clients: no data/fd copying, just record the mapping */
	if (ct->d_kernel)
		goto out;

	if (data_size != 0) {
		caddr_t src = data_ptr;
		caddr_t saddr = addr;

		/* Copy any data, one page at a time */
		len = data_size;
		while (len != 0) {
			int amount;
			int error;

			amount = len > PAGESIZE ? PAGESIZE : len;
			if ((error = door_copy(as, src, saddr, amount)) != 0) {
				(void) as_unmap(as, addr, rlen);
				return (error);
			}
			saddr += amount;
			src += amount;
			len -= amount;
		}
	}
	/* Copy any fd's */
	if (desc_num != 0) {
		door_desc_t *didpp, *start;
		struct file **fpp;
		int fpp_size;

		start = didpp = kmem_alloc(ds, KM_SLEEP);
		if (copyin(desc_ptr, didpp, ds)) {
			kmem_free(start, ds);
			(void) as_unmap(as, addr, rlen);
			return (EFAULT);
		}

		fpp_size = desc_num * sizeof (struct file *);
		if (fpp_size > ct->d_fpp_size) {
			/* make more space */
			if (ct->d_fpp_size)
				kmem_free(ct->d_fpp, ct->d_fpp_size);
			ct->d_fpp_size = fpp_size;
			ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
		}
		fpp = ct->d_fpp;

		for (i = 0; i < desc_num; i++) {
			struct file *fp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
			    (fp = getf(fd)) == NULL) {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, desc_num - i, 0);
				kmem_free(start, ds);
				(void) as_unmap(as, addr, rlen);
				return (EINVAL);
			}
			/* hold the file_t across the transfer */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);

			*fpp = fp;
			releasef(fd);

			if (didpp->d_attributes & DOOR_RELEASE) {
				/* release passed reference */
				(void) closeandsetf(fd, NULL);
			}

			fpp++; didpp++;
		}
		kmem_free(start, ds);
	}

out:
	ct->d_overflow = 1;
	ct->d_args.rbuf = addr;
	ct->d_args.rsize = rlen;
	return (0);
}

/*
 * Transfer arguments from the client to the server.
 */
static int
door_args(kthread_t *server, int is_private)
{
	door_server_t *st = DOOR_SERVER(server->t_door);
	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
	uint_t	ndid;
	size_t	dsize;
	int	error;

	ASSERT(DOOR_T_HELD(st));
	ASSERT(MUTEX_NOT_HELD(&door_knob));

	ndid = ct->d_args.desc_num;
	if (ndid > door_max_desc)
		return (E2BIG);

	/*
	 * Get the stack layout, and fail now if it won't fit.
	 */
	error = door_layout(server, ct->d_args.data_size, ndid, is_private);
	if (error != 0)
		return (error);

	dsize = ndid * sizeof (door_desc_t);
	if (ct->d_args.data_size != 0) {
		if (ct->d_args.data_size <= door_max_arg) {
			/*
			 * Use a 2 copy method for small amounts of data
			 *
			 * Allocate a little more than we need for the
			 * args, in the hope that the results will fit
			 * without having to reallocate a buffer
			 */
			ASSERT(ct->d_buf == NULL);
			ct->d_bufsize = roundup(ct->d_args.data_size,
			    DOOR_ROUND);
			ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
			if (copyin(ct->d_args.data_ptr,
			    ct->d_buf, ct->d_args.data_size) != 0) {
				kmem_free(ct->d_buf, ct->d_bufsize);
				ct->d_buf = NULL;
				ct->d_bufsize = 0;
				return (EFAULT);
			}
		} else {
			struct as	*as;
			caddr_t		src;
			caddr_t		dest;
			size_t		len = ct->d_args.data_size;
			uintptr_t	base;

			/*
			 * Use a 1 copy method
			 */
			as = ttoproc(server)->p_as;
			src = ct->d_args.data_ptr;

			dest = st->d_layout.dl_datap;
			base = (uintptr_t)dest;

			/*
			 * Copy data directly into server.  We proceed
			 * downward from the top of the stack, to mimic
			 * normal stack usage. This allows the guard page
			 * to stop us before we corrupt anything.
			 */
			while (len != 0) {
				uintptr_t start;
				uintptr_t end;
				uintptr_t offset;
				size_t amount;

				/*
				 * Locate the next part to copy: the last
				 * not-yet-copied page-aligned chunk.
				 */
				end = base + len;
				start = P2ALIGN(end - 1, PAGESIZE);

				/*
				 * if we are on the final (first) page, fix
				 * up the start position.
				 */
				if (P2ALIGN(base, PAGESIZE) == start)
					start = base;

				offset = start - base;	/* the copy offset */
				amount = end - start;	/* # bytes to copy */

				ASSERT(amount > 0 && amount <= len &&
				    amount <= PAGESIZE);

				error = door_copy(as, src + offset,
				    dest + offset, amount);
				if (error != 0)
					return (error);
				len -= amount;
			}
		}
	}
	/*
	 * Copyin the door args and translate them into files
	 */
	if (ndid != 0) {
		door_desc_t	*didpp;
		door_desc_t	*start;
		struct file	**fpp;

		start = didpp = kmem_alloc(dsize, KM_SLEEP);

		if (copyin(ct->d_args.desc_ptr, didpp, dsize)) {
			kmem_free(start, dsize);
			return (EFAULT);
		}
		ct->d_fpp_size = ndid * sizeof (struct file *);
		ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
		fpp = ct->d_fpp;
		while (ndid--) {
			struct file *fp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			/* We only understand file descriptors as passed objs */
			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
			    (fp = getf(fd)) == NULL) {
				/*
				 * close translated references
				 * (fpp - ct->d_fpp == # translated so far)
				 */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, ndid + 1, 0);
				kmem_free(start, dsize);
				kmem_free(ct->d_fpp, ct->d_fpp_size);
				ct->d_fpp = NULL;
				ct->d_fpp_size = 0;
				return (EINVAL);
			}
			/* Hold the fp */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);

			*fpp = fp;
			releasef(fd);

			if (didpp->d_attributes & DOOR_RELEASE) {
				/* release passed reference */
				(void) closeandsetf(fd, NULL);
			}

			fpp++; didpp++;
		}
		kmem_free(start, dsize);
	}
	return (0);
}

/*
 * Transfer arguments from a user client to a kernel server.  This copies in
 * descriptors and translates them into door handles.  It doesn't touch the
 * other data, letting the kernel server deal with that (to avoid needing
 * to copy the data twice).
 */
static int
door_translate_in(void)
{
	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
	uint_t ndid;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ndid = ct->d_args.desc_num;
	if (ndid > door_max_desc)
		return (E2BIG);
	/*
	 * Copyin the door args and translate them into door handles.
	 */
	if (ndid != 0) {
		door_desc_t	*didpp;
		door_desc_t	*start;
		size_t		dsize = ndid * sizeof (door_desc_t);
		struct file	*fp;

		start = didpp = kmem_alloc(dsize, KM_SLEEP);

		if (copyin(ct->d_args.desc_ptr, didpp, dsize)) {
			kmem_free(start, dsize);
			return (EFAULT);
		}
		while (ndid--) {
			vnode_t	*vp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			/*
			 * We only understand file descriptors as passed objs
			 */
			if ((didpp->d_attributes & DOOR_DESCRIPTOR) &&
			    (fp = getf(fd)) != NULL) {
				didpp->d_data.d_handle = FTODH(fp);
				/* Hold the door */
				door_ki_hold(didpp->d_data.d_handle);

				releasef(fd);

				if (didpp->d_attributes & DOOR_RELEASE) {
					/* release passed reference */
					(void) closeandsetf(fd, NULL);
				}

				if (VOP_REALVP(fp->f_vnode, &vp, NULL))
					vp = fp->f_vnode;

				/* Set attributes */
				didpp->d_attributes = DOOR_HANDLE |
				    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
			} else {
				/* close translated references */
				door_fd_close(start, didpp - start);
				/* close untranslated references */
				door_fd_rele(didpp, ndid + 1, 0);
				kmem_free(start, dsize);
				return (EINVAL);
			}
			didpp++;
		}
		/* in-kernel copy of the descriptors becomes desc_ptr */
		ct->d_args.desc_ptr = start;
	}
	return (0);
}

/*
 * Translate door arguments from kernel to user.  This copies the passed
 * door handles.  It doesn't touch other data.  It is used by door_upcall,
 * and for data returned by a door_call to a kernel server.
 */
static int
door_translate_out(void)
{
	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
	uint_t ndid;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ndid = ct->d_args.desc_num;
	if (ndid > door_max_desc) {
		door_fd_rele(ct->d_args.desc_ptr, ndid, 1);
		return (E2BIG);
	}
	/*
	 * Translate the door args into files
	 */
	if (ndid != 0) {
		door_desc_t	*didpp = ct->d_args.desc_ptr;
		struct file	**fpp;

		ct->d_fpp_size = ndid * sizeof (struct file *);
		fpp = ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
		while (ndid--) {
			struct file *fp = NULL;
			int fd = -1;	/* -1 means handle, not descriptor */

			/*
			 * We understand file descriptors and door
			 * handles as passed objs.
			 */
			if (didpp->d_attributes & DOOR_DESCRIPTOR) {
				fd = didpp->d_data.d_desc.d_descriptor;
				fp = getf(fd);
			} else if (didpp->d_attributes & DOOR_HANDLE)
				fp = DHTOF(didpp->d_data.d_handle);
			if (fp != NULL) {
				/* Hold the fp */
				mutex_enter(&fp->f_tlock);
				fp->f_count++;
				mutex_exit(&fp->f_tlock);

				*fpp = fp;
				if (didpp->d_attributes & DOOR_DESCRIPTOR)
					releasef(fd);
				if (didpp->d_attributes & DOOR_RELEASE) {
					/* release passed reference */
					if (fd >= 0)
						(void) closeandsetf(fd, NULL);
					else
						(void) closef(fp);
				}
			} else {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, ndid + 1, 1);
				kmem_free(ct->d_fpp, ct->d_fpp_size);
				ct->d_fpp = NULL;
				ct->d_fpp_size = 0;
				return (EINVAL);
			}
			fpp++; didpp++;
		}
	}
	return (0);
}

/*
 * Move the results from the server to the client
 */
static int
door_results(kthread_t *caller, caddr_t data_ptr, size_t data_size,
    door_desc_t *desc_ptr, uint_t desc_num)
{
	door_client_t	*ct = DOOR_CLIENT(caller->t_door);
	size_t		dsize;
	size_t		rlen;
	size_t		result_size;

	ASSERT(DOOR_T_HELD(ct));
	ASSERT(MUTEX_NOT_HELD(&door_knob));

	if (ct->d_noresults)
		return (E2BIG);		/* No results expected */

	if (desc_num > door_max_desc)
		return (E2BIG);		/* Too many descriptors */

	dsize = desc_num * sizeof (door_desc_t);
	/*
	 * Check if the results are bigger than the clients buffer.
	 * When descriptors are present the data is rounded up so the
	 * descriptor array that follows it is properly aligned.
	 */
	if (dsize)
		rlen = roundup(data_size, sizeof (door_desc_t));
	else
		rlen = data_size;
	if ((result_size = rlen + dsize) == 0)
		return (0);

	if (ct->d_upcall) {
		/*
		 * Handle upcalls
		 */
		if (ct->d_args.rbuf == NULL || ct->d_args.rsize < result_size) {
			/*
			 * If there's no return buffer or the buffer is too
			 * small, allocate a new one.  The old buffer (if it
			 * exists) will be freed by the upcall client.
			 */
			if (result_size > door_max_upcall_reply)
				return (E2BIG);
			ct->d_args.rsize = result_size;
			ct->d_args.rbuf = kmem_alloc(result_size, KM_SLEEP);
		}
		ct->d_args.data_ptr = ct->d_args.rbuf;
		if (data_size != 0 &&
		    copyin(data_ptr, ct->d_args.data_ptr, data_size) != 0)
			return (EFAULT);
	} else if (result_size > ct->d_args.rsize) {
		/* Results don't fit; fall back to an overflow mapping */
		return (door_overflow(caller, data_ptr, data_size,
		    desc_ptr, desc_num));
	} else if (data_size != 0) {
		if (data_size <= door_max_arg) {
			/*
			 * Use a 2 copy method for small amounts of data
			 */
			if (ct->d_buf == NULL) {
				ct->d_bufsize = data_size;
				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
			} else if (ct->d_bufsize < data_size) {
				kmem_free(ct->d_buf, ct->d_bufsize);
				ct->d_bufsize = data_size;
				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
			}
			if (copyin(data_ptr, ct->d_buf, data_size) != 0)
				return (EFAULT);
		} else {
			struct as *as = ttoproc(caller)->p_as;
			caddr_t	dest = ct->d_args.rbuf;
			caddr_t	src = data_ptr;
			size_t	len = data_size;

			/* Copy data directly into client, a page at a time */
			while (len != 0) {
				uint_t	amount;
				uint_t	max;
				uint_t	off;
				int	error;

				off = (uintptr_t)dest & PAGEOFFSET;
				if (off)
					max = PAGESIZE - off;
				else
					max = PAGESIZE;
				amount = len > max ? max : len;
				error = door_copy(as, src, dest, amount);
				if (error != 0)
					return (error);
				dest += amount;
				src += amount;
				len -= amount;
			}
		}
	}

	/*
	 * Copyin the returned door ids and translate them into door_node_t
	 */
	if (desc_num != 0) {
		door_desc_t *start;
		door_desc_t *didpp;
		struct file **fpp;
		size_t	fpp_size;
		uint_t	i;

		/* First, check if we would overflow client */
		if (!ufcanalloc(ttoproc(caller), desc_num))
			return (EMFILE);

		start = didpp = kmem_alloc(dsize, KM_SLEEP);
		if (copyin(desc_ptr, didpp, dsize)) {
			kmem_free(start, dsize);
			return (EFAULT);
		}
		fpp_size = desc_num * sizeof (struct file *);
		if (fpp_size > ct->d_fpp_size) {
			/* make more space */
			if (ct->d_fpp_size)
				kmem_free(ct->d_fpp, ct->d_fpp_size);
			ct->d_fpp_size = fpp_size;
			ct->d_fpp = kmem_alloc(fpp_size, KM_SLEEP);
		}
		fpp = ct->d_fpp;

		for (i = 0; i < desc_num; i++) {
			struct file *fp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			/* Only understand file descriptor results */
			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
			    (fp = getf(fd)) == NULL) {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, desc_num - i, 0);
				kmem_free(start, dsize);
				return (EINVAL);
			}

			/* Hold the fp */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);

			*fpp = fp;
			releasef(fd);

			if (didpp->d_attributes & DOOR_RELEASE) {
				/* release passed reference */
				(void) closeandsetf(fd, NULL);
			}

			fpp++; didpp++;
		}
		kmem_free(start, dsize);
	}
	return (0);
}

/*
 * Close all the descriptors.
2838 */ 2839 static void 2840 door_fd_close(door_desc_t *d, uint_t n) 2841 { 2842 uint_t i; 2843 2844 ASSERT(MUTEX_NOT_HELD(&door_knob)); 2845 for (i = 0; i < n; i++) { 2846 if (d->d_attributes & DOOR_DESCRIPTOR) { 2847 (void) closeandsetf( 2848 d->d_data.d_desc.d_descriptor, NULL); 2849 } else if (d->d_attributes & DOOR_HANDLE) { 2850 door_ki_rele(d->d_data.d_handle); 2851 } 2852 d++; 2853 } 2854 } 2855 2856 /* 2857 * Close descriptors that have the DOOR_RELEASE attribute set. 2858 */ 2859 void 2860 door_fd_rele(door_desc_t *d, uint_t n, int from_kernel) 2861 { 2862 uint_t i; 2863 2864 ASSERT(MUTEX_NOT_HELD(&door_knob)); 2865 for (i = 0; i < n; i++) { 2866 if (d->d_attributes & DOOR_RELEASE) { 2867 if (d->d_attributes & DOOR_DESCRIPTOR) { 2868 (void) closeandsetf( 2869 d->d_data.d_desc.d_descriptor, NULL); 2870 } else if (from_kernel && 2871 (d->d_attributes & DOOR_HANDLE)) { 2872 door_ki_rele(d->d_data.d_handle); 2873 } 2874 } 2875 d++; 2876 } 2877 } 2878 2879 /* 2880 * Copy descriptors into the kernel so we can release any marked 2881 * DOOR_RELEASE. 
 */
int
door_release_fds(door_desc_t *desc_ptr, uint_t ndesc)
{
	size_t dsize;
	door_desc_t *didpp;
	uint_t desc_num;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ASSERT(ndesc != 0);

	/* Process in batches of at most door_max_desc descriptors */
	desc_num = MIN(ndesc, door_max_desc);

	dsize = desc_num * sizeof (door_desc_t);
	didpp = kmem_alloc(dsize, KM_SLEEP);

	while (ndesc > 0) {
		uint_t count = MIN(ndesc, desc_num);

		if (copyin(desc_ptr, didpp, count * sizeof (door_desc_t))) {
			kmem_free(didpp, dsize);
			return (EFAULT);
		}
		door_fd_rele(didpp, count, 0);

		ndesc -= count;
		desc_ptr += count;
	}
	kmem_free(didpp, dsize);
	return (0);
}

/*
 * Decrement ref count on all the files passed
 */
static void
door_fp_close(struct file **fp, uint_t n)
{
	uint_t i;

	ASSERT(MUTEX_NOT_HELD(&door_knob));

	for (i = 0; i < n; i++)
		(void) closef(fp[i]);
}

/*
 * Copy data from 'src' in current address space to 'dest' in 'as' for 'len'
 * bytes.
 *
 * Performs this using 1 mapin and 1 copy operation.
 *
 * We really should do more than 1 page at a time to improve
 * performance, but for now this is treated as an anomalous condition.
 */
static int
door_copy(struct as *as, caddr_t src, caddr_t dest, uint_t len)
{
	caddr_t	kaddr;
	caddr_t	rdest;
	uint_t	off;
	page_t	**pplist;
	page_t	*pp = NULL;
	int	error = 0;

	ASSERT(len <= PAGESIZE);
	off = (uintptr_t)dest & PAGEOFFSET;	/* offset within the page */
	rdest = (caddr_t)((uintptr_t)dest &
	    (uintptr_t)PAGEMASK);	/* Page boundary */
	ASSERT(off + len <= PAGESIZE);

	/*
	 * Lock down destination page.
	 */
	if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
		return (E2BIG);
	/*
	 * Check if we have a shadow page list from as_pagelock. If not,
	 * we took the slow path and have to find our page struct the hard
	 * way.
	 */
	if (pplist == NULL) {
		pfn_t	pfnum;

		/* MMU mapping is already locked down */
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		pfnum = hat_getpfnum(as->a_hat, rdest);
		AS_LOCK_EXIT(as, &as->a_lock);

		/*
		 * TODO: The pfn step should not be necessary - need
		 * a hat_getpp() function.
		 */
		if (pf_is_memory(pfnum)) {
			pp = page_numtopp_nolock(pfnum);
			ASSERT(pp == NULL || PAGE_LOCKED(pp));
		} else
			pp = NULL;
		if (pp == NULL) {
			as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
			return (E2BIG);
		}
	} else {
		/* shadow list; we locked exactly one page, so use entry 0 */
		pp = *pplist;
	}
	/*
	 * Map destination page into kernel address
	 */
	kaddr = (caddr_t)ppmapin(pp, PROT_READ | PROT_WRITE, (caddr_t)-1);

	/*
	 * Copy from src to dest
	 */
	if (copyin(src, kaddr + off, len) != 0)
		error = EFAULT;
	/*
	 * Unmap destination page from kernel
	 */
	ppmapout(kaddr);
	/*
	 * Unlock destination page
	 */
	as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
	return (error);
}

/*
 * General kernel upcall using doors
 * Returns 0 on success, errno for failures.
 * Caller must have a hold on the door based vnode, and on any
 * references passed in desc_ptr.  The references are released
 * in the event of an error, and passed without duplication
 * otherwise.  Note that param->rbuf must be 64-bit aligned in
 * a 64-bit kernel, since it may be used to store door descriptors
 * if they are returned by the server.  The caller is responsible
 * for holding a reference to the cred passed in.
 */
int
door_upcall(vnode_t *vp, door_arg_t *param, struct cred *cred)
{
	/* Locals */
	door_node_t	*dp;
	kthread_t	*server_thread;
	int		error = 0;
	klwp_t		*lwp;
	door_client_t	*ct;		/* curthread door_data */
	door_server_t	*st;		/* server thread door_data */
	int		gotresults = 0;
	int		cancel_pending;

	if (vp->v_type != VDOOR) {
		if (param->desc_num)
			door_fd_rele(param->desc_ptr, param->desc_num, 1);
		return (EINVAL);
	}

	lwp = ttolwp(curthread);
	ct = door_my_client(1);
	dp = VTOD(vp);	/* Convert to a door_node_t */

	mutex_enter(&door_knob);
	if (DOOR_INVALID(dp)) {
		mutex_exit(&door_knob);
		if (param->desc_num)
			door_fd_rele(param->desc_ptr, param->desc_num, 1);
		error = EBADF;
		goto out;
	}

	if (dp->door_target == &p0) {
		/* Can't do an upcall to a kernel server */
		mutex_exit(&door_knob);
		if (param->desc_num)
			door_fd_rele(param->desc_ptr, param->desc_num, 1);
		error = EINVAL;
		goto out;
	}

	error = door_check_limits(dp, param, 1);
	if (error != 0) {
		mutex_exit(&door_knob);
		if (param->desc_num)
			door_fd_rele(param->desc_ptr, param->desc_num, 1);
		goto out;
	}

	/*
	 * Get a server thread from the target domain
	 */
	if ((server_thread = door_get_server(dp)) == NULL) {
		if (DOOR_INVALID(dp))
			error = EBADF;
		else
			error = EAGAIN;
		mutex_exit(&door_knob);
		if (param->desc_num)
			door_fd_rele(param->desc_ptr, param->desc_num, 1);
		goto out;
	}

	st = DOOR_SERVER(server_thread->t_door);
	/*
	 * Remember the caller's buffer; door_results() may replace
	 * d_args.rbuf with a larger allocation (freed by the upcall
	 * client) if the reply doesn't fit.
	 */
	ct->d_buf = param->data_ptr;
	ct->d_bufsize = param->data_size;
	ct->d_args = *param;	/* structure assignment */

	if (ct->d_args.desc_num) {
		/*
		 * Move data from client to server
		 */
		DOOR_T_HOLD(st);
		mutex_exit(&door_knob);
		error = door_translate_out();
		mutex_enter(&door_knob);
		DOOR_T_RELEASE(st);
		if (error) {
			/*
			 * We're not going to resume this thread after all
			 */
			door_release_server(dp, server_thread);
			shuttle_sleep(server_thread);
			mutex_exit(&door_knob);
			goto out;
		}
	}

	ct->d_upcall = 1;
	ct->d_cred = cred;
	if (param->rsize == 0)
		ct->d_noresults = 1;
	else
		ct->d_noresults = 0;

	dp->door_active++;

	ct->d_error = DOOR_WAIT;
	st->d_caller = curthread;
	st->d_active = dp;

	/* Hand the CPU to the server thread; returns when it wakes us */
	shuttle_resume(server_thread, &door_knob);

	mutex_enter(&door_knob);
shuttle_return:
	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
		/*
		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
		 */
		mutex_exit(&door_knob);		/* May block in ISSIG */
		cancel_pending = 0;
		if (lwp && (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
		    MUSTRETURN(curproc, curthread) ||
		    (cancel_pending = schedctl_cancel_pending()) != 0)) {
			/* Signal, forkall, ... */
			if (cancel_pending)
				schedctl_cancel_eintr();
			lwp->lwp_sysabort = 0;
			mutex_enter(&door_knob);
			error = EINTR;
			/*
			 * If the server has finished processing our call,
			 * or exited (calling door_slam()), then d_error
			 * will have changed.  If the server hasn't finished
			 * yet, d_error will still be DOOR_WAIT, and we
			 * let it know we are not interested in any
			 * results by sending a SIGCANCEL, unless the door
			 * is marked with DOOR_NO_CANCEL.
			 */
			if (ct->d_error == DOOR_WAIT &&
			    st->d_caller == curthread) {
				proc_t	*p = ttoproc(server_thread);

				st->d_active = NULL;
				st->d_caller = NULL;
				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
					DOOR_T_HOLD(st);
					mutex_exit(&door_knob);

					mutex_enter(&p->p_lock);
					sigtoproc(p, server_thread, SIGCANCEL);
					mutex_exit(&p->p_lock);

					mutex_enter(&door_knob);
					DOOR_T_RELEASE(st);
				}
			}
		} else {
			/*
			 * Return from stop(), server exit...
			 *
			 * Note that the server could have done a
			 * door_return while the client was in stop state
			 * (ISSIG), in which case the error condition
			 * is updated by the server.
			 */
			mutex_enter(&door_knob);
			if (ct->d_error == DOOR_WAIT) {
				/* Still waiting for a reply */
				shuttle_swtch(&door_knob);
				mutex_enter(&door_knob);
				if (lwp)
					lwp->lwp_asleep = 0;
				goto	shuttle_return;
			} else if (ct->d_error == DOOR_EXIT) {
				/* Server exit */
				error = EINTR;
			} else {
				/* Server did a door_return during ISSIG */
				error = ct->d_error;
			}
		}
		/*
		 * Can't exit if the server is currently copying
		 * results for me
		 */
		while (DOOR_T_HELD(ct))
			cv_wait(&ct->d_cv, &door_knob);

		/*
		 * Find out if results were successfully copied.
		 */
		if (ct->d_error == 0)
			gotresults = 1;
	}
	if (lwp) {
		lwp->lwp_asleep = 0;		/* /proc */
		lwp->lwp_sysabort = 0;		/* /proc */
	}
	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
		door_deliver_unref(dp);
	mutex_exit(&door_knob);

	/*
	 * Translate returned doors (if any)
	 */

	if (ct->d_noresults)
		goto out;

	if (error) {
		/*
		 * If server returned results successfully, then we've
		 * been interrupted and may need to clean up.
		 */
		if (gotresults) {
			ASSERT(error == EINTR);
			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
		}
		goto out;
	}

	if (ct->d_args.desc_num) {
		struct file	**fpp;
		door_desc_t	*didpp;
		vnode_t		*vp;
		uint_t		n = ct->d_args.desc_num;

		/* descriptors follow the (aligned) data in rbuf */
		didpp = ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
		fpp = ct->d_fpp;

		while (n--) {
			struct file *fp;

			fp = *fpp;
			if (VOP_REALVP(fp->f_vnode, &vp, NULL))
				vp = fp->f_vnode;

			didpp->d_attributes = DOOR_HANDLE |
			    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
			didpp->d_data.d_handle = FTODH(fp);

			fpp++; didpp++;
		}
	}

	/* on return data is in rbuf */
	*param = ct->d_args;	/* structure assignment */

out:
	if (ct->d_fpp) {
		kmem_free(ct->d_fpp, ct->d_fpp_size);
		ct->d_fpp = NULL;
		ct->d_fpp_size = 0;
	}

	/* reset per-call client state */
	ct->d_cred = NULL;
	ct->d_upcall = 0;
	ct->d_noresults = 0;
	ct->d_buf = NULL;
	ct->d_bufsize = 0;
	return (error);
}

/*
 * Add a door to the per-process list of active doors for which the
 * process is a server.
 */
static void
door_list_insert(door_node_t *dp)
{
	proc_t *p = dp->door_target;

	ASSERT(MUTEX_HELD(&door_knob));
	dp->door_list = p->p_door_list;
	p->p_door_list = dp;
}

/*
 * Remove a door from the per-process list of active doors.
 */
void
door_list_delete(door_node_t *dp)
{
	door_node_t **pp;

	ASSERT(MUTEX_HELD(&door_knob));
	/*
	 * Find the door in the list.  If the door belongs to another process,
	 * it's OK to use p_door_list since that process can't exit until all
	 * doors have been taken off the list (see door_exit).
	 */
	pp = &(dp->door_target->p_door_list);
	while (*pp != dp)
		pp = &((*pp)->door_list);

	/* found it, take it off the list */
	*pp = dp->door_list;
}


/*
 * External kernel interfaces for doors.  These functions are available
 * outside the doorfs module for use in creating and using doors from
 * within the kernel.
 */

/*
 * door_ki_upcall invokes a user-level door server from the kernel, with
 * the credentials associated with curthread.
 */
int
door_ki_upcall(door_handle_t dh, door_arg_t *param)
{
	return (door_ki_upcall_cred(dh, param, NULL));
}

/*
 * door_ki_upcall_cred invokes a user-level door server from the kernel with
 * the given credentials.  If the "cred" argument is NULL, uses the credentials
 * associated with current thread.
 */
int
door_ki_upcall_cred(door_handle_t dh, door_arg_t *param, struct cred *cred)
{
	file_t *fp = DHTOF(dh);
	vnode_t *realvp;

	if (VOP_REALVP(fp->f_vnode, &realvp, NULL))
		realvp = fp->f_vnode;
	return (door_upcall(realvp, param, cred));
}

/*
 * Function call to create a "kernel" door server.  A kernel door
 * server provides a way for a user-level process to invoke a function
 * in the kernel through a door_call.  From the caller's point of
 * view, a kernel door server looks the same as a user-level one
 * (except the server pid is 0).  Unlike normal door calls, the
 * kernel door function is invoked via a normal function call in the
 * same thread and context as the caller.
 */
int
door_ki_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
    door_handle_t *dhp)
{
	int err;
	file_t *fp;

	/* no DOOR_PRIVATE, and at most one of the UNREF flags */
	if ((attributes & ~DOOR_KI_CREATE_MASK) ||
	    (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
	    (DOOR_UNREF | DOOR_UNREF_MULTI))
		return (EINVAL);

	err = door_create_common(pc_cookie, data_cookie, attributes,
	    1, NULL, &fp);
	if (err == 0 && (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) &&
	    p0.p_unref_thread == 0) {
		/* need to create unref thread for process 0 */
		(void) thread_create(NULL, 0, door_unref_kernel, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	if (err == 0) {
		*dhp = FTODH(fp);
	}
	return (err);
}

/*
 * Take a hold on a door handle (bumps the underlying file refcount).
 */
void
door_ki_hold(door_handle_t dh)
{
	file_t *fp = DHTOF(dh);

	mutex_enter(&fp->f_tlock);
	fp->f_count++;
	mutex_exit(&fp->f_tlock);
}

/*
 * Release a hold on a door handle.
 */
void
door_ki_rele(door_handle_t dh)
{
	file_t *fp = DHTOF(dh);

	(void) closef(fp);
}

/*
 * Look up a door by pathname and return a held handle for it.
 */
int
door_ki_open(char *pathname, door_handle_t *dhp)
{
	file_t *fp;
	vnode_t *vp;
	int err;

	if ((err = lookupname(pathname, UIO_SYSSPACE, FOLLOW, NULL, &vp)) != 0)
		return (err);
	if (err = VOP_OPEN(&vp, FREAD, kcred, NULL)) {
		VN_RELE(vp);
		return (err);
	}
	if (vp->v_type != VDOOR) {
		VN_RELE(vp);
		return (EINVAL);
	}
	if ((err = falloc(vp, FREAD | FWRITE, &fp, NULL)) != 0) {
		VN_RELE(vp);
		return (err);
	}
	/* falloc returns with f_tlock held on success */
	mutex_exit(&fp->f_tlock);
	*dhp = FTODH(fp);
	return (0);
}

/*
 * Fill in door_info for the door behind a handle; EINVAL if not a door.
 */
int
door_ki_info(door_handle_t dh, struct door_info *dip)
{
	file_t *fp = DHTOF(dh);
	vnode_t *vp;

	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;
	if (vp->v_type != VDOOR)
		return (EINVAL);
	door_info_common(VTOD(vp), dip, fp);
	return (0);
}

/*
 * Translate a door descriptor into a held door handle; NULL on failure.
 */
door_handle_t
door_ki_lookup(int did)
{
	file_t *fp;
	door_handle_t dh;

	/* is the descriptor really a door? */
	if (door_lookup(did, &fp) == NULL)
		return (NULL);
	/* got the door, put a hold on it and release the fd */
	dh = FTODH(fp);
	door_ki_hold(dh);
	releasef(did);
	return (dh);
}

/*
 * Set a door parameter via its handle; EINVAL if not a door.
 */
int
door_ki_setparam(door_handle_t dh, int type, size_t val)
{
	file_t *fp = DHTOF(dh);
	vnode_t *vp;

	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;
	if (vp->v_type != VDOOR)
		return (EINVAL);
	return (door_setparam_common(VTOD(vp), 1, type, val));
}

/*
 * Get a door parameter via its handle; EINVAL if not a door.
 */
int
door_ki_getparam(door_handle_t dh, int type, size_t *out)
{
	file_t *fp = DHTOF(dh);
	vnode_t *vp;

	if (VOP_REALVP(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;
	if (vp->v_type != VDOOR)
		return (EINVAL);
	return (door_getparam_common(VTOD(vp), type, out));
}