/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <stdio.h>
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
#include <door.h>
#include <unistd.h>
#include <stddef.h>
#include <stdlib.h>
#include <strings.h>
#include <pthread.h>
#include <atomic.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>

#include "libsysevent.h"
#include "libsysevent_impl.h"

/*
 * The functions below deal with the General Purpose Event Handling framework
 *
 * sysevent_evc_bind - create/bind application to named channel
 * sysevent_evc_unbind - unbind from previously bound/created channel
 * sysevent_evc_subscribe - subscribe to existing event channel
 * sysevent_evc_unsubscribe - unsubscribe from existing event channel
 * sysevent_evc_publish - generate a system event via an event channel
 * sysevent_evc_control - various channel-based control operations
 */
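
/*
 * Illustrative usage sketch (not part of this library's code paths): a
 * typical consumer binds a channel, subscribes with a callback, and later
 * unbinds.  The channel name, subscriber id and handler below are
 * hypothetical.
 *
 *	static int
 *	my_handler(sysevent_t *ev, void *cookie)
 *	{
 *		// Process the event; return 0 on success.
 *		return (0);
 *	}
 *
 *	evchan_t *ch;
 *
 *	if (sysevent_evc_bind("com.example.mychan", &ch,
 *	    EVCH_CREAT | EVCH_HOLD_PEND) != 0)
 *		return (errno);
 *	(void) sysevent_evc_subscribe(ch, "my_sub", EC_ALL,
 *	    my_handler, NULL, 0);
 *	...
 *	(void) sysevent_evc_unbind(ch);
 */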

static void kill_door_servers(evchan_subscr_t *);

#define misaligned(p) ((uintptr_t)(p) & 3) /* 4-byte alignment required */

static pthread_key_t nrkey = PTHREAD_ONCE_KEY_NP;

/*
 * If the current thread is a door server thread servicing a door created
 * for us in sysevent_evc_xsubscribe, then an attempt to unsubscribe from
 * within door invocation context on the same channel will deadlock in the
 * kernel waiting for our own invocation to complete.  Such calls are
 * forbidden, and we abort if they are encountered (better than hanging
 * unkillably).
 *
 * We'd like to offer this detection to subscriptions established with
 * sysevent_evc_subscribe, but we don't have control over the door service
 * threads in that case.  Perhaps the fix is to always use door_xcreate
 * even for sysevent_evc_subscribe?
 */
static boolean_t
will_deadlock(evchan_t *scp)
{
	evchan_subscr_t *subp = pthread_getspecific(nrkey);
	evchan_impl_hdl_t *hdl = EVCHAN_IMPL_HNDL(scp);

	return (subp != NULL && subp->ev_subhead == hdl ? B_TRUE : B_FALSE);
}
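
/*
 * Illustrative sketch of the forbidden pattern described above (handler,
 * channel handle and subscriber id are hypothetical): a callback running
 * in a door server thread created by sysevent_evc_xsubscribe must not
 * unsubscribe from, or unbind, the channel it is being delivered on.
 * Such calls fail with EDEADLK instead of hanging in the kernel.
 *
 *	static int
 *	my_handler(sysevent_t *ev, void *cookie)
 *	{
 *		evchan_t *ch = cookie;
 *
 *		// Wrong: this would wait for our own invocation to
 *		// complete; will_deadlock() turns it into EDEADLK.
 *		(void) sysevent_evc_unsubscribe(ch, "my_sub");
 *		return (0);
 *	}
 */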

/*
 * Check syntax of a channel name
 */
static int
sysevent_is_chan_name(const char *str)
{
	for (; *str != '\0'; str++) {
		if (!EVCH_ISCHANCHAR(*str))
			return (0);
	}

	return (1);
}

/*
 * Check for printable characters
 */
static int
strisprint(const char *s)
{
	for (; *s != '\0'; s++) {
		if (*s < ' ' || *s > '~')
			return (0);
	}

	return (1);
}

/*
 * sysevent_evc_bind - Create/bind application to named channel
 */
int
sysevent_evc_bind(const char *channel, evchan_t **scpp, uint32_t flags)
{
	int chanlen;
	evchan_t *scp;
	sev_bind_args_t uargs;
	int ec;

	if (scpp == NULL || misaligned(scpp)) {
		return (errno = EINVAL);
	}

	/* Provide useful value in error case */
	*scpp = NULL;

	if (channel == NULL ||
	    (chanlen = strlen(channel) + 1) > MAX_CHNAME_LEN) {
		return (errno = EINVAL);
	}

	/* Check channel syntax */
	if (!sysevent_is_chan_name(channel)) {
		return (errno = EINVAL);
	}

	if (flags & ~EVCH_B_FLAGS) {
		return (errno = EINVAL);
	}

	scp = calloc(1, sizeof (evchan_impl_hdl_t));
	if (scp == NULL) {
		return (errno = ENOMEM);
	}

	/*
	 * Enable sysevent driver.  Fall back if the device link doesn't
	 * exist; this situation can arise if a channel is bound early in
	 * system startup, prior to devfsadm(1M) being invoked.
	 */
	EV_FD(scp) = open(DEVSYSEVENT, O_RDWR);
	if (EV_FD(scp) == -1) {
		if (errno != ENOENT) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}

		EV_FD(scp) = open(DEVICESYSEVENT, O_RDWR);
		if (EV_FD(scp) == -1) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}
	}

	/*
	 * Force the file descriptor to be closed on exec.  The driver will
	 * then release stale binding handles, and will also release the
	 * associated subscriptions if the EVCH_SUB_KEEP flag was not set.
	 */
	(void) fcntl(EV_FD(scp), F_SETFD, FD_CLOEXEC);

	uargs.chan_name.name = (uintptr_t)channel;
	uargs.chan_name.len = chanlen;
	uargs.flags = flags;

	if (ioctl(EV_FD(scp), SEV_CHAN_OPEN, &uargs) != 0) {
		ec = errno;
		(void) close(EV_FD(scp));
		free(scp);
		return (errno = ec);
	}

	/* Needed to detect a fork() */
	EV_PID(scp) = getpid();
	(void) mutex_init(EV_LOCK(scp), USYNC_THREAD, NULL);

	*scpp = scp;

	return (0);
}

/*
 * sysevent_evc_unbind - Unbind from previously bound/created channel
 */
int
sysevent_evc_unbind(evchan_t *scp)
{
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp;
	int errcp;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * Unsubscribe, if we are in the process which did the bind.
	 */
	if (EV_PID(scp) == getpid()) {
		uargs.sid.name = NULL;
		uargs.sid.len = 0;
		/*
		 * The unsubscribe ioctl will block until all door upcalls
		 * have drained.
		 */
		if (ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs) != 0) {
			errcp = errno;
			(void) mutex_unlock(EV_LOCK(scp));
			return (errno = errcp);
		}
	}

	while ((subp = EV_SUB_NEXT(scp)) != NULL) {
		EV_SUB_NEXT(scp) = subp->evsub_next;

		/* If door_xcreate was applied we can clean up */
		if (subp->evsub_attr)
			kill_door_servers(subp);

		if (door_revoke(subp->evsub_door_desc) != 0 && errno == EPERM)
			(void) close(subp->evsub_door_desc);

		free(subp->evsub_sid);
		free(subp);
	}

	(void) mutex_unlock(EV_LOCK(scp));

	/*
	 * The close of the driver will do the unsubscribe if a) it is the
	 * last close and b) we are in a child which inherited subscriptions.
	 */
	(void) close(EV_FD(scp));
	(void) mutex_destroy(EV_LOCK(scp));
	free(scp);

	return (0);
}

/*
 * sysevent_evc_publish - Generate a system event via an event channel
 */
int
sysevent_evc_publish(evchan_t *scp, const char *class,
    const char *subclass, const char *vendor,
    const char *pub_name, nvlist_t *attr_list,
    uint32_t flags)
{
	sysevent_t *ev;
	sev_publish_args_t uargs;
	int rc;
	int ec;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	ev = sysevent_alloc_event((char *)class, (char *)subclass,
	    (char *)vendor, (char *)pub_name, attr_list);
	if (ev == NULL) {
		return (errno);
	}

	uargs.ev.name = (uintptr_t)ev;
	uargs.ev.len = SE_SIZE(ev);
	uargs.flags = flags;

	(void) mutex_lock(EV_LOCK(scp));

	rc = ioctl(EV_FD(scp), SEV_PUBLISH, (intptr_t)&uargs);
	ec = errno;

	(void) mutex_unlock(EV_LOCK(scp));

	sysevent_free(ev);

	if (rc != 0) {
		return (ec);
	}
	return (0);
}
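
/*
 * Illustrative publisher sketch (class, subclass, vendor and publisher
 * strings are hypothetical; "ch" is a channel bound as above).  The
 * attribute list is optional and may be NULL.
 *
 *	nvlist_t *nvl = NULL;
 *
 *	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) == 0 &&
 *	    nvlist_add_string(nvl, "device", "c0t0d0") == 0) {
 *		(void) sysevent_evc_publish(ch, "EC_example", "ESC_example",
 *		    "com.example", "myapp", nvl, EVCH_SLEEP);
 *	}
 *	nvlist_free(nvl);
 */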

/*
 * Generic callback which catches events from the kernel and calls the
 * subscriber's callback routine.
 *
 * The kernel guarantees that door upcalls are disabled before an
 * unsubscribe completes, so the cookie always points to a valid
 * evchan_subscr_t.
 *
 * Furthermore it is not necessary to lock subp because the sysevent
 * framework guarantees no unsubscription until door_return.
 */
/*ARGSUSED3*/
static void
door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	int rval = 0;

	/*
	 * If we've been invoked simply to kill the thread then
	 * exit now.
	 */
	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		pthread_exit(NULL);

	if (args == NULL || alen <= (size_t)0) {
		/* Skip callback execution */
		rval = EINVAL;
	} else {
		rval = subp->evsub_func((sysevent_t *)(void *)args,
		    subp->evsub_cookie);
	}

	/*
	 * Fill in return values for door_return
	 */
	alen = sizeof (rval);
	bcopy(&rval, args, alen);

	(void) door_return(args, alen, NULL, 0);
}

static pthread_once_t xsub_thrattr_once = PTHREAD_ONCE_INIT;
static pthread_attr_t xsub_thrattr;

static void
xsub_thrattr_init(void)
{
	(void) pthread_attr_init(&xsub_thrattr);
	(void) pthread_attr_setdetachstate(&xsub_thrattr,
	    PTHREAD_CREATE_DETACHED);
	(void) pthread_attr_setscope(&xsub_thrattr, PTHREAD_SCOPE_SYSTEM);
}

/*
 * Our door server create function is only called during initial
 * door_xcreate since we specify DOOR_NO_DEPLETION_CB.
 */
int
xsub_door_server_create(door_info_t *dip, void *(*startf)(void *),
    void *startfarg, void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;
	pthread_attr_t *thrattr;
	sigset_t oset;
	int err;

	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		return (0); /* shouldn't happen, but just in case */

	/*
	 * If sysevent_evc_xsubscribe was called electing to use a
	 * different door server create function then let it take it
	 * from here.
	 */
	if (xsa->xs_thrcreate) {
		return (xsa->xs_thrcreate(dip, startf, startfarg,
		    xsa->xs_thrcreate_cookie));
	}

	if (xsa->xs_thrattr == NULL) {
		(void) pthread_once(&xsub_thrattr_once, xsub_thrattr_init);
		thrattr = &xsub_thrattr;
	} else {
		thrattr = xsa->xs_thrattr;
	}

	(void) pthread_sigmask(SIG_SETMASK, &xsa->xs_sigmask, &oset);
	err = pthread_create(NULL, thrattr, startf, startfarg);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);

	return (err == 0 ? 1 : -1);
}

void
xsub_door_server_setup(void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;

	if (xsa->xs_thrsetup == NULL) {
		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
	}

	(void) pthread_setspecific(nrkey, (void *)subp);

	if (xsa->xs_thrsetup)
		xsa->xs_thrsetup(xsa->xs_thrsetup_cookie);
}

/*
 * Cause private door server threads to exit.  We have already performed the
 * unsubscribe ioctl which stops new invocations and waits until all
 * existing invocations are complete, so all server threads should be
 * blocked in door_return.  The door has not yet been revoked.  We invoke
 * the door after setting evsub_state so that the state change is noticed
 * on wakeup; each invocation results in the death of one server thread
 * (and door_xcreate created exactly one for this subscription).
 *
 * You'd think it would be easier to kill these threads, such as through
 * pthread_cancel.  Unfortunately door_return is not a cancellation point,
 * and if you do cancel a thread blocked in door_return the EINTR check in
 * the door_return assembly logic causes us to loop with EINTR forever!
 */
static void
kill_door_servers(evchan_subscr_t *subp)
{
	door_arg_t da;

	bzero(&da, sizeof (da));
	subp->evsub_state = EVCHAN_SUB_STATE_CLOSING;
	membar_producer();

	(void) door_call(subp->evsub_door_desc, &da);
}

static int
sysevent_evc_subscribe_cmn(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, struct sysevent_subattr_impl *xsa)
{
	evchan_subscr_t *subp;
	int upcall_door;
	sev_subscribe_args_t uargs;
	uint32_t sid_len;
	uint32_t class_len;
	int ec;

	if (scp == NULL || misaligned(scp) || sid == NULL || class == NULL) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	if ((sid_len = strlen(sid) + 1) > MAX_SUBID_LEN || sid_len == 1 ||
	    (class_len = strlen(class) + 1) > MAX_CLASS_LEN) {
		return (errno = EINVAL);
	}

	/* Check for printable characters */
	if (!strisprint(sid)) {
		return (errno = EINVAL);
	}

	if (event_handler == NULL) {
		return (errno = EINVAL);
	}

	if (pthread_key_create_once_np(&nrkey, NULL) != 0)
		return (errno); /* ENOMEM or EAGAIN */

	/* Create subscriber data */
	if ((subp = calloc(1, sizeof (evchan_subscr_t))) == NULL) {
		return (errno);
	}

	if ((subp->evsub_sid = strdup(sid)) == NULL) {
		ec = errno;
		free(subp);
		return (ec);
	}

	/*
	 * EC_ALL string will not be copied to kernel - NULL is assumed
	 */
	if (strcmp(class, EC_ALL) == 0) {
		class = NULL;
		class_len = 0;
	}

	/*
	 * Fill this in now for the xsub_door_server_setup dance
	 */
	subp->ev_subhead = EVCHAN_IMPL_HNDL(scp);
	subp->evsub_state = EVCHAN_SUB_STATE_ACTIVE;

	if (xsa == NULL) {
		upcall_door = door_create(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	} else {
		subp->evsub_attr = xsa;

		/*
		 * Create a private door with exactly one thread to
		 * service the callbacks (the GPEC kernel implementation
		 * serializes deliveries for each subscriber id).
		 */
		upcall_door = door_xcreate(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL | DOOR_NO_DEPLETION_CB,
		    xsub_door_server_create, xsub_door_server_setup,
		    (void *)subp, 1);
	}

	if (upcall_door == -1) {
		ec = errno;
		free(subp->evsub_sid);
		free(subp);
		return (ec);
	}

	/* Complete subscriber information */
	subp->evsub_door_desc = upcall_door;
	subp->evsub_func = event_handler;
	subp->evsub_cookie = cookie;

	(void) mutex_lock(EV_LOCK(scp));

	uargs.sid.name = (uintptr_t)sid;
	uargs.sid.len = sid_len;
	uargs.class_info.name = (uintptr_t)class;
	uargs.class_info.len = class_len;
	uargs.door_desc = subp->evsub_door_desc;
	uargs.flags = flags;
	if (ioctl(EV_FD(scp), SEV_SUBSCRIBE, (intptr_t)&uargs) != 0) {
		ec = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		if (xsa)
			kill_door_servers(subp);
		(void) door_revoke(upcall_door);
		free(subp->evsub_sid);
		free(subp);
		return (ec);
	}

	/* Attach to subscriber list */
	subp->evsub_next = EV_SUB_NEXT(scp);
	EV_SUB_NEXT(scp) = subp;

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}

/*
 * sysevent_evc_subscribe - subscribe to an existing event channel
 * using a non-private door (which will create as many server threads
 * as the apparent maximum concurrency requirements suggest).
 */
int
sysevent_evc_subscribe(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags)
{
	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
	    cookie, flags, NULL));
}

static void
subattr_dfltinit(struct sysevent_subattr_impl *xsa)
{
	(void) sigfillset(&xsa->xs_sigmask);
	(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
}

static struct sysevent_subattr_impl dfltsa;
pthread_once_t dfltsa_inited = PTHREAD_ONCE_INIT;

static void
init_dfltsa(void)
{
	subattr_dfltinit(&dfltsa);
}

/*
 * sysevent_evc_xsubscribe - subscribe to an existing event channel
 * using a private door with control over thread creation.
 */
int
sysevent_evc_xsubscribe(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, sysevent_subattr_t *attr)
{
	struct sysevent_subattr_impl *xsa;

	if (attr != NULL) {
		xsa = (struct sysevent_subattr_impl *)attr;
	} else {
		xsa = &dfltsa;
		(void) pthread_once(&dfltsa_inited, init_dfltsa);
	}

	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
	    cookie, flags, xsa));
}

sysevent_subattr_t *
sysevent_subattr_alloc(void)
{
	struct sysevent_subattr_impl *xsa = calloc(1, sizeof (*xsa));

	if (xsa != NULL)
		subattr_dfltinit(xsa);

	return (xsa != NULL ? (sysevent_subattr_t *)xsa : NULL);
}

void
sysevent_subattr_free(sysevent_subattr_t *attr)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	free(xsa);
}

void
sysevent_subattr_thrcreate(sysevent_subattr_t *attr,
    door_xcreate_server_func_t *thrcreate, void *cookie)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrcreate = thrcreate;
	xsa->xs_thrcreate_cookie = cookie;
}

void
sysevent_subattr_thrsetup(sysevent_subattr_t *attr,
    door_xcreate_thrsetup_func_t *thrsetup, void *cookie)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrsetup = thrsetup;
	xsa->xs_thrsetup_cookie = cookie;
}

void
sysevent_subattr_sigmask(sysevent_subattr_t *attr, sigset_t *set)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	if (set) {
		xsa->xs_sigmask = *set;
	} else {
		(void) sigfillset(&xsa->xs_sigmask);
		(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
	}
}

void
sysevent_subattr_thrattr(sysevent_subattr_t *attr, pthread_attr_t *thrattr)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrattr = thrattr;
}
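
/*
 * Illustrative sketch of subscribing through a private door (handler,
 * channel handle and subscriber id are hypothetical).  The signal mask
 * shown mirrors the library default: everything except SIGABRT is
 * blocked in the door server thread.
 *
 *	sysevent_subattr_t *attr;
 *	sigset_t mask;
 *
 *	if ((attr = sysevent_subattr_alloc()) == NULL)
 *		return (errno);
 *	(void) sigfillset(&mask);
 *	(void) sigdelset(&mask, SIGABRT);
 *	sysevent_subattr_sigmask(attr, &mask);
 *	if (sysevent_evc_xsubscribe(ch, "my_sub", EC_ALL, my_handler,
 *	    NULL, 0, attr) != 0) {
 *		sysevent_subattr_free(attr);
 *		return (errno);
 *	}
 */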

/*
 * sysevent_evc_unsubscribe - Unsubscribe from an existing event channel
 */
int
sysevent_evc_unsubscribe(evchan_t *scp, const char *sid)
{
	int all_subscribers = 0;
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp, *prevsubp, *tofree;
	int errcp;
	int rc;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (sid == NULL || strlen(sid) == 0 ||
	    (strlen(sid) >= MAX_SUBID_LEN))
		return (errno = EINVAL);

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid())
		return (errno = EINVAL);

	if (strcmp(sid, EVCH_ALLSUB) == 0) {
		all_subscribers++;
		/* Indicates all subscriber ids for this channel */
		uargs.sid.name = NULL;
		uargs.sid.len = 0;
	} else {
		uargs.sid.name = (uintptr_t)sid;
		uargs.sid.len = strlen(sid) + 1;
	}

	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * The unsubscribe ioctl will block until all door upcalls have
	 * drained.
	 */
	rc = ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs);

	if (rc != 0) {
		errcp = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		return (errno = errcp); /* EFAULT, ENXIO, EINVAL possible */
	}

	/*
	 * Search for the matching subscriber.  If EVCH_ALLSUB was specified
	 * then the ioctl above will have returned 0 even if there are
	 * no subscriptions, so the initial EV_SUB_NEXT can be NULL.
	 */
	prevsubp = NULL;
	subp = EV_SUB_NEXT(scp);
	while (subp != NULL) {
		if (all_subscribers || strcmp(subp->evsub_sid, sid) == 0) {
			if (prevsubp == NULL) {
				EV_SUB_NEXT(scp) = subp->evsub_next;
			} else {
				prevsubp->evsub_next = subp->evsub_next;
			}

			tofree = subp;
			subp = subp->evsub_next;

			/* If door_xcreate was applied we can clean up */
			if (tofree->evsub_attr)
				kill_door_servers(tofree);

			(void) door_revoke(tofree->evsub_door_desc);
			free(tofree->evsub_sid);
			free(tofree);

			/* Freed the single matching subscriber already? */
			if (all_subscribers == 0)
				break;
		} else {
			prevsubp = subp;
			subp = subp->evsub_next;
		}
	}

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}

/*
 * sysevent_evc_control - Various channel-based control operations
 */
int
sysevent_evc_control(evchan_t *scp, int cmd, /* arg */ ...)
{
	va_list ap;
	uint32_t *chlenp;
	sev_control_args_t uargs;
	int rc = 0;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	va_start(ap, cmd);

	uargs.cmd = cmd;

	(void) mutex_lock(EV_LOCK(scp));

	switch (cmd) {
	case EVCH_GET_CHAN_LEN:
	case EVCH_GET_CHAN_LEN_MAX:
		chlenp = va_arg(ap, uint32_t *);
		if (chlenp == NULL || misaligned(chlenp)) {
			rc = EINVAL;
			break;
		}
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		*chlenp = uargs.value;
		break;
	case EVCH_SET_CHAN_LEN:
		/* Range check is handled in the framework */
		uargs.value = va_arg(ap, uint32_t);
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		break;
	default:
		rc = EINVAL;
	}

	(void) mutex_unlock(EV_LOCK(scp));

	if (rc == -1) {
		rc = errno;
	}

	va_end(ap);

	return (errno = rc);
}
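
/*
 * Illustrative sketch of the control interface (channel handle "ch" and
 * the target length are hypothetical): query the channel's current event
 * queue length and then raise it.
 *
 *	uint32_t len;
 *
 *	if (sysevent_evc_control(ch, EVCH_GET_CHAN_LEN, &len) == 0 &&
 *	    len < 1024)
 *		(void) sysevent_evc_control(ch, EVCH_SET_CHAN_LEN, 1024);
 */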