/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <stdio.h>
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
#include <door.h>
#include <unistd.h>
#include <stddef.h>
#include <stdlib.h>
#include <strings.h>
#include <pthread.h>
#include <atomic.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>

#include "libsysevent.h"
#include "libsysevent_impl.h"

/*
 * The functions below deal with the General Purpose Event Handling framework:
 *
 * sysevent_evc_bind        - create/bind application to named channel
 * sysevent_evc_unbind      - unbind from previously bound/created channel
 * sysevent_evc_subscribe   - subscribe to existing event channel
 * sysevent_evc_xsubscribe  - subscribe using a private door with control
 *                            over thread creation
 * sysevent_evc_unsubscribe - unsubscribe from existing event channel
 * sysevent_evc_publish     - generate a system event via an event channel
 * sysevent_evc_control     - various channel-based control operations
 */
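
/*
 * Illustrative usage sketch (not part of this library): a typical consumer
 * binds a channel, subscribes with a callback, publishes, and finally tears
 * down.  The channel name, class/subclass strings and subscriber id below
 * are hypothetical examples; error handling is abbreviated.
 *
 *	static int
 *	my_handler(sysevent_t *ev, void *cookie)
 *	{
 *		... process the event ...
 *		return (0);
 *	}
 *
 *	evchan_t *ch;
 *
 *	if (sysevent_evc_bind("com.example.mychan", &ch,
 *	    EVCH_CREAT | EVCH_HOLD_PEND) != 0)
 *		return (errno);
 *	if (sysevent_evc_subscribe(ch, "my_subscriber", EC_ALL,
 *	    my_handler, NULL, 0) != 0)
 *		return (errno);
 *	(void) sysevent_evc_publish(ch, "class", "subclass", "vendor",
 *	    "publisher", NULL, EVCH_SLEEP);
 *	...
 *	(void) sysevent_evc_unsubscribe(ch, "my_subscriber");
 *	(void) sysevent_evc_unbind(ch);
 */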

static void kill_door_servers(evchan_subscr_t *);

#define	misaligned(p)	((uintptr_t)(p) & 3)	/* 4-byte alignment required */

static pthread_key_t nrkey = PTHREAD_ONCE_KEY_NP;

/*
 * If the current thread is a door server thread servicing a door created
 * for us in sysevent_evc_xsubscribe, then an attempt to unsubscribe from
 * within door invocation context on the same channel will deadlock in the
 * kernel, waiting for our own invocation to complete.  Such calls are
 * forbidden, and we fail them with EDEADLK (better than hanging
 * unkillably).
 *
 * We'd like to offer this detection to subscriptions established with
 * sysevent_evc_subscribe, but we don't have control over the door service
 * threads in that case.  Perhaps the fix is to always use door_xcreate,
 * even for sysevent_evc_subscribe?
 */
static boolean_t
will_deadlock(evchan_t *scp)
{
	evchan_subscr_t *subp = pthread_getspecific(nrkey);
	evchan_impl_hdl_t *hdl = EVCHAN_IMPL_HNDL(scp);

	return (subp != NULL && subp->ev_subhead == hdl ? B_TRUE : B_FALSE);
}
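
/*
 * Illustrative sketch of the pattern that will_deadlock() guards against
 * (the handler and subscriber id names are hypothetical): a handler running
 * in a door server thread created by sysevent_evc_xsubscribe must not
 * unsubscribe from or unbind its own channel.
 *
 *	static int
 *	bad_handler(sysevent_t *ev, void *cookie)
 *	{
 *		evchan_t *ch = cookie;
 *
 *		This runs in door invocation context for ch, so the call
 *		below returns EDEADLK instead of hanging in the kernel:
 *
 *		(void) sysevent_evc_unsubscribe(ch, "my_subscriber");
 *		return (0);
 *	}
 */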

/*
 * Check syntax of a channel name
 */
static int
sysevent_is_chan_name(const char *str)
{
	for (; *str != '\0'; str++) {
		if (!EVCH_ISCHANCHAR(*str))
			return (0);
	}

	return (1);
}

/*
 * Check for printable characters
 */
static int
strisprint(const char *s)
{
	for (; *s != '\0'; s++) {
		if (*s < ' ' || *s > '~')
			return (0);
	}

	return (1);
}

/*
 * sysevent_evc_bind - Create/bind application to named channel
 */
int
sysevent_evc_bind(const char *channel, evchan_t **scpp, uint32_t flags)
{
	int chanlen;
	evchan_t *scp;
	sev_bind_args_t uargs;
	int ec;

	if (scpp == NULL || misaligned(scpp)) {
		return (errno = EINVAL);
	}

	/* Provide useful value in error case */
	*scpp = NULL;

	if (channel == NULL ||
	    (chanlen = strlen(channel) + 1) > MAX_CHNAME_LEN) {
		return (errno = EINVAL);
	}

	/* Check channel syntax */
	if (!sysevent_is_chan_name(channel)) {
		return (errno = EINVAL);
	}

	if (flags & ~EVCH_B_FLAGS) {
		return (errno = EINVAL);
	}

	scp = calloc(1, sizeof (evchan_impl_hdl_t));
	if (scp == NULL) {
		return (errno = ENOMEM);
	}

	/*
	 * Enable the sysevent driver.  Fall back if the device link doesn't
	 * exist; this situation can arise if a channel is bound early in
	 * system startup, prior to devfsadm(1M) being invoked.
	 */
	EV_FD(scp) = open(DEVSYSEVENT, O_RDWR);
	if (EV_FD(scp) == -1) {
		if (errno != ENOENT) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}

		EV_FD(scp) = open(DEVICESYSEVENT, O_RDWR);
		if (EV_FD(scp) == -1) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}
	}

	/*
	 * Force the file descriptor to be closed on exec.  The driver will
	 * then release stale binding handles, and will also release the
	 * associated subscriptions if the EVCH_SUB_KEEP flag was not set.
	 */
	(void) fcntl(EV_FD(scp), F_SETFD, FD_CLOEXEC);

	uargs.chan_name.name = (uintptr_t)channel;
	uargs.chan_name.len = chanlen;
	uargs.flags = flags;

	if (ioctl(EV_FD(scp), SEV_CHAN_OPEN, &uargs) != 0) {
		ec = errno;
		(void) close(EV_FD(scp));
		free(scp);
		return (errno = ec);
	}

	/* Needed to detect a fork() */
	EV_PID(scp) = getpid();
	(void) mutex_init(EV_LOCK(scp), USYNC_THREAD, NULL);

	*scpp = scp;

	return (0);
}

/*
 * sysevent_evc_unbind - Unbind from previously bound/created channel
 */
int
sysevent_evc_unbind(evchan_t *scp)
{
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp;
	int errcp;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * Unsubscribe, if we are in the process which did the bind.
	 */
	if (EV_PID(scp) == getpid()) {
		uargs.sid.name = NULL;
		uargs.sid.len = 0;
		/*
		 * The unsubscribe ioctl will block until all door upcalls
		 * have drained.
		 */
		if (ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs) != 0) {
			errcp = errno;
			(void) mutex_unlock(EV_LOCK(scp));
			return (errno = errcp);
		}
	}

	while ((subp = EV_SUB_NEXT(scp)) != NULL) {
		EV_SUB_NEXT(scp) = subp->evsub_next;

		/* If door_xcreate was applied we can clean up */
		if (subp->evsub_attr)
			kill_door_servers(subp);

		if (door_revoke(subp->evsub_door_desc) != 0 && errno == EPERM)
			(void) close(subp->evsub_door_desc);

		free(subp->evsub_sid);
		free(subp);
	}

	(void) mutex_unlock(EV_LOCK(scp));

	/*
	 * The close of the driver will do the unsubscribe if a) it is the
	 * last close and b) we are in a child which inherited subscriptions.
	 */
	(void) close(EV_FD(scp));
	(void) mutex_destroy(EV_LOCK(scp));
	free(scp);

	return (0);
}

/*
 * sysevent_evc_publish - Generate a system event via an event channel
 */
int
sysevent_evc_publish(evchan_t *scp, const char *class,
    const char *subclass, const char *vendor,
    const char *pub_name, nvlist_t *attr_list,
    uint32_t flags)
{
	sysevent_t *ev;
	sev_publish_args_t uargs;
	int rc;
	int ec;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	ev = sysevent_alloc_event((char *)class, (char *)subclass,
	    (char *)vendor, (char *)pub_name, attr_list);
	if (ev == NULL) {
		return (errno);
	}

	uargs.ev.name = (uintptr_t)ev;
	uargs.ev.len = SE_SIZE(ev);
	uargs.flags = flags;

	(void) mutex_lock(EV_LOCK(scp));

	rc = ioctl(EV_FD(scp), SEV_PUBLISH, (intptr_t)&uargs);
	ec = errno;

	(void) mutex_unlock(EV_LOCK(scp));

	sysevent_free(ev);

	if (rc != 0) {
		return (ec);
	}
	return (0);
}
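
/*
 * Illustrative sketch of publishing an event with an attribute list
 * (the channel handle "ch", class/subclass strings and attribute names are
 * hypothetical; error handling is abbreviated):
 *
 *	nvlist_t *nvl = NULL;
 *
 *	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) == 0 &&
 *	    nvlist_add_string(nvl, "device", "/dev/dsk/c0t0d0") == 0) {
 *		(void) sysevent_evc_publish(ch, "EC_example", "ESC_example",
 *		    "SUNW", "example_pub", nvl, EVCH_SLEEP);
 *	}
 *	nvlist_free(nvl);
 */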

/*
 * Generic callback which catches events from the kernel and calls the
 * subscriber's callback routine.
 *
 * The kernel guarantees that door upcalls are disabled once an
 * unsubscription has been issued, which is why cookie always points to a
 * valid evchan_subscr_t *.
 *
 * Furthermore it's not necessary to lock subp because the sysevent
 * framework guarantees no unsubscription until door_return.
 */
/*ARGSUSED3*/
static void
door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	int rval = 0;

	/*
	 * If we've been invoked simply to kill the thread then
	 * exit now.
	 */
	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		pthread_exit(NULL);

	if (args == NULL || alen <= (size_t)0) {
		/* Skip callback execution */
		rval = EINVAL;
	} else {
		rval = subp->evsub_func((sysevent_t *)(void *)args,
		    subp->evsub_cookie);
	}

	/*
	 * Fill in return values for door_return
	 */
	alen = sizeof (rval);
	bcopy(&rval, args, alen);

	(void) door_return(args, alen, NULL, 0);
}

static pthread_once_t xsub_thrattr_once = PTHREAD_ONCE_INIT;
static pthread_attr_t xsub_thrattr;

static void
xsub_thrattr_init(void)
{
	(void) pthread_attr_init(&xsub_thrattr);
	(void) pthread_attr_setdetachstate(&xsub_thrattr,
	    PTHREAD_CREATE_DETACHED);
	(void) pthread_attr_setscope(&xsub_thrattr, PTHREAD_SCOPE_SYSTEM);
}

/*
 * Our door server create function is only called during the initial
 * door_xcreate since we specify DOOR_NO_DEPLETION_CB.
 */
int
xsub_door_server_create(door_info_t *dip, void *(*startf)(void *),
    void *startfarg, void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;
	pthread_attr_t *thrattr;
	sigset_t oset;
	int err;

	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		return (0);	/* shouldn't happen, but just in case */

	/*
	 * If sysevent_evc_xsubscribe was called electing to use a
	 * different door server create function then let it take it
	 * from here.
	 */
	if (xsa->xs_thrcreate) {
		return (xsa->xs_thrcreate(dip, startf, startfarg,
		    xsa->xs_thrcreate_cookie));
	}

	if (xsa->xs_thrattr == NULL) {
		(void) pthread_once(&xsub_thrattr_once, xsub_thrattr_init);
		thrattr = &xsub_thrattr;
	} else {
		thrattr = xsa->xs_thrattr;
	}

	(void) pthread_sigmask(SIG_SETMASK, &xsa->xs_sigmask, &oset);
	err = pthread_create(NULL, thrattr, startf, startfarg);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);

	return (err == 0 ? 1 : -1);
}

void
xsub_door_server_setup(void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;

	if (xsa->xs_thrsetup == NULL) {
		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
	}

	(void) pthread_setspecific(nrkey, (void *)subp);

	if (xsa->xs_thrsetup)
		xsa->xs_thrsetup(xsa->xs_thrsetup_cookie);
}

/*
 * Cause private door server threads to exit.  We have already performed the
 * unsubscribe ioctl, which stops new invocations and waits until all
 * existing invocations are complete, so all server threads should be
 * blocked in door_return.  The door has not yet been revoked.  We will
 * invoke repeatedly after setting the evsub_state to be noticed on
 * wakeup; each invocation will result in the death of one server thread.
 *
 * You'd think it would be easier to kill these threads, such as through
 * pthread_cancel.  Unfortunately door_return is not a cancellation point,
 * and if you do cancel a thread blocked in door_return the EINTR check in
 * the door_return assembly logic causes us to loop with EINTR forever!
 */
static void
kill_door_servers(evchan_subscr_t *subp)
{
	door_arg_t da;

	bzero(&da, sizeof (da));
	subp->evsub_state = EVCHAN_SUB_STATE_CLOSING;
	membar_producer();

	(void) door_call(subp->evsub_door_desc, &da);
}

static int
sysevent_evc_subscribe_cmn(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, struct sysevent_subattr_impl *xsa)
{
	evchan_subscr_t *subp;
	int upcall_door;
	sev_subscribe_args_t uargs;
	uint32_t sid_len;
	uint32_t class_len;
	int ec;

	if (scp == NULL || misaligned(scp) || sid == NULL || class == NULL) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	if ((sid_len = strlen(sid) + 1) > MAX_SUBID_LEN || sid_len == 1 ||
	    (class_len = strlen(class) + 1) > MAX_CLASS_LEN) {
		return (errno = EINVAL);
	}

	/* Check for printable characters */
	if (!strisprint(sid)) {
		return (errno = EINVAL);
	}

	if (event_handler == NULL) {
		return (errno = EINVAL);
	}

	if (pthread_key_create_once_np(&nrkey, NULL) != 0)
		return (errno);	/* ENOMEM or EAGAIN */

	/* Create subscriber data */
	if ((subp = calloc(1, sizeof (evchan_subscr_t))) == NULL) {
		return (errno);
	}

	if ((subp->evsub_sid = strdup(sid)) == NULL) {
		ec = errno;
		free(subp);
		return (ec);
	}

	/*
	 * EC_ALL string will not be copied to kernel - NULL is assumed
	 */
	if (strcmp(class, EC_ALL) == 0) {
		class = NULL;
		class_len = 0;
	}

	/*
	 * Fill this in now for the xsub_door_server_setup dance
	 */
	subp->ev_subhead = EVCHAN_IMPL_HNDL(scp);
	subp->evsub_state = EVCHAN_SUB_STATE_ACTIVE;

	if (xsa == NULL) {
		upcall_door = door_create(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	} else {
		subp->evsub_attr = xsa;

		/*
		 * Create a private door with exactly one thread to
		 * service the callbacks (the GPEC kernel implementation
		 * serializes deliveries for each subscriber id).
		 */
		upcall_door = door_xcreate(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL | DOOR_NO_DEPLETION_CB,
		    xsub_door_server_create, xsub_door_server_setup,
		    (void *)subp, 1);
	}

	if (upcall_door == -1) {
		ec = errno;
		free(subp->evsub_sid);
		free(subp);
		return (ec);
	}

	/* Complete subscriber information */
	subp->evsub_door_desc = upcall_door;
	subp->evsub_func = event_handler;
	subp->evsub_cookie = cookie;

	(void) mutex_lock(EV_LOCK(scp));

	uargs.sid.name = (uintptr_t)sid;
	uargs.sid.len = sid_len;
	uargs.class_info.name = (uintptr_t)class;
	uargs.class_info.len = class_len;
	uargs.door_desc = subp->evsub_door_desc;
	uargs.flags = flags;

	if (ioctl(EV_FD(scp), SEV_SUBSCRIBE, (intptr_t)&uargs) != 0) {
		ec = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		if (xsa)
			kill_door_servers(subp);
		(void) door_revoke(upcall_door);
		free(subp->evsub_sid);
		free(subp);
		return (ec);
	}

	/* Attach to subscriber list */
	subp->evsub_next = EV_SUB_NEXT(scp);
	EV_SUB_NEXT(scp) = subp;

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}

/*
 * sysevent_evc_subscribe - subscribe to an existing event channel
 * using a non-private door (which will create as many server threads
 * as the apparent maximum concurrency requirements suggest).
 */
int
sysevent_evc_subscribe(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags)
{
	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
	    cookie, flags, NULL));
}
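
/*
 * Illustrative sketch of a subscriber callback passed to
 * sysevent_evc_subscribe (names are hypothetical).  The handler runs in a
 * door server thread; its return value is passed back to the framework.
 *
 *	static int
 *	example_handler(sysevent_t *ev, void *cookie)
 *	{
 *		nvlist_t *nvl = NULL;
 *
 *		(void) printf("%s.%s\n", sysevent_get_class_name(ev),
 *		    sysevent_get_subclass_name(ev));
 *		if (sysevent_get_attr_list(ev, &nvl) == 0) {
 *			... inspect attributes ...
 *			nvlist_free(nvl);
 *		}
 *		return (0);
 *	}
 */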

static void
subattr_dfltinit(struct sysevent_subattr_impl *xsa)
{
	(void) sigfillset(&xsa->xs_sigmask);
	(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
}

static struct sysevent_subattr_impl dfltsa;
pthread_once_t dfltsa_inited = PTHREAD_ONCE_INIT;

static void
init_dfltsa(void)
{
	subattr_dfltinit(&dfltsa);
}

/*
 * sysevent_evc_xsubscribe - subscribe to an existing event channel
 * using a private door with control over thread creation.
 */
int
sysevent_evc_xsubscribe(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, sysevent_subattr_t *attr)
{
	struct sysevent_subattr_impl *xsa;

	if (attr != NULL) {
		xsa = (struct sysevent_subattr_impl *)attr;
	} else {
		xsa = &dfltsa;
		(void) pthread_once(&dfltsa_inited, init_dfltsa);
	}

	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
	    cookie, flags, xsa));
}

sysevent_subattr_t *
sysevent_subattr_alloc(void)
{
	struct sysevent_subattr_impl *xsa = calloc(1, sizeof (*xsa));

	if (xsa != NULL)
		subattr_dfltinit(xsa);

	return (xsa != NULL ? (sysevent_subattr_t *)xsa : NULL);
}

void
sysevent_subattr_free(sysevent_subattr_t *attr)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	free(xsa);
}

void
sysevent_subattr_thrcreate(sysevent_subattr_t *attr,
    door_xcreate_server_func_t *thrcreate, void *cookie)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrcreate = thrcreate;
	xsa->xs_thrcreate_cookie = cookie;
}

void
sysevent_subattr_thrsetup(sysevent_subattr_t *attr,
    door_xcreate_thrsetup_func_t *thrsetup, void *cookie)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrsetup = thrsetup;
	xsa->xs_thrsetup_cookie = cookie;
}

void
sysevent_subattr_sigmask(sysevent_subattr_t *attr, sigset_t *set)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	if (set) {
		xsa->xs_sigmask = *set;
	} else {
		(void) sigfillset(&xsa->xs_sigmask);
		(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
	}
}

void
sysevent_subattr_thrattr(sysevent_subattr_t *attr, pthread_attr_t *thrattr)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrattr = thrattr;
}
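
/*
 * Illustrative sketch of using a subscription attribute object with
 * sysevent_evc_xsubscribe (the handler, signal choice and subscriber id are
 * hypothetical; attribute-object lifetime handling is elided): block all
 * signals except SIGUSR1 in the door server thread and subscribe via a
 * private door.
 *
 *	sysevent_subattr_t *sa;
 *	sigset_t mask;
 *
 *	if ((sa = sysevent_subattr_alloc()) == NULL)
 *		return (errno);
 *	(void) sigfillset(&mask);
 *	(void) sigdelset(&mask, SIGUSR1);
 *	sysevent_subattr_sigmask(sa, &mask);
 *	if (sysevent_evc_xsubscribe(ch, "my_subscriber", EC_ALL,
 *	    example_handler, NULL, 0, sa) != 0)
 *		sysevent_subattr_free(sa);
 */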

/*
 * sysevent_evc_unsubscribe - Unsubscribe from an existing event channel
 */
int
sysevent_evc_unsubscribe(evchan_t *scp, const char *sid)
{
	int all_subscribers = 0;
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp, *prevsubp, *tofree;
	int errcp;
	int rc;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (sid == NULL || strlen(sid) == 0 ||
	    (strlen(sid) >= MAX_SUBID_LEN))
		return (errno = EINVAL);

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid())
		return (errno = EINVAL);

	if (strcmp(sid, EVCH_ALLSUB) == 0) {
		all_subscribers++;
		/* Indicates all subscriber ids for this channel */
		uargs.sid.name = NULL;
		uargs.sid.len = 0;
	} else {
		uargs.sid.name = (uintptr_t)sid;
		uargs.sid.len = strlen(sid) + 1;
	}

	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * The unsubscribe ioctl will block until all door upcalls have
	 * drained.
	 */
	rc = ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs);

	if (rc != 0) {
		errcp = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		return (errno = errcp);	/* EFAULT, ENXIO, EINVAL possible */
	}

	/*
	 * Search for the matching subscriber.  If EVCH_ALLSUB was specified
	 * then the ioctl above will have returned 0 even if there are
	 * no subscriptions, so the initial EV_SUB_NEXT can be NULL.
	 */
	prevsubp = NULL;
	subp = EV_SUB_NEXT(scp);
	while (subp != NULL) {
		if (all_subscribers || strcmp(subp->evsub_sid, sid) == 0) {
			if (prevsubp == NULL) {
				EV_SUB_NEXT(scp) = subp->evsub_next;
			} else {
				prevsubp->evsub_next = subp->evsub_next;
			}

			tofree = subp;
			subp = subp->evsub_next;

			/* If door_xcreate was applied we can clean up */
			if (tofree->evsub_attr)
				kill_door_servers(tofree);

			(void) door_revoke(tofree->evsub_door_desc);
			free(tofree->evsub_sid);
			free(tofree);

			/* Done if we were freeing a single subscriber */
			if (all_subscribers == 0)
				break;
		} else {
			prevsubp = subp;
			subp = subp->evsub_next;
		}
	}

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}

/*
 * sysevent_evc_control - Various channel-based control operations
 */
int
sysevent_evc_control(evchan_t *scp, int cmd, /* arg */ ...)
{
	va_list ap;
	uint32_t *chlenp;
	sev_control_args_t uargs;
	int rc = 0;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	va_start(ap, cmd);

	uargs.cmd = cmd;

	(void) mutex_lock(EV_LOCK(scp));

	switch (cmd) {
	case EVCH_GET_CHAN_LEN:
	case EVCH_GET_CHAN_LEN_MAX:
		chlenp = va_arg(ap, uint32_t *);
		if (chlenp == NULL || misaligned(chlenp)) {
			rc = EINVAL;
			break;
		}
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		*chlenp = uargs.value;
		break;
	case EVCH_SET_CHAN_LEN:
		/* Range change will be handled in framework */
		uargs.value = va_arg(ap, uint32_t);
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		break;
	default:
		rc = EINVAL;
	}

	(void) mutex_unlock(EV_LOCK(scp));

	if (rc == -1) {
		rc = errno;
	}

	va_end(ap);

	return (errno = rc);
}
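
/*
 * Illustrative sketch of querying and adjusting the channel queue depth
 * with sysevent_evc_control (the channel handle "ch" is hypothetical):
 *
 *	uint32_t len, maxlen;
 *
 *	if (sysevent_evc_control(ch, EVCH_GET_CHAN_LEN, &len) == 0 &&
 *	    sysevent_evc_control(ch, EVCH_GET_CHAN_LEN_MAX, &maxlen) == 0 &&
 *	    len < maxlen) {
 *		(void) sysevent_evc_control(ch, EVCH_SET_CHAN_LEN, maxlen);
 *	}
 */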