/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <stdio.h>
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
#include <door.h>
#include <unistd.h>
#include <stddef.h>
#include <stdlib.h>
#include <strings.h>
#include <pthread.h>
#include <atomic.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>

#include "libsysevent.h"
#include "libsysevent_impl.h"

/*
 * The functions below deal with the General Purpose Event Handling framework
 *
 * sysevent_evc_bind - create/bind application to named channel
 * sysevent_evc_unbind - unbind from previously bound/created channel
 * sysevent_evc_subscribe - subscribe to existing event channel
 * sysevent_evc_unsubscribe - unsubscribe from existing event channel
 * sysevent_evc_publish - generate a system event via an event channel
 * sysevent_evc_control - various channel-based control operations
 */

static void kill_door_servers(evchan_subscr_t *);

#define	misaligned(p)	((uintptr_t)(p) & 3)	/* 4-byte alignment required */
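
/*
 * Illustrative usage sketch (comment only, not compiled): a typical client
 * binds to a channel, subscribes a handler, publishes events and finally
 * tears down.  The channel name "com.example.mychan", subscriber id "mysub",
 * handler and flag choices below are hypothetical examples, not names
 * defined by this library.
 *
 *	static int
 *	my_handler(sysevent_t *ev, void *cookie)
 *	{
 *		return (0);
 *	}
 *
 *	evchan_t *ch;
 *
 *	if (sysevent_evc_bind("com.example.mychan", &ch,
 *	    EVCH_CREAT | EVCH_HOLD_PEND) == 0) {
 *		(void) sysevent_evc_subscribe(ch, "mysub", EC_ALL,
 *		    my_handler, NULL, 0);
 *		(void) sysevent_evc_publish(ch, "class", "subclass",
 *		    "vendor", "publisher", NULL, EVCH_SLEEP);
 *		(void) sysevent_evc_unsubscribe(ch, "mysub");
 *		(void) sysevent_evc_unbind(ch);
 *	}
 */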

static pthread_key_t nrkey = PTHREAD_ONCE_KEY_NP;

/*
 * If the current thread is a door server thread servicing a door created
 * for us in sysevent_evc_xsubscribe, then an attempt to unsubscribe from
 * within door invocation context on the same channel will deadlock in the
 * kernel waiting for our own invocation to complete.  Such calls are
 * forbidden, and we abort if they are encountered (better than hanging
 * unkillably).
 *
 * We'd like to offer this detection to subscriptions established with
 * sysevent_evc_subscribe, but we don't have control over the door service
 * threads in that case.  Perhaps the fix is to always use door_xcreate
 * even for sysevent_evc_subscribe?
 */
static boolean_t
will_deadlock(evchan_t *scp)
{
	evchan_subscr_t *subp = pthread_getspecific(nrkey);
	evchan_impl_hdl_t *hdl = EVCHAN_IMPL_HNDL(scp);

	return (subp != NULL && subp->ev_subhead == hdl ? B_TRUE : B_FALSE);
}

/*
 * Check syntax of a channel name
 */
static int
sysevent_is_chan_name(const char *str)
{
	for (; *str != '\0'; str++) {
		if (!EVCH_ISCHANCHAR(*str))
			return (0);
	}

	return (1);
}

/*
 * Check for printable characters
 */
static int
strisprint(const char *s)
{
	for (; *s != '\0'; s++) {
		if (*s < ' ' || *s > '~')
			return (0);
	}

	return (1);
}

/*
 * sysevent_evc_bind - Create/bind application to named channel
 */
int
sysevent_evc_bind(const char *channel, evchan_t **scpp, uint32_t flags)
{
	int chanlen;
	evchan_t *scp;
	sev_bind_args_t uargs;
	int ec;

	if (scpp == NULL || misaligned(scpp)) {
		return (errno = EINVAL);
	}

	/* Provide useful value in error case */
	*scpp = NULL;

	if (channel == NULL ||
	    (chanlen = strlen(channel) + 1) > MAX_CHNAME_LEN) {
		return (errno = EINVAL);
	}

	/* Check channel syntax */
	if (!sysevent_is_chan_name(channel)) {
		return (errno = EINVAL);
	}

	if (flags & ~EVCH_B_FLAGS) {
		return (errno = EINVAL);
	}

	scp = calloc(1, sizeof (evchan_impl_hdl_t));
	if (scp == NULL) {
		return (errno = ENOMEM);
	}

	/*
	 * Enable sysevent driver.  Fallback if the device link doesn't exist;
	 * this situation can arise if a channel is bound early in system
	 * startup, prior to devfsadm(1M) being invoked.
	 */
	EV_FD(scp) = open(DEVSYSEVENT, O_RDWR);
	if (EV_FD(scp) == -1) {
		if (errno != ENOENT) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}

		EV_FD(scp) = open(DEVICESYSEVENT, O_RDWR);
		if (EV_FD(scp) == -1) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}
	}

	/*
	 * Force the fd to be closed when the process execs.
	 * The driver will then release stale binding handles.
	 * The driver will also release the associated subscriptions
	 * if the EVCH_SUB_KEEP flag was not set.
	 */
	(void) fcntl(EV_FD(scp), F_SETFD, FD_CLOEXEC);

	uargs.chan_name.name = (uintptr_t)channel;
	uargs.chan_name.len = chanlen;
	uargs.flags = flags;

	if (ioctl(EV_FD(scp), SEV_CHAN_OPEN, &uargs) != 0) {
		ec = errno;
		(void) close(EV_FD(scp));
		free(scp);
		return (errno = ec);
	}

	/* Needed to detect a fork() */
	EV_PID(scp) = getpid();
	(void) mutex_init(EV_LOCK(scp), USYNC_THREAD, NULL);

	*scpp = scp;

	return (0);
}

/*
 * sysevent_evc_unbind - Unbind from previously bound/created channel
 */
int
sysevent_evc_unbind(evchan_t *scp)
{
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp;
	int errcp;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * Unsubscribe, if we are in the process which did the bind.
	 */
	if (EV_PID(scp) == getpid()) {
		uargs.sid.name = NULL;
		uargs.sid.len = 0;
		/*
		 * The unsubscribe ioctl will block until all door upcalls have
		 * drained.
		 */
		if (ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs) != 0) {
			errcp = errno;
			(void) mutex_unlock(EV_LOCK(scp));
			return (errno = errcp);
		}
	}

	while ((subp = EV_SUB_NEXT(scp)) != NULL) {
		EV_SUB_NEXT(scp) = subp->evsub_next;

		/* If door_xcreate was applied we can clean up */
		if (subp->evsub_attr)
			kill_door_servers(subp);

		if (door_revoke(subp->evsub_door_desc) != 0 && errno == EPERM)
			(void) close(subp->evsub_door_desc);

		free(subp->evsub_sid);
		free(subp);
	}

	(void) mutex_unlock(EV_LOCK(scp));

	/*
	 * The close of the driver will do the unsubscribe if a) it is the last
	 * close and b) we are in a child which inherited subscriptions.
	 */
	(void) close(EV_FD(scp));
	(void) mutex_destroy(EV_LOCK(scp));
	free(scp);

	return (0);
}

/*
 * sysevent_evc_publish - Generate a system event via an event channel
 */
int
sysevent_evc_publish(evchan_t *scp, const char *class,
    const char *subclass, const char *vendor,
    const char *pub_name, nvlist_t *attr_list,
    uint32_t flags)
{
	sysevent_t *ev;
	sev_publish_args_t uargs;
	int rc;
	int ec;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	ev = sysevent_alloc_event((char *)class, (char *)subclass,
	    (char *)vendor, (char *)pub_name, attr_list);
	if (ev == NULL) {
		return (errno);
	}

	uargs.ev.name = (uintptr_t)ev;
	uargs.ev.len = SE_SIZE(ev);
	uargs.flags = flags;

	(void) mutex_lock(EV_LOCK(scp));

	rc = ioctl(EV_FD(scp), SEV_PUBLISH, (intptr_t)&uargs);
	ec = errno;

	(void) mutex_unlock(EV_LOCK(scp));

	sysevent_free(ev);

	if (rc != 0) {
		return (ec);
	}
	return (0);
}

/*
 * Generic callback which catches events from the kernel and calls the
 * subscriber's callback routine.
 *
 * The kernel guarantees that door upcalls are disabled once an unsubscribe
 * has been issued, which is why cookie always points to a valid
 * evchan_subscr_t *.
 *
 * Furthermore it's not necessary to lock subp because the sysevent
 * framework guarantees no unsubscription until door_return.
 */
/*ARGSUSED3*/
static void
door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	int rval = 0;

	/*
	 * If we've been invoked simply to kill the thread then
	 * exit now.
	 */
	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		pthread_exit(NULL);

	if (args == NULL || alen <= (size_t)0) {
		/* Skip callback execution */
		rval = EINVAL;
	} else {
		rval = subp->evsub_func((sysevent_t *)(void *)args,
		    subp->evsub_cookie);
	}

	/*
	 * Fill in return values for door_return
	 */
	alen = sizeof (rval);
	bcopy(&rval, args, alen);

	(void) door_return(args, alen, NULL, 0);
}

static pthread_once_t xsub_thrattr_once = PTHREAD_ONCE_INIT;
static pthread_attr_t xsub_thrattr;

static void
xsub_thrattr_init(void)
{
	(void) pthread_attr_init(&xsub_thrattr);
	(void) pthread_attr_setdetachstate(&xsub_thrattr,
	    PTHREAD_CREATE_DETACHED);
	(void) pthread_attr_setscope(&xsub_thrattr, PTHREAD_SCOPE_SYSTEM);
}

/*
 * Our door server create function is only called during initial
 * door_xcreate since we specify DOOR_NO_DEPLETION_CB.
 */
int
xsub_door_server_create(door_info_t *dip, void *(*startf)(void *),
    void *startfarg, void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;
	pthread_attr_t *thrattr;
	sigset_t oset;
	int err;

	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		return (0);	/* shouldn't happen, but just in case */

	/*
	 * If sysevent_evc_xsubscribe was called electing to use a
	 * different door server create function then let it take it
	 * from here.
	 */
	if (xsa->xs_thrcreate) {
		return (xsa->xs_thrcreate(dip, startf, startfarg,
		    xsa->xs_thrcreate_cookie));
	}

	if (xsa->xs_thrattr == NULL) {
		(void) pthread_once(&xsub_thrattr_once, xsub_thrattr_init);
		thrattr = &xsub_thrattr;
	} else {
		thrattr = xsa->xs_thrattr;
	}

	(void) pthread_sigmask(SIG_SETMASK, &xsa->xs_sigmask, &oset);
	err = pthread_create(NULL, thrattr, startf, startfarg);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);

	return (err == 0 ? 1 : -1);
}

void
xsub_door_server_setup(void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;

	if (xsa->xs_thrsetup == NULL) {
		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
	}

	(void) pthread_setspecific(nrkey, (void *)subp);

	if (xsa->xs_thrsetup)
		xsa->xs_thrsetup(xsa->xs_thrsetup_cookie);
}

/*
 * Cause private door server threads to exit.  We have already performed the
 * unsubscribe ioctl which stops new invocations and waits until all
 * existing invocations are complete.  So all server threads should be
 * blocked in door_return.  The door has not yet been revoked.  We will
 * invoke repeatedly after setting the evsub_state to be noticed on
 * wakeup; each invocation will result in the death of one server thread.
 *
 * You'd think it would be easier to kill these threads, such as through
 * pthread_cancel.  Unfortunately door_return is not a cancellation point,
 * and if you do cancel a thread blocked in door_return the EINTR check in
 * the door_return assembly logic causes us to loop with EINTR forever!
 */
static void
kill_door_servers(evchan_subscr_t *subp)
{
	door_arg_t da;

	bzero(&da, sizeof (da));
	subp->evsub_state = EVCHAN_SUB_STATE_CLOSING;
	membar_producer();

	(void) door_call(subp->evsub_door_desc, &da);
}

static int
sysevent_evc_subscribe_cmn(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, struct sysevent_subattr_impl *xsa)
{
	evchan_subscr_t *subp;
	int upcall_door;
	sev_subscribe_args_t uargs;
	uint32_t sid_len;
	uint32_t class_len;
	int ec;

	if (scp == NULL || misaligned(scp) || sid == NULL || class == NULL) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	if ((sid_len = strlen(sid) + 1) > MAX_SUBID_LEN || sid_len == 1 ||
	    (class_len = strlen(class) + 1) > MAX_CLASS_LEN) {
		return (errno = EINVAL);
	}

	/* Check for printable characters */
	if (!strisprint(sid)) {
		return (errno = EINVAL);
	}

	if (event_handler == NULL) {
		return (errno = EINVAL);
	}

	if (pthread_key_create_once_np(&nrkey, NULL) != 0)
		return (errno);	/* ENOMEM or EAGAIN */

	/* Create subscriber data */
	if ((subp = calloc(1, sizeof (evchan_subscr_t))) == NULL) {
		return (errno);
	}

	if ((subp->evsub_sid = strdup(sid)) == NULL) {
		ec = errno;
		free(subp);
		return (ec);
	}

	/*
	 * EC_ALL string will not be copied to kernel - NULL is assumed
	 */
	if (strcmp(class, EC_ALL) == 0) {
		class = NULL;
		class_len = 0;
	}

	/*
	 * Fill this in now for the xsub_door_server_setup dance
	 */
	subp->ev_subhead = EVCHAN_IMPL_HNDL(scp);
	subp->evsub_state = EVCHAN_SUB_STATE_ACTIVE;

	if (xsa == NULL) {
		upcall_door = door_create(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	} else {
		subp->evsub_attr = xsa;

		/*
		 * Create a private door with exactly one thread to
		 * service the callbacks (the GPEC kernel implementation
		 * serializes deliveries for each subscriber id).
		 */
		upcall_door = door_xcreate(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL | DOOR_NO_DEPLETION_CB,
		    xsub_door_server_create, xsub_door_server_setup,
		    (void *)subp, 1);
	}

	if (upcall_door == -1) {
		ec = errno;
		free(subp->evsub_sid);
		free(subp);
		return (ec);
	}

	/* Complete subscriber information */
	subp->evsub_door_desc = upcall_door;
	subp->evsub_func = event_handler;
	subp->evsub_cookie = cookie;

	(void) mutex_lock(EV_LOCK(scp));

	uargs.sid.name = (uintptr_t)sid;
	uargs.sid.len = sid_len;
	uargs.class_info.name = (uintptr_t)class;
	uargs.class_info.len = class_len;
	uargs.door_desc = subp->evsub_door_desc;
	uargs.flags = flags;
	if (ioctl(EV_FD(scp), SEV_SUBSCRIBE, (intptr_t)&uargs) != 0) {
		ec = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		if (xsa)
			kill_door_servers(subp);
		(void) door_revoke(upcall_door);
		free(subp->evsub_sid);
		free(subp);
		return (ec);
	}

	/* Attach to subscriber list */
	subp->evsub_next = EV_SUB_NEXT(scp);
	EV_SUB_NEXT(scp) = subp;

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}

/*
 * sysevent_evc_subscribe - subscribe to an existing event channel
 * using a non-private door (which will create as many server threads
 * as the apparent maximum concurrency requirements suggest).
 */
int
sysevent_evc_subscribe(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags)
{
	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
	    cookie, flags, NULL));
}

static void
subattr_dfltinit(struct sysevent_subattr_impl *xsa)
{
	(void) sigfillset(&xsa->xs_sigmask);
	(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
}

static struct sysevent_subattr_impl dfltsa;
pthread_once_t dfltsa_inited = PTHREAD_ONCE_INIT;

static void
init_dfltsa(void)
{
	subattr_dfltinit(&dfltsa);
}

/*
 * sysevent_evc_xsubscribe - subscribe to an existing event channel
 * using a private door with control over thread creation.
 */
int
sysevent_evc_xsubscribe(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, sysevent_subattr_t *attr)
{
	struct sysevent_subattr_impl *xsa;

	if (attr != NULL) {
		xsa = (struct sysevent_subattr_impl *)attr;
	} else {
		xsa = &dfltsa;
		(void) pthread_once(&dfltsa_inited, init_dfltsa);
	}

	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
	    cookie, flags, xsa));
}

sysevent_subattr_t *
sysevent_subattr_alloc(void)
{
	struct sysevent_subattr_impl *xsa = calloc(1, sizeof (*xsa));

	if (xsa != NULL)
		subattr_dfltinit(xsa);

	return (xsa != NULL ?
	    (sysevent_subattr_t *)xsa : NULL);
}

void
sysevent_subattr_free(sysevent_subattr_t *attr)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	free(xsa);
}

void
sysevent_subattr_thrcreate(sysevent_subattr_t *attr,
    door_xcreate_server_func_t *thrcreate, void *cookie)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrcreate = thrcreate;
	xsa->xs_thrcreate_cookie = cookie;
}

void
sysevent_subattr_thrsetup(sysevent_subattr_t *attr,
    door_xcreate_thrsetup_func_t *thrsetup, void *cookie)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrsetup = thrsetup;
	xsa->xs_thrsetup_cookie = cookie;
}

void
sysevent_subattr_sigmask(sysevent_subattr_t *attr, sigset_t *set)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	if (set) {
		xsa->xs_sigmask = *set;
	} else {
		(void) sigfillset(&xsa->xs_sigmask);
		(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
	}
}

void
sysevent_subattr_thrattr(sysevent_subattr_t *attr, pthread_attr_t *thrattr)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrattr = thrattr;
}
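
/*
 * Illustrative sketch (comment only, not compiled): a caller wanting a
 * private door server thread with, say, a restricted signal mask could
 * combine the subattr interfaces above with sysevent_evc_xsubscribe.
 * The channel handle "ch", subscriber id "mysub" and handler "my_handler"
 * are hypothetical; error handling and later cleanup are omitted.
 *
 *	sysevent_subattr_t *sa;
 *	sigset_t mask;
 *
 *	if ((sa = sysevent_subattr_alloc()) != NULL) {
 *		(void) sigfillset(&mask);
 *		(void) sigdelset(&mask, SIGABRT);
 *		sysevent_subattr_sigmask(sa, &mask);
 *		(void) sysevent_evc_xsubscribe(ch, "mysub", EC_ALL,
 *		    my_handler, NULL, 0, sa);
 *	}
 */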

/*
 * sysevent_evc_unsubscribe - Unsubscribe from an existing event channel
 */
int
sysevent_evc_unsubscribe(evchan_t *scp, const char *sid)
{
	int all_subscribers = 0;
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp, *prevsubp, *tofree;
	int errcp;
	int rc;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (sid == NULL || strlen(sid) == 0 ||
	    (strlen(sid) >= MAX_SUBID_LEN))
		return (errno = EINVAL);

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid())
		return (errno = EINVAL);

	if (strcmp(sid, EVCH_ALLSUB) == 0) {
		all_subscribers++;
		/* Indicates all subscriber id's for this channel */
		uargs.sid.name = NULL;
		uargs.sid.len = 0;
	} else {
		uargs.sid.name = (uintptr_t)sid;
		uargs.sid.len = strlen(sid) + 1;
	}

	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * The unsubscribe ioctl will block until all door upcalls have drained.
	 */
	rc = ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs);

	if (rc != 0) {
		errcp = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		return (errno = errcp);	/* EFAULT, ENXIO, EINVAL possible */
	}

	/*
	 * Search for the matching subscriber.  If EVCH_ALLSUB was specified
	 * then the ioctl above will have returned 0 even if there are
	 * no subscriptions, so the initial EV_SUB_NEXT can be NULL.
	 */
	prevsubp = NULL;
	subp = EV_SUB_NEXT(scp);
	while (subp != NULL) {
		if (all_subscribers || strcmp(subp->evsub_sid, sid) == 0) {
			if (prevsubp == NULL) {
				EV_SUB_NEXT(scp) = subp->evsub_next;
			} else {
				prevsubp->evsub_next = subp->evsub_next;
			}

			tofree = subp;
			subp = subp->evsub_next;

			/* If door_xcreate was applied we can clean up */
			if (tofree->evsub_attr)
				kill_door_servers(tofree);

			(void) door_revoke(tofree->evsub_door_desc);
			free(tofree->evsub_sid);
			free(tofree);

			/* Freed single subscriber already? */
			if (all_subscribers == 0)
				break;
		} else {
			prevsubp = subp;
			subp = subp->evsub_next;
		}
	}

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}

/*
 * sysevent_evc_control - Various channel-based control operations
 */
int
sysevent_evc_control(evchan_t *scp, int cmd, /* arg */ ...)
{
	va_list ap;
	uint32_t *chlenp;
	sev_control_args_t uargs;
	int rc = 0;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	va_start(ap, cmd);

	uargs.cmd = cmd;

	(void) mutex_lock(EV_LOCK(scp));

	switch (cmd) {
	case EVCH_GET_CHAN_LEN:
	case EVCH_GET_CHAN_LEN_MAX:
		chlenp = va_arg(ap, uint32_t *);
		if (chlenp == NULL || misaligned(chlenp)) {
			rc = EINVAL;
			break;
		}
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		*chlenp = uargs.value;
		break;

	case EVCH_SET_CHAN_LEN:
		/* Range change will be handled in framework */
		uargs.value = va_arg(ap, uint32_t);
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		break;

	default:
		rc = EINVAL;
	}

	(void) mutex_unlock(EV_LOCK(scp));

	if (rc == -1) {
		rc = errno;
	}

	va_end(ap);

	return (errno = rc);
}
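
/*
 * Illustrative sketch (comment only, not compiled): querying the current
 * channel event queue depth and raising it on an already-bound channel
 * handle "ch" (hypothetical); the chosen value 128 is arbitrary.
 *
 *	uint32_t len;
 *
 *	if (sysevent_evc_control(ch, EVCH_GET_CHAN_LEN, &len) == 0 &&
 *	    len < 128)
 *		(void) sysevent_evc_control(ch, EVCH_SET_CHAN_LEN,
 *		    (uint32_t)128);
 */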

int
sysevent_evc_setpropnvl(evchan_t *scp, nvlist_t *nvl)
{
	sev_propnvl_args_t uargs;
	char *buf = NULL;
	size_t nvlsz = 0;
	int rc;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (nvl != NULL &&
	    nvlist_pack(nvl, &buf, &nvlsz, NV_ENCODE_NATIVE, 0) != 0)
		return (errno);

	uargs.packednvl.name = (uint64_t)(uintptr_t)buf;
	uargs.packednvl.len = (uint32_t)nvlsz;

	rc = ioctl(EV_FD(scp), SEV_SETPROPNVL, (intptr_t)&uargs);

	if (buf)
		free(buf);

	return (rc);
}

int
sysevent_evc_getpropnvl(evchan_t *scp, nvlist_t **nvlp)
{
	sev_propnvl_args_t uargs;
	char buf[1024], *bufp = buf;	/* stack buffer */
	size_t sz = sizeof (buf);
	char *buf2 = NULL;		/* allocated if stack buf too small */
	int64_t expgen = -1;
	int rc;

	if (scp == NULL || misaligned(scp) || nvlp == NULL)
		return (errno = EINVAL);

	*nvlp = NULL;

again:
	uargs.packednvl.name = (uint64_t)(uintptr_t)bufp;
	uargs.packednvl.len = (uint32_t)sz;

	rc = ioctl(EV_FD(scp), SEV_GETPROPNVL, (intptr_t)&uargs);

	if (rc == E2BIG)
		return (errno = E2BIG);	/* driver refuses to copyout */

	/*
	 * If the packed nvlist is too big for the buffer size we offered
	 * then the ioctl returns EOVERFLOW and indicates in the 'len'
	 * the size required for the current property nvlist generation
	 * (itself returned in the generation member).
	 */
	if (rc == EOVERFLOW &&
	    (buf2 == NULL || uargs.generation != expgen)) {
		if (buf2 != NULL)
			free(buf2);

		if ((sz = uargs.packednvl.len) > 1024 * 1024)
			return (E2BIG);

		bufp = buf2 = malloc(sz);

		if (buf2 == NULL)
			return (errno = ENOMEM);

		expgen = uargs.generation;
		goto again;
	}

	/*
	 * The chan prop nvlist can be absent, in which case the ioctl
	 * returns success and uargs.packednvl.len of 0; we have already
	 * set *nvlp to NULL.  Otherwise we must unpack the nvl.
	 */
	if (rc == 0 && uargs.packednvl.len != 0 &&
	    nvlist_unpack(bufp, uargs.packednvl.len, nvlp, 0) != 0)
		rc = EINVAL;

	if (buf2 != NULL)
		free(buf2);

	return (rc ? errno = rc : 0);
}
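
/*
 * Illustrative sketch (comment only, not compiled): attaching a property
 * nvlist to a channel and reading it back.  The channel handle "ch" and
 * the property name are hypothetical; error handling is abbreviated.
 *
 *	nvlist_t *props, *curprops;
 *
 *	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0) {
 *		(void) nvlist_add_int32(props, "example-prop", 1);
 *		(void) sysevent_evc_setpropnvl(ch, props);
 *		nvlist_free(props);
 *	}
 *
 *	if (sysevent_evc_getpropnvl(ch, &curprops) == 0 && curprops != NULL)
 *		nvlist_free(curprops);
 */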