xref: /titanic_41/usr/src/lib/libsysevent/libevchannel.c (revision 989f28072d20c73ae0955d6a1e3e2fc74831cb39)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <stdio.h>
26 #include <ctype.h>
27 #include <fcntl.h>
28 #include <errno.h>
29 #include <door.h>
30 #include <unistd.h>
31 #include <stddef.h>
32 #include <stdlib.h>
33 #include <strings.h>
34 #include <pthread.h>
35 #include <atomic.h>
36 #include <signal.h>
37 #include <sys/types.h>
38 #include <sys/varargs.h>
39 #include <sys/sysevent.h>
40 #include <sys/sysevent_impl.h>
41 
42 #include "libsysevent.h"
43 #include "libsysevent_impl.h"
44 
45 /*
46  * The functions below deal with the General Purpose Event Handling framework
47  *
48  * sysevent_evc_bind	    - create/bind application to named channel
49  * sysevent_evc_unbind	    - unbind from previously bound/created channel
50  * sysevent_evc_subscribe   - subscribe to existing event channel
51  * sysevent_evc_unsubscribe - unsubscribe from existing event channel
52  * sysevent_evc_publish	    - generate a system event via an event channel
53  * sysevent_evc_control	    - various channel based control operation
54  */
55 
/* Terminates the private door server threads of an xsubscribe subscription */
static void kill_door_servers(evchan_subscr_t *);

#define	misaligned(p)	((uintptr_t)(p) & 3)	/* 4-byte alignment required */

/* TSD key; each private door server thread stores its evchan_subscr_t here */
static pthread_key_t nrkey = PTHREAD_ONCE_KEY_NP;
61 
62 /*
63  * If the current thread is a door server thread servicing a door created
64  * for us in sysevent_evc_xsubscribe, then an attempt to unsubscribe from
65  * within door invocation context on the same channel will deadlock in the
66  * kernel waiting for our own invocation to complete.  Such calls are
67  * forbidden, and we abort if they are encountered (better than hanging
68  * unkillably).
69  *
70  * We'd like to offer this detection to subscriptions established with
71  * sysevent_evc_subscribe, but we don't have control over the door service
72  * threads in that case.  Perhaps the fix is to always use door_xcreate
73  * even for sysevent_evc_subscribe?
74  */
75 static boolean_t
76 will_deadlock(evchan_t *scp)
77 {
78 	evchan_subscr_t *subp = pthread_getspecific(nrkey);
79 	evchan_impl_hdl_t *hdl = EVCHAN_IMPL_HNDL(scp);
80 
81 	return (subp != NULL && subp->ev_subhead == hdl ? B_TRUE : B_FALSE);
82 }
83 
/*
 * Check syntax of a channel name: every character must satisfy
 * EVCH_ISCHANCHAR.  Returns 1 if valid, 0 otherwise.
 */
static int
sysevent_is_chan_name(const char *str)
{
	const char *p = str;

	while (*p != '\0') {
		if (!EVCH_ISCHANCHAR(*p))
			return (0);
		p++;
	}

	return (1);
}
97 
/*
 * Check that a string consists solely of printable ASCII characters
 * (space through tilde).  Returns 1 if so (including for the empty
 * string), 0 otherwise.
 */
static int
strisprint(const char *s)
{
	const char *p;

	for (p = s; *p != '\0'; p++) {
		if (*p < ' ' || *p > '~')
			return (0);
	}

	return (1);
}
111 
/*
 * sysevent_evc_bind - Create/bind application to named channel
 *
 * Opens the sysevent driver, issues SEV_CHAN_OPEN for the named channel
 * and returns the opaque binding handle through scpp.  Returns 0 on
 * success, otherwise an errno value (also stored in errno).
 */
int
sysevent_evc_bind(const char *channel, evchan_t **scpp, uint32_t flags)
{
	int chanlen;
	evchan_t *scp;
	sev_bind_args_t uargs;
	int ec;

	/* Result pointer must be present and 4-byte aligned */
	if (scpp == NULL || misaligned(scpp)) {
		return (errno = EINVAL);
	}

	/* Provide useful value in error case */
	*scpp = NULL;

	if (channel == NULL ||
	    (chanlen = strlen(channel) + 1) > MAX_CHNAME_LEN) {
		return (errno = EINVAL);
	}

	/* Check channel syntax */
	if (!sysevent_is_chan_name(channel)) {
		return (errno = EINVAL);
	}

	if (flags & ~EVCH_B_FLAGS) {
		return (errno = EINVAL);
	}

	/* Zero-filled so list heads etc. in the handle start out NULL */
	scp = calloc(1, sizeof (evchan_impl_hdl_t));
	if (scp == NULL) {
		return (errno = ENOMEM);
	}

	/*
	 * Enable sysevent driver.  Fallback if the device link doesn't exist;
	 * this situation can arise if a channel is bound early in system
	 * startup, prior to devfsadm(1M) being invoked.
	 */
	EV_FD(scp) = open(DEVSYSEVENT, O_RDWR);
	if (EV_FD(scp) == -1) {
		if (errno != ENOENT) {
			/* Map EACCES to EPERM for a consistent API error */
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}

		/* Link absent: try the raw /devices path before giving up */
		EV_FD(scp) = open(DEVICESYSEVENT, O_RDWR);
		if (EV_FD(scp) == -1) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}
	}

	/*
	 * Force to close the fd's when process is doing exec.
	 * The driver will then release stale binding handles.
	 * The driver will release also the associated subscriptions
	 * if EVCH_SUB_KEEP flag was not set.
	 */
	(void) fcntl(EV_FD(scp), F_SETFD, FD_CLOEXEC);

	uargs.chan_name.name = (uintptr_t)channel;
	uargs.chan_name.len = chanlen;
	uargs.flags = flags;

	if (ioctl(EV_FD(scp), SEV_CHAN_OPEN, &uargs) != 0) {
		ec = errno;
		(void) close(EV_FD(scp));
		free(scp);
		return (errno = ec);
	}

	/* Needed to detect a fork() */
	EV_PID(scp) = getpid();
	(void) mutex_init(EV_LOCK(scp), USYNC_THREAD, NULL);

	*scpp = scp;

	return (0);
}
197 
/*
 * sysevent_evc_unbind - Unbind from previously bound/created channel
 *
 * Drains and frees every subscription held through this binding, closes
 * the driver fd and frees the handle.  Returns 0 or an errno value
 * (EDEADLK when called from one of this channel's own door server
 * threads).
 */
int
sysevent_evc_unbind(evchan_t *scp)
{
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp;
	int errcp;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	/* Unbinding from within our own door upcall would hang in-kernel */
	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * Unsubscribe, if we are in the process which did the bind.
	 */
	if (EV_PID(scp) == getpid()) {
		/* NULL/0 sid means all subscriber ids of this binding */
		uargs.sid.name = NULL;
		uargs.sid.len = 0;
		/*
		 * The unsubscribe ioctl will block until all door upcalls have
		 * drained.
		 */
		if (ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs) != 0) {
			errcp = errno;
			(void) mutex_unlock(EV_LOCK(scp));
			return (errno = errcp);
		}
	}

	/* Tear down and free each subscription record */
	while ((subp =  EV_SUB_NEXT(scp)) != NULL) {
		EV_SUB_NEXT(scp) = subp->evsub_next;

		/* If door_xcreate was applied we can clean up */
		if (subp->evsub_attr)
			kill_door_servers(subp);

		/* EPERM: door fd was inherited, not created; just close it */
		if (door_revoke(subp->evsub_door_desc) != 0 && errno == EPERM)
			(void) close(subp->evsub_door_desc);

		free(subp->evsub_sid);
		free(subp);
	}

	(void) mutex_unlock(EV_LOCK(scp));

	/*
	 * The close of the driver will do the unsubscribe if a) it is the last
	 * close and b) we are in a child which inherited subscriptions.
	 */
	(void) close(EV_FD(scp));
	(void) mutex_destroy(EV_LOCK(scp));
	free(scp);

	return (0);
}
259 
/*
 * sysevent_evc_publish - Generate a system event via an event channel
 *
 * Builds a sysevent from the class/subclass/vendor/publisher strings
 * plus the optional attribute list and hands it to the driver with
 * SEV_PUBLISH.  Returns 0 or an errno value.
 */
int
sysevent_evc_publish(evchan_t *scp, const char *class,
    const char *subclass, const char *vendor,
    const char *pub_name, nvlist_t *attr_list,
    uint32_t flags)
{
	sysevent_t *ev;
	sev_publish_args_t uargs;
	int rc;
	int ec;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	ev = sysevent_alloc_event((char *)class, (char *)subclass,
	    (char *)vendor, (char *)pub_name, attr_list);
	if (ev == NULL) {
		/* errno was set by the failed allocation */
		return (errno);
	}

	uargs.ev.name = (uintptr_t)ev;
	uargs.ev.len = SE_SIZE(ev);
	uargs.flags = flags;

	(void) mutex_lock(EV_LOCK(scp));

	/* Capture errno immediately; unlock/free below may clobber it */
	rc = ioctl(EV_FD(scp), SEV_PUBLISH, (intptr_t)&uargs);
	ec = errno;

	(void) mutex_unlock(EV_LOCK(scp));

	sysevent_free(ev);

	if (rc != 0) {
		return (ec);
	}
	return (0);
}
307 
308 /*
309  * Generic callback which catches events from the kernel and calls
310  * subscribers call back routine.
311  *
312  * Kernel guarantees that door_upcalls are disabled when unsubscription
313  * was issued that's why cookie points always to a valid evchan_subscr_t *.
314  *
315  * Furthermore it's not necessary to lock subp because the sysevent
316  * framework guarantees no unsubscription until door_return.
317  */
318 /*ARGSUSED3*/
319 static void
320 door_upcall(void *cookie, char *args, size_t alen,
321     door_desc_t *ddp, uint_t ndid)
322 {
323 	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
324 	int rval = 0;
325 
326 	/*
327 	 * If we've been invoked simply to kill the thread then
328 	 * exit now.
329 	 */
330 	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
331 		pthread_exit(NULL);
332 
333 	if (args == NULL || alen <= (size_t)0) {
334 		/* Skip callback execution */
335 		rval = EINVAL;
336 	} else {
337 		rval = subp->evsub_func((sysevent_t *)(void *)args,
338 		    subp->evsub_cookie);
339 	}
340 
341 	/*
342 	 * Fill in return values for door_return
343 	 */
344 	alen = sizeof (rval);
345 	bcopy(&rval, args, alen);
346 
347 	(void) door_return(args, alen, NULL, 0);
348 }
349 
static pthread_once_t xsub_thrattr_once = PTHREAD_ONCE_INIT;
static pthread_attr_t xsub_thrattr;

/*
 * One-time initialization of the default attributes for private door
 * server threads: detached, system contention scope.
 */
static void
xsub_thrattr_init(void)
{
	pthread_attr_t *ap = &xsub_thrattr;

	(void) pthread_attr_init(ap);
	(void) pthread_attr_setdetachstate(ap, PTHREAD_CREATE_DETACHED);
	(void) pthread_attr_setscope(ap, PTHREAD_SCOPE_SYSTEM);
}
361 
/*
 * Our door server create function is only called during initial
 * door_xcreate since we specify DOOR_NO_DEPLETION_CB.
 *
 * Per door_xcreate conventions: returns 1 when a server thread was
 * created, -1 on creation failure, 0 when the subscription is already
 * closing, or whatever a caller-supplied create function returns.
 */
int
xsub_door_server_create(door_info_t *dip, void *(*startf)(void *),
    void *startfarg, void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;
	pthread_attr_t *thrattr;
	sigset_t oset;
	int err;

	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		return (0);	/* shouldn't happen, but just in case */

	/*
	 * If sysevent_evc_xsubscribe was called electing to use a
	 * different door server create function then let it take it
	 * from here.
	 */
	if (xsa->xs_thrcreate) {
		return (xsa->xs_thrcreate(dip, startf, startfarg,
		    xsa->xs_thrcreate_cookie));
	}

	/* Use the subscriber's thread attributes, or our defaults */
	if (xsa->xs_thrattr == NULL) {
		(void) pthread_once(&xsub_thrattr_once, xsub_thrattr_init);
		thrattr = &xsub_thrattr;
	} else {
		thrattr = xsa->xs_thrattr;
	}

	/* Start the server thread with the subscriber's signal mask */
	(void) pthread_sigmask(SIG_SETMASK, &xsa->xs_sigmask, &oset);
	err = pthread_create(NULL, thrattr, startf, startfarg);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);

	return (err == 0 ? 1 : -1);
}
402 
403 void
404 xsub_door_server_setup(void *cookie)
405 {
406 	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
407 	struct sysevent_subattr_impl *xsa = subp->evsub_attr;
408 
409 	if (xsa->xs_thrsetup == NULL) {
410 		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
411 		(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
412 	}
413 
414 	(void) pthread_setspecific(nrkey, (void *)subp);
415 
416 	if (xsa->xs_thrsetup)
417 		xsa->xs_thrsetup(xsa->xs_thrsetup_cookie);
418 }
419 
420 /*
421  * Cause private door server threads to exit.  We have already performed the
422  * unsubscribe ioctl which stops new invocations and waits until all
423  * existing invocations are complete.  So all server threads should be
424  * blocked in door_return.  The door has not yet been revoked.  We will
425  * invoke repeatedly after setting the evsub_state to be noticed on
426  * wakeup; each invocation will result in the death of one server thread.
427  *
428  * You'd think it would be easier to kill these threads, such as through
429  * pthread_cancel.  Unfortunately door_return is not a cancellation point,
430  * and if you do cancel a thread blocked in door_return the EINTR check in
431  * the door_return assembly logic causes us to loop with EINTR forever!
432  */
static void
kill_door_servers(evchan_subscr_t *subp)
{
	door_arg_t da;

	/* Publish the CLOSING state before waking the server thread */
	subp->evsub_state = EVCHAN_SUB_STATE_CLOSING;
	membar_producer();

	/* Dataless invocation; the server sees CLOSING and pthread_exits */
	bzero(&da, sizeof (da));
	(void) door_call(subp->evsub_door_desc, &da);
}
444 
/*
 * Common implementation for sysevent_evc_subscribe and
 * sysevent_evc_xsubscribe.  Creates the upcall door (a private one via
 * door_xcreate when xsa is non-NULL, a regular door otherwise),
 * registers the subscriber id with the driver and links the new
 * subscription onto the channel handle.  Returns 0 or an errno value.
 */
static int
sysevent_evc_subscribe_cmn(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, struct sysevent_subattr_impl *xsa)
{
	evchan_subscr_t *subp;
	int upcall_door;
	sev_subscribe_args_t uargs;
	uint32_t sid_len;
	uint32_t class_len;
	int ec;

	if (scp == NULL || misaligned(scp) || sid == NULL || class == NULL) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	/* Subscriber id must be non-empty; both strings length-bounded */
	if ((sid_len = strlen(sid) + 1) > MAX_SUBID_LEN || sid_len == 1 ||
	    (class_len = strlen(class) + 1) > MAX_CLASS_LEN) {
		return (errno = EINVAL);
	}

	/* Check for printable characters */
	if (!strisprint(sid)) {
		return (errno = EINVAL);
	}

	if (event_handler == NULL) {
		return (errno = EINVAL);
	}

	/* TSD key used by door server threads for deadlock detection */
	if (pthread_key_create_once_np(&nrkey, NULL) != 0)
		return (errno);	/* ENOMEM or EAGAIN */

	/* Create subscriber data */
	if ((subp = calloc(1, sizeof (evchan_subscr_t))) == NULL) {
		return (errno);
	}

	if ((subp->evsub_sid = strdup(sid)) == NULL) {
		ec = errno;
		free(subp);
		return (ec);
	}

	/*
	 * EC_ALL string will not be copied to kernel - NULL is assumed
	 */
	if (strcmp(class, EC_ALL) == 0) {
		class = NULL;
		class_len = 0;
	}

	/*
	 * Fill this in now for the xsub_door_server_setup dance
	 */
	subp->ev_subhead = EVCHAN_IMPL_HNDL(scp);
	subp->evsub_state = EVCHAN_SUB_STATE_ACTIVE;

	if (xsa == NULL) {
		upcall_door = door_create(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	} else {
		subp->evsub_attr = xsa;

		/*
		 * Create a private door with exactly one thread to
		 * service the callbacks (the GPEC kernel implementation
		 * serializes deliveries for each subscriber id).
		 */
		upcall_door = door_xcreate(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL | DOOR_NO_DEPLETION_CB,
		    xsub_door_server_create, xsub_door_server_setup,
		    (void *)subp, 1);
	}

	if (upcall_door == -1) {
		ec = errno;
		free(subp->evsub_sid);
		free(subp);
		return (ec);
	}

	/* Complete subscriber information */
	subp->evsub_door_desc = upcall_door;
	subp->evsub_func = event_handler;
	subp->evsub_cookie = cookie;

	(void) mutex_lock(EV_LOCK(scp));

	uargs.sid.name = (uintptr_t)sid;
	uargs.sid.len = sid_len;
	uargs.class_info.name = (uintptr_t)class;
	uargs.class_info.len = class_len;
	uargs.door_desc = subp->evsub_door_desc;
	uargs.flags = flags;
	if (ioctl(EV_FD(scp), SEV_SUBSCRIBE, (intptr_t)&uargs) != 0) {
		ec = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		/* Stop private server threads before revoking their door */
		if (xsa)
			kill_door_servers(subp);
		(void) door_revoke(upcall_door);
		free(subp->evsub_sid);
		free(subp);
		return (ec);
	}

	/* Attach to subscriber list */
	subp->evsub_next = EV_SUB_NEXT(scp);
	EV_SUB_NEXT(scp) = subp;

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}
564 
565 /*
566  * sysevent_evc_subscribe - subscribe to an existing event channel
567  * using a non-private door (which will create as many server threads
568  * as the apparent maximum concurrency requirements suggest).
569  */
570 int
571 sysevent_evc_subscribe(evchan_t *scp, const char *sid, const char *class,
572     int (*event_handler)(sysevent_t *ev, void *cookie),
573     void *cookie, uint32_t flags)
574 {
575 	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
576 	    cookie, flags, NULL));
577 }
578 
579 static void
580 subattr_dfltinit(struct sysevent_subattr_impl *xsa)
581 {
582 	(void) sigfillset(&xsa->xs_sigmask);
583 	(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
584 }
585 
/* Shared default subscription attributes, initialized once on demand */
static struct sysevent_subattr_impl dfltsa;
pthread_once_t dfltsa_inited = PTHREAD_ONCE_INIT;

/* pthread_once() callback: set up the shared default subattr */
static void
init_dfltsa(void)
{
	subattr_dfltinit(&dfltsa);
}
594 
595 /*
596  * sysevent_evc_subscribe - subscribe to an existing event channel
597  * using a private door with control over thread creation.
598  */
599 int
600 sysevent_evc_xsubscribe(evchan_t *scp, const char *sid, const char *class,
601     int (*event_handler)(sysevent_t *ev, void *cookie),
602     void *cookie, uint32_t flags, sysevent_subattr_t *attr)
603 {
604 	struct sysevent_subattr_impl *xsa;
605 
606 	if (attr != NULL) {
607 		xsa = (struct sysevent_subattr_impl *)attr;
608 	} else {
609 		xsa = &dfltsa;
610 		(void) pthread_once(&dfltsa_inited, init_dfltsa);
611 	}
612 
613 	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
614 	    cookie, flags, xsa));
615 }
616 
617 sysevent_subattr_t *
618 sysevent_subattr_alloc(void)
619 {
620 	struct sysevent_subattr_impl *xsa = calloc(1, sizeof (*xsa));
621 
622 	if (xsa != NULL)
623 		subattr_dfltinit(xsa);
624 
625 	return (xsa != NULL ? (sysevent_subattr_t *)xsa : NULL);
626 }
627 
628 void
629 sysevent_subattr_free(sysevent_subattr_t *attr)
630 {
631 	struct sysevent_subattr_impl *xsa =
632 	    (struct sysevent_subattr_impl *)attr;
633 
634 	free(xsa);
635 }
636 
637 void
638 sysevent_subattr_thrcreate(sysevent_subattr_t *attr,
639     door_xcreate_server_func_t *thrcreate, void *cookie)
640 {
641 	struct sysevent_subattr_impl *xsa =
642 	    (struct sysevent_subattr_impl *)attr;
643 
644 	xsa->xs_thrcreate = thrcreate;
645 	xsa->xs_thrcreate_cookie = cookie;
646 }
647 
648 void
649 sysevent_subattr_thrsetup(sysevent_subattr_t *attr,
650     door_xcreate_thrsetup_func_t *thrsetup, void *cookie)
651 {
652 	struct sysevent_subattr_impl *xsa =
653 	    (struct sysevent_subattr_impl *)attr;
654 
655 	xsa->xs_thrsetup = thrsetup;
656 	xsa->xs_thrsetup_cookie = cookie;
657 }
658 
659 void
660 sysevent_subattr_sigmask(sysevent_subattr_t *attr, sigset_t *set)
661 {
662 	struct sysevent_subattr_impl *xsa =
663 	    (struct sysevent_subattr_impl *)attr;
664 
665 	if (set) {
666 		xsa->xs_sigmask = *set;
667 	} else {
668 		(void) sigfillset(&xsa->xs_sigmask);
669 		(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
670 	}
671 }
672 
673 void
674 sysevent_subattr_thrattr(sysevent_subattr_t *attr, pthread_attr_t *thrattr)
675 {
676 	struct sysevent_subattr_impl *xsa =
677 	    (struct sysevent_subattr_impl *)attr;
678 
679 	xsa->xs_thrattr = thrattr;
680 }
681 
/*
 * sysevent_evc_unsubscribe - Unsubscribe from an existing event channel
 *
 * sid may be EVCH_ALLSUB to drop every subscription held through this
 * binding.  Returns 0 or an errno value (EDEADLK when called from one
 * of this channel's own door server threads).
 */
int
sysevent_evc_unsubscribe(evchan_t *scp, const char *sid)
{
	int all_subscribers = 0;
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp, *prevsubp, *tofree;
	int errcp;
	int rc;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (sid == NULL || strlen(sid) == 0 ||
	    (strlen(sid) >= MAX_SUBID_LEN))
		return (errno = EINVAL);

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid())
		return (errno = EINVAL);

	if (strcmp(sid, EVCH_ALLSUB) == 0) {
		all_subscribers++;
		/* Indicates all subscriber id's for this channel */
		uargs.sid.name = NULL;
		uargs.sid.len = 0;
	} else {
		uargs.sid.name = (uintptr_t)sid;
		uargs.sid.len = strlen(sid) + 1;
	}

	/* Unsubscribing from within our own door upcall would hang */
	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * The unsubscribe ioctl will block until all door upcalls have drained.
	 */
	rc = ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs);

	if (rc != 0) {
		errcp = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		return (errno = errcp); /* EFAULT, ENXIO, EINVAL possible */
	}


	/*
	 * Search for the matching subscriber.  If EVCH_ALLSUB was specified
	 * then the ioctl above will have returned 0 even if there are
	 * no subscriptions, so the initial EV_SUB_NEXT can be NULL.
	 */
	prevsubp = NULL;
	subp =  EV_SUB_NEXT(scp);
	while (subp != NULL) {
		if (all_subscribers || strcmp(subp->evsub_sid, sid) == 0) {
			/* Unlink from the channel's subscriber list */
			if (prevsubp == NULL) {
				EV_SUB_NEXT(scp) = subp->evsub_next;
			} else {
				prevsubp->evsub_next = subp->evsub_next;
			}

			tofree = subp;
			subp = subp->evsub_next;

			/* If door_xcreate was applied we can clean up */
			if (tofree->evsub_attr)
				kill_door_servers(tofree);

			(void) door_revoke(tofree->evsub_door_desc);
			free(tofree->evsub_sid);
			free(tofree);

			/* Freed single subscriber already? */
			if (all_subscribers == 0)
				break;
		} else {
			prevsubp = subp;
			subp = subp->evsub_next;
		}
	}

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}
771 
/*
 * sysevent_evc_control - Various channel based control operation
 *
 * The variadic argument depends on cmd:
 *	EVCH_GET_CHAN_LEN, EVCH_GET_CHAN_LEN_MAX: uint32_t * (result out)
 *	EVCH_SET_CHAN_LEN: uint32_t (new queue length)
 * Returns 0 or an errno value, which is also stored in errno.
 */
int
sysevent_evc_control(evchan_t *scp, int cmd, /* arg */ ...)
{
	va_list ap;
	uint32_t *chlenp;
	sev_control_args_t uargs;
	int rc = 0;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	va_start(ap, cmd);

	uargs.cmd = cmd;

	(void) mutex_lock(EV_LOCK(scp));

	switch (cmd) {
	case EVCH_GET_CHAN_LEN:
	case EVCH_GET_CHAN_LEN_MAX:
		chlenp = va_arg(ap, uint32_t *);
		if (chlenp == NULL || misaligned(chlenp)) {
			rc = EINVAL;
			break;
		}
		/* NOTE(review): value is copied out even if the ioctl fails */
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		*chlenp = uargs.value;
		break;

	case EVCH_SET_CHAN_LEN:
		/* Range change will be handled in framework */
		uargs.value = va_arg(ap, uint32_t);
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		break;

	default:
		rc = EINVAL;
	}

	(void) mutex_unlock(EV_LOCK(scp));

	/* An ioctl failure (-1) is translated to the captured errno */
	if (rc == -1) {
		rc = errno;
	}

	va_end(ap);

	return (errno = rc);
}
830 
/*
 * sysevent_evc_setpropnvl - Replace the channel's property nvlist.
 *
 * A NULL nvl is passed to the driver as a zero-length buffer.  Returns
 * EINVAL for a bad handle, errno if packing the nvlist fails, otherwise
 * the result of the SEV_SETPROPNVL ioctl.
 */
int
sysevent_evc_setpropnvl(evchan_t *scp, nvlist_t *nvl)
{
	sev_propnvl_args_t uargs;
	char *buf = NULL;
	size_t nvlsz = 0;
	int rc;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	/* Pack to a native-encoded buffer the driver can copy in */
	if (nvl != NULL &&
	    nvlist_pack(nvl, &buf, &nvlsz, NV_ENCODE_NATIVE, 0) != 0)
		return (errno);

	uargs.packednvl.name = (uint64_t)(uintptr_t)buf;
	uargs.packednvl.len = (uint32_t)nvlsz;

	rc = ioctl(EV_FD(scp), SEV_SETPROPNVL, (intptr_t)&uargs);

	if (buf)
		free(buf);

	return (rc);
}
856 
/*
 * sysevent_evc_getpropnvl - Retrieve the channel's property nvlist.
 *
 * A 1K stack buffer is tried first; on EOVERFLOW the driver reports the
 * size needed for the current nvlist generation and we retry (once per
 * generation change) with an allocated buffer, refusing anything over
 * 1MB.  On success *nvlp holds the unpacked nvlist — NULL if the
 * channel has no properties.  Returns 0 or an error code.
 */
int
sysevent_evc_getpropnvl(evchan_t *scp, nvlist_t **nvlp)
{
	sev_propnvl_args_t uargs;
	char buf[1024], *bufp = buf;	/* stack buffer */
	size_t sz = sizeof (buf);
	char *buf2 = NULL;		/* allocated if stack buf too small */
	int64_t expgen = -1;
	int rc;

	if (scp == NULL || misaligned(scp) || nvlp == NULL)
		return (errno = EINVAL);

	*nvlp = NULL;

again:
	uargs.packednvl.name = (uint64_t)(uintptr_t)bufp;
	uargs.packednvl.len = (uint32_t)sz;

	/* NOTE(review): rc is compared against error codes directly below,
	 * so this ioctl evidently returns them as its result — confirm
	 * against the sysevent driver. */
	rc = ioctl(EV_FD(scp), SEV_GETPROPNVL, (intptr_t)&uargs);

	if (rc == E2BIG)
		return (errno = E2BIG);	/* driver refuses to copyout */

	/*
	 * If the packed nvlist is too big for the buffer size we offered
	 * then the ioctl returns EOVERFLOW and indicates in the 'len'
	 * the size required for the current property nvlist generation
	 * (itself returned in the generation member).
	 */
	if (rc == EOVERFLOW &&
	    (buf2 == NULL || uargs.generation != expgen)) {
		if (buf2 != NULL)
			free(buf2);

		/* Refuse unreasonably large property lists */
		if ((sz = uargs.packednvl.len) > 1024 * 1024)
			return (E2BIG);

		bufp = buf2 = malloc(sz);

		if (buf2 == NULL)
			return (errno = ENOMEM);

		expgen = uargs.generation;
		goto again;
	}

	/*
	 * The chan prop nvlist can be absent, in which case the ioctl
	 * returns success and uargs.packednvl.len of 0;  we have already
	 * set *nvlp to NULL.  Otherwise we must unpack the nvl.
	 */
	if (rc == 0 && uargs.packednvl.len != 0 &&
	    nvlist_unpack(bufp, uargs.packednvl.len, nvlp, 0) != 0)
		rc = EINVAL;

	if (buf2 != NULL)
		free(buf2);

	return (rc ? errno = rc : 0);
}
918