1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * This file contains the source of the general purpose event channel extension 28 * to the sysevent framework. This implementation is made up mainly of four 29 * layers of functionality: the event queues (evch_evq_*()), the handling of 30 * channels (evch_ch*()), the kernel interface (sysevent_evc_*()) and the 31 * interface for the sysevent pseudo driver (evch_usr*()). 32 * Libsysevent.so uses the pseudo driver sysevent's ioctl to access the event 33 * channel extensions. The driver in turn uses the evch_usr*() functions below. 34 * 35 * The interfaces for user land and kernel are declared in sys/sysevent.h 36 * Internal data structures for event channels are defined in 37 * sys/sysevent_impl.h. 38 * 39 * The basic data structure for an event channel is of type evch_chan_t. 40 * All channels are maintained by a list named evch_list. The list head 41 * is of type evch_dlist_t. 
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/stropts.h>
#include <sys/debug.h>
#include <sys/ddi.h>
#include <sys/vmem.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>
#include <sys/sysmacros.h>
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/door.h>
#include <sys/zone.h>
#include <sys/sdt.h>

/* Back-off delay for door_ki_upcall (clock ticks; doubled on each retry) */
#define	EVCH_MIN_PAUSE	8
#define	EVCH_MAX_PAUSE	128

/*
 * Map the address of an event payload back to its enclosing evch_gevent_t
 * administrative structure (the payload is appended to evch_gevent_t).
 */
#define	GEVENT(ev)	((evch_gevent_t *)((char *)ev - \
			    offsetof(evch_gevent_t, ge_payload)))

/* Current and high-water event counts of an event queue */
#define	EVCH_EVQ_EVCOUNT(x)	((&(x)->eq_eventq)->sq_count)
#define	EVCH_EVQ_HIGHWM(x)	((&(x)->eq_eventq)->sq_highwm)

/*
 * Values for ch_holdpend: deliver only after subscribers attach; the INDEF
 * variant additionally keeps the channel (and queued events) alive when the
 * last binding goes away.
 */
#define	CH_HOLD_PEND		1
#define	CH_HOLD_PEND_INDEF	2

/*
 * Per-zone state: the list of all event channels in the zone and the lock
 * protecting that list. Allocated by evch_zoneinit(), freed by
 * evch_zonefree().
 */
struct evch_globals {
	evch_dlist_t evch_list;
	kmutex_t evch_list_lock;
};

/* Variables used by event channel routines */
static int evq_initcomplete = 0;	/* set once delivery threads exist */
static zone_key_t evch_zone_key;
static uint32_t evch_channels_max;	/* computed in evch_chinit() */
static uint32_t evch_bindings_max = EVCH_MAX_BINDS_PER_CHANNEL;
static uint32_t evch_events_max;	/* per-channel event limit */

static void evch_evq_unsub(evch_eventq_t *, evch_evqsub_t *);
static void evch_evq_destroy(evch_eventq_t *);

/*
 * List handling. These functions handle a doubly linked list. The list has
 * to be protected by the calling functions. evch_dlist_t is the list head.
 * Every node of the list has to put a evch_dlelem_t data type in its data
 * structure as its first element.
 *
 * evch_dl_init	 - Initialize list head
 * evch_dl_fini	 - Terminate list handling
 * evch_dl_is_init - Returns one if list is initialized
 * evch_dl_add	 - Add element to end of list
 * evch_dl_del	 - Remove given element from list
 * evch_dl_search - Lookup element in list
 * evch_dl_getnum - Get number of elements in list
 * evch_dl_next	 - Get next elements of list
 */

static void
evch_dl_init(evch_dlist_t *hp)
{
	/* Empty list: head points to itself in both directions */
	hp->dh_head.dl_prev = hp->dh_head.dl_next = &hp->dh_head;
	hp->dh_count = 0;
}

/*
 * Assumes that list is empty. NULL links mark the list as uninitialized
 * for evch_dl_is_init().
 */
static void
evch_dl_fini(evch_dlist_t *hp)
{
	hp->dh_head.dl_prev = hp->dh_head.dl_next = NULL;
}

static int
evch_dl_is_init(evch_dlist_t *hp)
{
	return (hp->dh_head.dl_next != NULL ? 1 : 0);
}

/*
 * Add an element at the end of the list.
 */
static void
evch_dl_add(evch_dlist_t *hp, evch_dlelem_t *el)
{
	evch_dlelem_t *x = hp->dh_head.dl_prev;	/* current tail */
	evch_dlelem_t *y = &hp->dh_head;

	x->dl_next = el;
	y->dl_prev = el;
	el->dl_next = y;
	el->dl_prev = x;
	hp->dh_count++;
}

/*
 * Remove arbitrary element out of dlist. Links of the removed element are
 * cleared to catch stale use.
 */
static void
evch_dl_del(evch_dlist_t *hp, evch_dlelem_t *p)
{
	ASSERT(hp->dh_count > 0 && p != &hp->dh_head);
	p->dl_prev->dl_next = p->dl_next;
	p->dl_next->dl_prev = p->dl_prev;
	p->dl_prev = NULL;
	p->dl_next = NULL;
	hp->dh_count--;
}

/*
 * Search an element in a list. Caller provides comparison callback function;
 * cmp() returns 0 on a match. Returns the matching element or NULL.
 */
static evch_dlelem_t *
evch_dl_search(evch_dlist_t *hp, int (*cmp)(evch_dlelem_t *, char *), char *s)
{
	evch_dlelem_t *p;

	for (p = hp->dh_head.dl_next; p != &hp->dh_head; p = p->dl_next) {
		if (cmp(p, s) == 0) {
			return (p);
		}
	}
	return (NULL);
}

/*
 * Return number of elements in the list.
 */
static int
evch_dl_getnum(evch_dlist_t *hp)
{
	return (hp->dh_count);
}

/*
 * Find next element of a evch_dlist_t list. Find first element if el == NULL.
 * Returns NULL if end of list is reached.
 */
static void *
evch_dl_next(evch_dlist_t *hp, void *el)
{
	evch_dlelem_t *ep = (evch_dlelem_t *)el;

	if (hp->dh_count == 0) {
		return (NULL);
	}
	if (ep == NULL) {
		return (hp->dh_head.dl_next);
	}
	/* Wrapped around to the head sentinel: end of list */
	if ((ep = ep->dl_next) == (evch_dlelem_t *)hp) {
		return (NULL);
	}
	return ((void *)ep);
}

/*
 * Queue handling routines. Mutexes have to be entered previously.
 *
 * evch_q_init	- Initialize queue head
 * evch_q_in	- Put element into queue
 * evch_q_out	- Get element out of queue
 * evch_q_next	- Iterate over the elements of a queue
 */
static void
evch_q_init(evch_squeue_t *q)
{
	/* Empty queue: tail aliases the queue head itself */
	q->sq_head = NULL;
	q->sq_tail = (evch_qelem_t *)q;
	q->sq_count = 0;
	q->sq_highwm = 0;
}

/*
 * Put element into the queue q. Tracks the high-water mark of the queue
 * length in sq_highwm.
 */
static void
evch_q_in(evch_squeue_t *q, evch_qelem_t *el)
{
	q->sq_tail->q_next = el;
	el->q_next = NULL;
	q->sq_tail = el;
	q->sq_count++;
	if (q->sq_count > q->sq_highwm) {
		q->sq_highwm = q->sq_count;
	}
}

/*
 * Returns NULL if queue is empty.
 */
static evch_qelem_t *
evch_q_out(evch_squeue_t *q)
{
	evch_qelem_t *el;

	if ((el = q->sq_head) != NULL) {
		q->sq_head = el->q_next;
		q->sq_count--;
		if (q->sq_head == NULL) {
			/* Queue drained: reset tail to the head sentinel */
			q->sq_tail = (evch_qelem_t *)q;
		}
	}
	return (el);
}

/*
 * Returns element after *el or first if el == NULL. NULL is returned
 * if queue is empty or *el points to the last element in the queue.
 */
static evch_qelem_t *
evch_q_next(evch_squeue_t *q, evch_qelem_t *el)
{
	if (el == NULL)
		return (q->sq_head);
	return (el->q_next);
}

/*
 * Event queue handling functions. An event queue is the basic building block
 * of an event channel. One event queue makes up the publisher-side event queue.
 * Further event queues build the per-subscriber queues of an event channel.
 * Each queue is associated an event delivery thread.
 * These functions support a two-step initialization. First step, when kernel
 * memory is ready and second when threads are ready.
 * Events consist of an administrating evch_gevent_t structure with the event
 * data appended as variable length payload.
 * The internal interface functions for the event queue handling are:
 *
 * evch_evq_create	- create an event queue
 * evch_evq_thrcreate	- create thread for an event queue.
 * evch_evq_destroy	- delete an event queue
 * evch_evq_sub		- Subscribe to event delivery from an event queue
 * evch_evq_unsub	- Unsubscribe
 * evch_evq_pub		- Post an event into an event queue
 * evch_evq_stop	- Put delivery thread on hold
 * evch_evq_continue	- Resume event delivery thread
 * evch_evq_status	- Return status of delivery thread, running or on hold
 * evch_evq_evzalloc	- Allocate an event structure
 * evch_evq_evfree	- Free an event structure
 * evch_evq_evadd_dest	- Add a destructor function to an event structure
 * evch_evq_evnext	- Iterate over events non-destructive
 */

/*
 * Zone-create callback (registered via zone_key_create() in evch_chinit()):
 * allocate and initialize the per-zone channel list.
 */
/*ARGSUSED*/
static void *
evch_zoneinit(zoneid_t zoneid)
{
	struct evch_globals *eg;

	eg = kmem_zalloc(sizeof (*eg), KM_SLEEP);
	evch_dl_init(&eg->evch_list);
	return (eg);
}

/*
 * Zone-destroy callback: forcibly tear down all channels that remain in the
 * zone. At this point all processes of the zone are gone, so only persistent
 * (EVCH_SUB_KEEP) subscriptions and held channels can still exist.
 */
/*ARGSUSED*/
static void
evch_zonefree(zoneid_t zoneid, void *arg)
{
	struct evch_globals *eg = arg;
	evch_chan_t *chp;
	evch_subd_t *sdp;

	mutex_enter(&eg->evch_list_lock);

	/*
	 * Keep picking the head element off the list until there are no
	 * more.
	 */
	while ((chp = evch_dl_next(&eg->evch_list, NULL)) != NULL) {

		/*
		 * Since all processes are gone, all bindings should be gone,
		 * and only channels with SUB_KEEP subscribers should remain.
		 */
		mutex_enter(&chp->ch_mutex);
		ASSERT(chp->ch_bindings == 0);
		ASSERT(evch_dl_getnum(&chp->ch_subscr) != 0 ||
		    chp->ch_holdpend == CH_HOLD_PEND_INDEF);

		/* Forcibly unsubscribe each remaining subscription */
		while ((sdp = evch_dl_next(&chp->ch_subscr, NULL)) != NULL) {
			/*
			 * We should only be tearing down persistent
			 * subscribers at this point, since all processes
			 * from this zone are gone.
			 */
			ASSERT(sdp->sd_active == 0);
			ASSERT((sdp->sd_persist & EVCH_SUB_KEEP) != 0);
			/*
			 * Disconnect subscriber queue from main event queue.
			 */
			evch_evq_unsub(chp->ch_queue, sdp->sd_msub);

			/* Destruct per subscriber queue */
			evch_evq_unsub(sdp->sd_queue, sdp->sd_ssub);
			evch_evq_destroy(sdp->sd_queue);
			/*
			 * Eliminate the subscriber data from channel list.
			 */
			evch_dl_del(&chp->ch_subscr, &sdp->sd_link);
			kmem_free(sdp->sd_classname, sdp->sd_clnsize);
			kmem_free(sdp->sd_ident, strlen(sdp->sd_ident) + 1);
			kmem_free(sdp, sizeof (evch_subd_t));
		}

		/* Channel must now have no subscribers */
		ASSERT(evch_dl_getnum(&chp->ch_subscr) == 0);

		/* Just like unbind */
		mutex_exit(&chp->ch_mutex);
		evch_dl_del(&eg->evch_list, &chp->ch_link);
		evch_evq_destroy(chp->ch_queue);
		mutex_destroy(&chp->ch_mutex);
		mutex_destroy(&chp->ch_pubmx);
		cv_destroy(&chp->ch_pubcv);
		kmem_free(chp->ch_name, chp->ch_namelen);
		kmem_free(chp, sizeof (evch_chan_t));
	}

	mutex_exit(&eg->evch_list_lock);
	/* all channels should now be gone */
	ASSERT(evch_dl_getnum(&eg->evch_list) == 0);
	kmem_free(eg, sizeof (*eg));
}

/*
 * Frees evch_gevent_t structure including the payload, if the reference count
 * drops to or below zero. Below zero happens when the event is freed
 * without being queued into a queue. Runs the optional destructor registered
 * by evch_evq_evadd_dest() before freeing.
 */
static void
evch_gevent_free(evch_gevent_t *evp)
{
	int32_t refcnt;

	refcnt = (int32_t)atomic_add_32_nv(&evp->ge_refcount, -1);
	if (refcnt <= 0) {
		if (evp->ge_destruct != NULL) {
			evp->ge_destruct((void *)&(evp->ge_payload),
			    evp->ge_dstcookie);
		}
		kmem_free(evp, evp->ge_size);
	}
}

/*
 * Deliver is called for every subscription to the current event
 * It calls the registered filter function and then the registered delivery
 * callback routine. Returns 0 on success. The callback routine returns
 * EVQ_AGAIN or EVQ_SLEEP in case the event could not be delivered.
 */
static int
evch_deliver(evch_evqsub_t *sp, evch_gevent_t *ep)
{
	void	*uep = &ep->ge_payload;
	int	res = EVQ_DELIVER;

	/* Run the subscriber's filter first; no filter means deliver */
	if (sp->su_filter != NULL) {
		res = sp->su_filter(uep, sp->su_fcookie);
	}
	if (res == EVQ_DELIVER) {
		return (sp->su_callb(uep, sp->su_cbcookie));
	}
	return (0);
}

/*
 * Holds event delivery in case of eq_holdmode set or in case the
 * event queue is empty. Mutex (eq_queuemx) must be held when called.
 * Wakes up a thread waiting for the delivery thread reaching the hold mode
 * (via eq_onholdcv) and sleeps CPR-safely on eq_thrsleepcv. Returns
 * immediately if thread abort has been requested.
 */
static void
evch_delivery_hold(evch_eventq_t *eqp, callb_cpr_t *cpip)
{
	if (eqp->eq_tabortflag == 0) {
		do {
			if (eqp->eq_holdmode) {
				cv_signal(&eqp->eq_onholdcv);
			}
			CALLB_CPR_SAFE_BEGIN(cpip);
			cv_wait(&eqp->eq_thrsleepcv, &eqp->eq_queuemx);
			CALLB_CPR_SAFE_END(cpip, &eqp->eq_queuemx);
		} while (eqp->eq_holdmode);
	}
}

/*
 * Event delivery thread. Enumerates all subscribers and calls evch_deliver()
 * for each one.
 */
static void
evch_delivery_thr(evch_eventq_t *eqp)
{
	evch_qelem_t	*qep;
	callb_cpr_t	cprinfo;
	int		res;
	evch_evqsub_t	*sub;
	int		deltime;	/* EVQ_AGAIN back-off, in ticks */
	int		repeatcount;	/* EVQ_AGAIN retries left */
	char		thnam[32];

	(void) snprintf(thnam, sizeof (thnam), "sysevent_chan-%d",
	    (int)eqp->eq_thrid);
	CALLB_CPR_INIT(&cprinfo, &eqp->eq_queuemx, callb_generic_cpr, thnam);
	mutex_enter(&eqp->eq_queuemx);
	while (eqp->eq_tabortflag == 0) {
		/* Drain the queue unless hold mode or abort is requested */
		while (eqp->eq_holdmode == 0 && eqp->eq_tabortflag == 0 &&
		    (qep = evch_q_out(&eqp->eq_eventq)) != NULL) {

			/* Filter and deliver event to all subscribers */
			deltime = EVCH_MIN_PAUSE;
			repeatcount = EVCH_MAX_TRY_DELIVERY;
			eqp->eq_curevent = qep->q_objref;
			sub = evch_dl_next(&eqp->eq_subscr, NULL);
			while (sub != NULL) {
				/*
				 * eq_dactive marks a delivery in progress;
				 * evch_evq_unsub() waits on eq_dactivecv for
				 * it to clear. The queue mutex is dropped
				 * around the callback.
				 */
				eqp->eq_dactive = 1;
				mutex_exit(&eqp->eq_queuemx);
				res = evch_deliver(sub, qep->q_objref);
				mutex_enter(&eqp->eq_queuemx);
				eqp->eq_dactive = 0;
				cv_signal(&eqp->eq_dactivecv);
				switch (res) {
				case EVQ_SLEEP:
					/*
					 * Wait for subscriber to return.
					 */
					eqp->eq_holdmode = 1;
					evch_delivery_hold(eqp, &cprinfo);
					if (eqp->eq_tabortflag) {
						break;
					}
					/* Retry same subscriber */
					continue;
				case EVQ_AGAIN:
					/*
					 * Transient failure: back off with
					 * exponentially growing delay, then
					 * retry until repeatcount expires.
					 */
					CALLB_CPR_SAFE_BEGIN(&cprinfo);
					mutex_exit(&eqp->eq_queuemx);
					delay(deltime);
					deltime =
					    deltime > EVCH_MAX_PAUSE ?
					    deltime : deltime << 1;
					mutex_enter(&eqp->eq_queuemx);
					CALLB_CPR_SAFE_END(&cprinfo,
					    &eqp->eq_queuemx);
					if (repeatcount-- > 0) {
						continue;
					}
					break;
				}
				if (eqp->eq_tabortflag) {
					break;
				}
				/* Advance to next subscriber */
				sub = evch_dl_next(&eqp->eq_subscr, sub);
				repeatcount = EVCH_MAX_TRY_DELIVERY;
			}
			eqp->eq_curevent = NULL;

			/* Free event data and queue element */
			evch_gevent_free((evch_gevent_t *)qep->q_objref);
			kmem_free(qep, qep->q_objsize);
		}

		/* Wait for next event or end of hold mode if set */
		evch_delivery_hold(eqp, &cprinfo);
	}
	CALLB_CPR_EXIT(&cprinfo);	/* Does mutex_exit of eqp->eq_queuemx */
	thread_exit();
}

/*
 * Create the event delivery thread for an existing event queue.
 */
static void
evch_evq_thrcreate(evch_eventq_t *eqp)
{
	kthread_t *thp;

	thp = thread_create(NULL, 0, evch_delivery_thr, (char *)eqp, 0, &p0,
	    TS_RUN, minclsyspri);
	eqp->eq_thrid = thp->t_did;
}

/*
 * Create event queue. The delivery thread is only started here if threads
 * are already available (evq_initcomplete); otherwise evch_chinitthr()
 * creates it later during the second initialization step.
 */
static evch_eventq_t *
evch_evq_create()
{
	evch_eventq_t *p;

	/* Allocate and initialize event queue descriptor */
	p = kmem_zalloc(sizeof (evch_eventq_t), KM_SLEEP);
	mutex_init(&p->eq_queuemx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&p->eq_thrsleepcv, NULL, CV_DEFAULT, NULL);
	evch_q_init(&p->eq_eventq);
	evch_dl_init(&p->eq_subscr);
	cv_init(&p->eq_dactivecv, NULL, CV_DEFAULT, NULL);
	cv_init(&p->eq_onholdcv, NULL, CV_DEFAULT, NULL);

	/* Create delivery thread */
	if (evq_initcomplete) {
		evch_evq_thrcreate(p);
	}
	return (p);
}

/*
 * Destroy an event queue. All subscribers have to be unsubscribed prior to
 * this call.
 */
static void
evch_evq_destroy(evch_eventq_t *eqp)
{
	evch_qelem_t *qep;

	ASSERT(evch_dl_getnum(&eqp->eq_subscr) == 0);
	/* Kill delivery thread; eq_thrid is 0 if it was never created */
	if (eqp->eq_thrid != NULL) {
		mutex_enter(&eqp->eq_queuemx);
		eqp->eq_tabortflag = 1;
		eqp->eq_holdmode = 0;
		cv_signal(&eqp->eq_thrsleepcv);
		mutex_exit(&eqp->eq_queuemx);
		thread_join(eqp->eq_thrid);
	}

	/* Get rid of stale events in the event queue */
	while ((qep = (evch_qelem_t *)evch_q_out(&eqp->eq_eventq)) != NULL) {
		evch_gevent_free((evch_gevent_t *)qep->q_objref);
		kmem_free(qep, qep->q_objsize);
	}

	/* Wrap up event queue structure */
	cv_destroy(&eqp->eq_onholdcv);
	cv_destroy(&eqp->eq_dactivecv);
	cv_destroy(&eqp->eq_thrsleepcv);
	evch_dl_fini(&eqp->eq_subscr);
	mutex_destroy(&eqp->eq_queuemx);

	/* Free descriptor structure */
	kmem_free(eqp, sizeof (evch_eventq_t));
}

/*
 * Subscribe to an event queue. Every subscriber provides a filter callback
 * routine and an event delivery callback routine. Returns the new
 * subscription handle; caller frees it via evch_evq_unsub().
 */
static evch_evqsub_t *
evch_evq_sub(evch_eventq_t *eqp, filter_f filter, void *fcookie,
    deliver_f callb, void *cbcookie)
{
	evch_evqsub_t *sp = kmem_zalloc(sizeof (evch_evqsub_t), KM_SLEEP);

	/* Initialize subscriber structure */
	sp->su_filter = filter;
	sp->su_fcookie = fcookie;
	sp->su_callb = callb;
	sp->su_cbcookie = cbcookie;

	/* Add subscription to queue */
	mutex_enter(&eqp->eq_queuemx);
	evch_dl_add(&eqp->eq_subscr, &sp->su_link);
	mutex_exit(&eqp->eq_queuemx);
	return (sp);
}

/*
 * Unsubscribe from an event queue.
 */
static void
evch_evq_unsub(evch_eventq_t *eqp, evch_evqsub_t *sp)
{
	mutex_enter(&eqp->eq_queuemx);

	/* Wait if delivery is just in progress */
	if (eqp->eq_dactive) {
		cv_wait(&eqp->eq_dactivecv, &eqp->eq_queuemx);
	}
	evch_dl_del(&eqp->eq_subscr, &sp->su_link);
	mutex_exit(&eqp->eq_queuemx);
	kmem_free(sp, sizeof (evch_evqsub_t));
}

/*
 * Publish an event. Returns 0 on success and -1 if memory alloc failed.
 * Takes a reference on the event and wakes the delivery thread. flags
 * selects the allocation strategy (EVCH_TRYHARD/EVCH_NOSLEEP/sleep).
 */
static int
evch_evq_pub(evch_eventq_t *eqp, void *ev, int flags)
{
	size_t size;
	evch_qelem_t	*qep;
	evch_gevent_t	*evp = GEVENT(ev);

	size = sizeof (evch_qelem_t);
	if (flags & EVCH_TRYHARD) {
		qep = kmem_alloc_tryhard(size, &size, KM_NOSLEEP);
	} else {
		qep = kmem_alloc(size, flags & EVCH_NOSLEEP ?
		    KM_NOSLEEP : KM_SLEEP);
	}
	if (qep == NULL) {
		return (-1);
	}
	qep->q_objref = (void *)evp;
	qep->q_objsize = size;
	atomic_add_32(&evp->ge_refcount, 1);
	mutex_enter(&eqp->eq_queuemx);
	evch_q_in(&eqp->eq_eventq, qep);

	/* Wakeup delivery thread */
	cv_signal(&eqp->eq_thrsleepcv);
	mutex_exit(&eqp->eq_queuemx);
	return (0);
}

/*
 * Enter hold mode of an event queue. Event delivery thread stops event
 * handling after delivery of current event (if any). Waits on eq_onholdcv
 * until the thread has actually reached the hold state; skipped before
 * threads exist (evq_initcomplete == 0).
 */
static void
evch_evq_stop(evch_eventq_t *eqp)
{
	mutex_enter(&eqp->eq_queuemx);
	eqp->eq_holdmode = 1;
	if (evq_initcomplete) {
		cv_signal(&eqp->eq_thrsleepcv);
		cv_wait(&eqp->eq_onholdcv, &eqp->eq_queuemx);
	}
	mutex_exit(&eqp->eq_queuemx);
}

/*
 * Continue event delivery.
 */
static void
evch_evq_continue(evch_eventq_t *eqp)
{
	mutex_enter(&eqp->eq_queuemx);
	eqp->eq_holdmode = 0;
	cv_signal(&eqp->eq_thrsleepcv);
	mutex_exit(&eqp->eq_queuemx);
}

/*
 * Returns status of delivery thread. 0 if running and 1 if on hold.
 */
static int
evch_evq_status(evch_eventq_t *eqp)
{
	return (eqp->eq_holdmode);
}

/*
 * Add a destructor function to an event structure. The destructor is run
 * by evch_gevent_free() when the last reference goes away.
 */
static void
evch_evq_evadd_dest(void *ev, destr_f destructor, void *cookie)
{
	evch_gevent_t *evp = GEVENT(ev);

	evp->ge_destruct = destructor;
	evp->ge_dstcookie = cookie;
}

/*
 * Allocate evch_gevent_t structure. Return address of payload offset of
 * evch_gevent_t. If EVCH_TRYHARD allocation is requested, we use
 * kmem_alloc_tryhard to alloc memory of at least paylsize bytes.
 *
 * If either memory allocation is unsuccessful, we return NULL.
 */
static void *
evch_evq_evzalloc(size_t paylsize, int flag)
{
	evch_gevent_t	*evp;
	size_t		rsize, evsize, ge_size;

	rsize = offsetof(evch_gevent_t, ge_payload) + paylsize;
	if (flag & EVCH_TRYHARD) {
		evp = kmem_alloc_tryhard(rsize, &evsize, KM_NOSLEEP);
		/* ge_size records the actual (possibly larger) allocation */
		ge_size = evsize;
	} else {
		evp = kmem_alloc(rsize, flag & EVCH_NOSLEEP ? KM_NOSLEEP :
		    KM_SLEEP);
		ge_size = rsize;
	}

	if (evp) {
		bzero(evp, rsize);
		evp->ge_size = ge_size;
		return (&evp->ge_payload);
	}
	return (evp);
}

/*
 * Free event structure. Argument ev is address of payload offset.
 */
static void
evch_evq_evfree(void *ev)
{
	evch_gevent_free(GEVENT(ev));
}

/*
 * Iterate over all events in the event queue. Begin with an event
 * which is currently being delivered. No mutexes are grabbed and no
 * resources allocated so that this function can be called in panic
 * context too. This function has to be called with ev == NULL initially.
 * Actually argument ev is only a flag. Internally the member eq_nextev
 * is used to determine the next event. But ev allows for the convenient
 * use like
 *	ev = NULL;
 *	while ((ev = evch_evq_evnext(evp, ev)) != NULL) ...
 */
static void *
evch_evq_evnext(evch_eventq_t *evq, void *ev)
{
	if (ev == NULL) {
		/* Start of iteration: report in-flight event first, if any */
		evq->eq_nextev = NULL;
		if (evq->eq_curevent != NULL)
			return (&evq->eq_curevent->ge_payload);
	}
	evq->eq_nextev = evch_q_next(&evq->eq_eventq, evq->eq_nextev);
	if (evq->eq_nextev == NULL)
		return (NULL);
	return (&((evch_gevent_t *)evq->eq_nextev->q_objref)->ge_payload);
}

/*
 * Channel handling functions. First some support functions. Functions belonging
 * to the channel handling interface start with evch_ch. The following functions
 * make up the channel handling internal interfaces:
 *
 * evch_chinit		- Initialize channel handling
 * evch_chinitthr	- Second step init: initialize threads
 * evch_chbind		- Bind to a channel
 * evch_chunbind	- Unbind from a channel
 * evch_chsubscribe	- Subscribe to a sysevent class
 * evch_chunsubscribe	- Unsubscribe
 * evch_chpublish	- Publish an event
 * evch_chgetnames	- Get names of all channels
 * evch_chgetchdata	- Get data of a channel
 * evch_chrdevent_init	- Init event q traversal
 * evch_chgetnextev	- Read out events queued for a subscriber
 * evch_chrdevent_fini	- Finish event q traversal
 */

/*
 * Compare channel name. Used for evch_dl_search to find a channel with the
 * name s.
 */
static int
evch_namecmp(evch_dlelem_t *ep, char *s)
{
	return (strcmp(((evch_chan_t *)ep)->ch_name, s));
}

/*
 * Simple wildcarded match test of event class string 'class' to
 * wildcarded subscription string 'pat'. Recursive only if
 * 'pat' includes a wildcard, otherwise essentially just strcmp.
808 */ 809 static int 810 evch_clsmatch(char *class, const char *pat) 811 { 812 char c; 813 814 do { 815 if ((c = *pat++) == '\0') 816 return (*class == '\0'); 817 818 if (c == '*') { 819 while (*pat == '*') 820 pat++; /* consecutive *'s can be collapsed */ 821 822 if (*pat == '\0') 823 return (1); 824 825 while (*class != '\0') { 826 if (evch_clsmatch(class++, pat) != 0) 827 return (1); 828 } 829 830 return (0); 831 } 832 } while (c == *class++); 833 834 return (0); 835 } 836 837 /* 838 * Sysevent filter callback routine. Enables event delivery only if it matches 839 * the event class pattern string given by parameter cookie. 840 */ 841 static int 842 evch_class_filter(void *ev, void *cookie) 843 { 844 const char *pat = (const char *)cookie; 845 846 if (pat == NULL || evch_clsmatch(SE_CLASS_NAME(ev), pat)) 847 return (EVQ_DELIVER); 848 849 return (EVQ_IGNORE); 850 } 851 852 /* 853 * Callback routine to propagate the event into a per subscriber queue. 854 */ 855 static int 856 evch_subq_deliver(void *evp, void *cookie) 857 { 858 evch_subd_t *p = (evch_subd_t *)cookie; 859 860 (void) evch_evq_pub(p->sd_queue, evp, EVCH_SLEEP); 861 return (EVQ_CONT); 862 } 863 864 /* 865 * Call kernel callback routine for sysevent kernel delivery. 866 */ 867 static int 868 evch_kern_deliver(void *evp, void *cookie) 869 { 870 sysevent_impl_t *ev = (sysevent_impl_t *)evp; 871 evch_subd_t *sdp = (evch_subd_t *)cookie; 872 873 return (sdp->sd_callback(ev, sdp->sd_cbcookie)); 874 } 875 876 /* 877 * Door upcall for user land sysevent delivery. 
 */
static int
evch_door_deliver(void *evp, void *cookie)
{
	int		error;
	size_t		size;
	sysevent_impl_t	*ev = (sysevent_impl_t *)evp;
	door_arg_t	darg;
	evch_subd_t	*sdp = (evch_subd_t *)cookie;
	int		nticks = EVCH_MIN_PAUSE;	/* EAGAIN back-off */
	uint32_t	retval;		/* status returned by the door server */
	int		retry = 20;

	/* Initialize door args */
	size = sizeof (sysevent_impl_t) + SE_PAYLOAD_SZ(ev);

	darg.rbuf = (char *)&retval;
	darg.rsize = sizeof (retval);
	darg.data_ptr = (char *)ev;
	darg.data_size = size;
	darg.desc_ptr = NULL;
	darg.desc_num = 0;

	for (;;) {
		if ((error = door_ki_upcall_limited(sdp->sd_door, &darg,
		    NULL, SIZE_MAX, 0)) == 0) {
			break;
		}
		switch (error) {
		case EAGAIN:
			/* Cannot deliver event - process may be forking */
			delay(nticks);
			nticks <<= 1;
			if (nticks > EVCH_MAX_PAUSE) {
				nticks = EVCH_MAX_PAUSE;
			}
			if (retry-- <= 0) {
				cmn_err(CE_CONT, "event delivery thread: "
				    "door_ki_upcall error EAGAIN\n");
				/* Give up on this event, keep the thread */
				return (EVQ_CONT);
			}
			break;
		case EINTR:
		case EBADF:
			/* Process died; hold delivery until resubscribe */
			return (EVQ_SLEEP);
		default:
			cmn_err(CE_CONT,
			    "event delivery thread: door_ki_upcall error %d\n",
			    error);
			return (EVQ_CONT);
		}
	}
	/* The subscriber itself may ask for a redelivery */
	if (retval == EAGAIN) {
		return (EVQ_AGAIN);
	}
	return (EVQ_CONT);
}

/*
 * Callback routine for evch_dl_search() to compare subscriber id's. Used by
 * evch_subscribe() and evch_chrdevent_init().
 */
static int
evch_subidcmp(evch_dlelem_t *ep, char *s)
{
	return (strcmp(((evch_subd_t *)ep)->sd_ident, s));
}

/*
 * Callback routine for evch_dl_search() to find a subscriber with EVCH_SUB_DUMP
 * set (indicated by sub->sd_dump != 0). Used by evch_chrdevent_init() and
 * evch_subscribe(). Needs to return 0 if subscriber with sd_dump set is
 * found.
 */
/*ARGSUSED1*/
static int
evch_dumpflgcmp(evch_dlelem_t *ep, char *s)
{
	return (((evch_subd_t *)ep)->sd_dump ? 0 : 1);
}

/*
 * Event destructor function. Used to maintain the number of events per
 * channel. Wakes up a publisher possibly blocked in evch_chpublish()
 * waiting for room in the channel.
 */
/*ARGSUSED*/
static void
evch_destr_event(void *ev, void *ch)
{
	evch_chan_t *chp = (evch_chan_t *)ch;

	mutex_enter(&chp->ch_pubmx);
	chp->ch_nevents--;
	cv_signal(&chp->ch_pubcv);
	mutex_exit(&chp->ch_pubmx);
}

/*
 * Integer square root according to Newton's iteration.
 */
static uint32_t
evch_isqrt(uint64_t n)
{
	uint64_t	x = n >> 1;
	uint64_t	xn = x - 1;
	static uint32_t	lowval[] = { 0, 1, 1, 2 };

	if (n < 4) {
		return (lowval[n]);
	}
	/* Iterate until the estimate stops decreasing */
	while (xn < x) {
		x = xn;
		xn = (x + n / x) / 2;
	}
	return ((uint32_t)xn);
}

/*
 * First step sysevent channel initialization. Called when kernel memory
 * allocator is initialized.
 */
static void
evch_chinit()
{
	size_t k;

	/*
	 * Calculate limits: max no of channels and max no of events per
	 * channel. The smallest machine with 128 MByte will allow for
	 * >= 8 channels and an upper limit of 2048 events per channel.
	 * The event limit is the number of channels times 256 (hence
	 * the shift factor of 8). These numbers were selected arbitrarily.
	 */
	k = kmem_maxavail() >> 20;
	evch_channels_max = min(evch_isqrt(k), EVCH_MAX_CHANNELS);
	evch_events_max = evch_channels_max << 8;

	/*
	 * Will trigger creation of the global zone's evch state.
	 */
	zone_key_create(&evch_zone_key, evch_zoneinit, NULL, evch_zonefree);
}

/*
 * Second step sysevent channel initialization. Called when threads are ready.
 */
static void
evch_chinitthr()
{
	struct evch_globals *eg;
	evch_chan_t	*chp;
	evch_subd_t	*sdp;

	/*
	 * We're early enough in boot that we know that only the global
	 * zone exists; we only need to initialize its threads.
	 */
	eg = zone_getspecific(evch_zone_key, global_zone);
	ASSERT(eg != NULL);

	/* Create the delivery thread for every queue created so far */
	for (chp = evch_dl_next(&eg->evch_list, NULL); chp != NULL;
	    chp = evch_dl_next(&eg->evch_list, chp)) {
		for (sdp = evch_dl_next(&chp->ch_subscr, NULL); sdp;
		    sdp = evch_dl_next(&chp->ch_subscr, sdp)) {
			evch_evq_thrcreate(sdp->sd_queue);
		}
		evch_evq_thrcreate(chp->ch_queue);
	}
	evq_initcomplete = 1;
}

/*
 * Sysevent channel bind. Create channel and allocate binding structure.
 * The channel is created if it does not exist and EVCH_CREAT is set in
 * flags. Returns 0 and the new binding via *scpp on success, or an errno
 * value (EINVAL, ENOMEM, ENOENT).
 */
static int
evch_chbind(const char *chnam, evch_bind_t **scpp, uint32_t flags)
{
	struct evch_globals *eg;
	evch_bind_t	*bp;
	evch_chan_t	*p;
	char		*chn;
	size_t		namlen;
	int		rv;

	eg = zone_getspecific(evch_zone_key, curproc->p_zone);
	ASSERT(eg != NULL);

	/* Create channel if it does not exist */
	ASSERT(evch_dl_is_init(&eg->evch_list));
	if ((namlen = strlen(chnam) + 1) > MAX_CHNAME_LEN) {
		return (EINVAL);
	}
	mutex_enter(&eg->evch_list_lock);
	if ((p = (evch_chan_t *)evch_dl_search(&eg->evch_list, evch_namecmp,
	    (char *)chnam)) == NULL) {
		if (flags & EVCH_CREAT) {
			if (evch_dl_getnum(&eg->evch_list) >=
			    evch_channels_max) {
				mutex_exit(&eg->evch_list_lock);
				return (ENOMEM);
			}
			chn = kmem_alloc(namlen, KM_SLEEP);
			bcopy(chnam, chn, namlen);

			/* Allocate and initialize channel descriptor */
			p = kmem_zalloc(sizeof (evch_chan_t), KM_SLEEP);
			p->ch_name = chn;
			p->ch_namelen = namlen;
			mutex_init(&p->ch_mutex, NULL, MUTEX_DEFAULT, NULL);
			p->ch_queue = evch_evq_create();
			evch_dl_init(&p->ch_subscr);
			/*
			 * Before threads exist this runs in early boot;
			 * credentials are only recorded afterwards.
			 */
			if (evq_initcomplete) {
				p->ch_uid = crgetuid(curthread->t_cred);
				p->ch_gid = crgetgid(curthread->t_cred);
			}
			cv_init(&p->ch_pubcv, NULL, CV_DEFAULT, NULL);
			mutex_init(&p->ch_pubmx, NULL, MUTEX_DEFAULT, NULL);
			p->ch_maxev = min(EVCH_DEFAULT_EVENTS, evch_events_max);
			p->ch_maxsubscr = EVCH_MAX_SUBSCRIPTIONS;
			p->ch_maxbinds = evch_bindings_max;
			p->ch_ctime = gethrestime_sec();

			if (flags & (EVCH_HOLD_PEND | EVCH_HOLD_PEND_INDEF)) {
				if (flags & EVCH_HOLD_PEND_INDEF)
					p->ch_holdpend = CH_HOLD_PEND_INDEF;
				else
					p->ch_holdpend = CH_HOLD_PEND;

				evch_evq_stop(p->ch_queue);
			}

			/* Put new descriptor into channel list */
			evch_dl_add(&eg->evch_list, (evch_dlelem_t *)p);
		} else {
			mutex_exit(&eg->evch_list_lock);
			return (ENOENT);
		}
	}

	/* Check for max binds and create binding */
	mutex_enter(&p->ch_mutex);
	if (p->ch_bindings >= p->ch_maxbinds) {
		rv = ENOMEM;
		/*
		 * No need to destroy the channel because this call did not
		 * create it. Other bindings will be present if ch_maxbinds
		 * is exceeded.
		 */
		goto errorexit;
	}
	bp = kmem_alloc(sizeof (evch_bind_t), KM_SLEEP);
	bp->bd_channel = p;
	bp->bd_sublst = NULL;
	p->ch_bindings++;
	rv = 0;
	*scpp = bp;
errorexit:
	mutex_exit(&p->ch_mutex);
	mutex_exit(&eg->evch_list_lock);
	return (rv);
}

/*
 * Unbind: Free bind structure. Remove channel if last binding was freed.
 */
static void
evch_chunbind(evch_bind_t *bp)
{
	struct evch_globals *eg;
	evch_chan_t *chp = bp->bd_channel;

	eg = zone_getspecific(evch_zone_key, curproc->p_zone);
	ASSERT(eg != NULL);

	/* Lock order: zone channel-list lock first, then the channel lock */
	mutex_enter(&eg->evch_list_lock);
	mutex_enter(&chp->ch_mutex);
	ASSERT(chp->ch_bindings > 0);
	chp->ch_bindings--;
	kmem_free(bp, sizeof (evch_bind_t));
	if (chp->ch_bindings == 0 && evch_dl_getnum(&chp->ch_subscr) == 0 &&
	    (chp->ch_nevents == 0 || chp->ch_holdpend != CH_HOLD_PEND_INDEF)) {
		/*
		 * No more bindings and no persistent subscriber(s). If there
		 * are no events in the channel then destroy the channel;
		 * otherwise destroy the channel only if we're not holding
		 * pending events indefinitely.
		 */
		mutex_exit(&chp->ch_mutex);
		evch_dl_del(&eg->evch_list, &chp->ch_link);
		evch_evq_destroy(chp->ch_queue);
		mutex_destroy(&chp->ch_mutex);
		mutex_destroy(&chp->ch_pubmx);
		cv_destroy(&chp->ch_pubcv);
		kmem_free(chp->ch_name, chp->ch_namelen);
		kmem_free(chp, sizeof (evch_chan_t));
	} else
		mutex_exit(&chp->ch_mutex);
	mutex_exit(&eg->evch_list_lock);
}

/*
 * Count the '*' wildcard characters in a class subscription string.
 * A NULL class counts as zero wildcards.
 */
static int
wildcard_count(const char *class)
{
	int count = 0;
	char c;

	if (class == NULL)
		return (0);

	while ((c = *class++) != '\0') {
		if (c == '*')
			count++;
	}

	return (count);
}

/*
 * Subscribe to a channel. dtype is either EVCH_DELKERN for kernel callbacks
 * or EVCH_DELDOOR for door upcall delivery to user land. Depending on dtype
 * dinfo gives the call back routine address or the door handle.
 */
static int
evch_chsubscribe(evch_bind_t *bp, int dtype, const char *sid, const char *class,
    void *dinfo, void *cookie, int flags, pid_t pid)
{
	evch_chan_t *chp = bp->bd_channel;
	evch_eventq_t *eqp = chp->ch_queue;
	evch_subd_t *sdp;
	evch_subd_t *esp;
	int (*delivfkt)();
	char *clb = NULL;
	int clblen = 0;
	char *subid;
	int subidblen;

	/*
	 * Check if only known flags are set.
	 */
	if (flags & ~(EVCH_SUB_KEEP | EVCH_SUB_DUMP))
		return (EINVAL);

	/*
	 * Enforce a limit on the number of wildcards allowed in the class
	 * subscription string (limits recursion in pattern matching).
	 */
	if (wildcard_count(class) > EVCH_WILDCARD_MAX)
		return (EINVAL);

	/*
	 * Check if we have already a subscription with that name and if we
	 * have to reconnect the subscriber to a persistent subscription.
	 */
	mutex_enter(&chp->ch_mutex);
	if ((esp = (evch_subd_t *)evch_dl_search(&chp->ch_subscr,
	    evch_subidcmp, (char *)sid)) != NULL) {
		int error = 0;
		if ((flags & EVCH_SUB_KEEP) && (esp->sd_active == 0)) {
			/*
			 * Subscription with the name on hold, reconnect to
			 * existing queue. Only door (user land) subscribers
			 * can be persistent, hence the ASSERT.
			 */
			ASSERT(dtype == EVCH_DELDOOR);
			esp->sd_subnxt = bp->bd_sublst;
			bp->bd_sublst = esp;
			esp->sd_pid = pid;
			esp->sd_door = (door_handle_t)dinfo;
			esp->sd_active++;
			evch_evq_continue(esp->sd_queue);
		} else {
			/* Subscriber with given name already exists */
			error = EEXIST;
		}
		mutex_exit(&chp->ch_mutex);
		return (error);
	}

	if (evch_dl_getnum(&chp->ch_subscr) >= chp->ch_maxsubscr) {
		mutex_exit(&chp->ch_mutex);
		return (ENOMEM);
	}

	if (flags & EVCH_SUB_DUMP && evch_dl_search(&chp->ch_subscr,
	    evch_dumpflgcmp, NULL) != NULL) {
		/*
		 * Subscription with EVCH_SUB_DUMP flagged already exists.
		 * Only one subscription with EVCH_SUB_DUMP possible. Return
		 * error.
		 */
		mutex_exit(&chp->ch_mutex);
		return (EINVAL);
	}

	/* Copy the class filter string; NULL class means "all classes" */
	if (class != NULL) {
		clblen = strlen(class) + 1;
		clb = kmem_alloc(clblen, KM_SLEEP);
		bcopy(class, clb, clblen);
	}

	subidblen = strlen(sid) + 1;
	subid = kmem_alloc(subidblen, KM_SLEEP);
	bcopy(sid, subid, subidblen);

	/* Create per subscriber queue */
	sdp = kmem_zalloc(sizeof (evch_subd_t), KM_SLEEP);
	sdp->sd_queue = evch_evq_create();

	/* Subscribe to subscriber queue */
	sdp->sd_persist = flags & EVCH_SUB_KEEP ? 1 : 0;
	sdp->sd_dump = flags & EVCH_SUB_DUMP ? 1 : 0;
	sdp->sd_type = dtype;
	sdp->sd_cbcookie = cookie;
	sdp->sd_ident = subid;
	if (dtype == EVCH_DELKERN) {
		sdp->sd_callback = (kerndlv_f)dinfo;
		delivfkt = evch_kern_deliver;
	} else {
		sdp->sd_door = (door_handle_t)dinfo;
		delivfkt = evch_door_deliver;
	}
	sdp->sd_ssub =
	    evch_evq_sub(sdp->sd_queue, NULL, NULL, delivfkt, (void *)sdp);

	/* Connect per subscriber queue to main event queue */
	sdp->sd_msub = evch_evq_sub(eqp, evch_class_filter, clb,
	    evch_subq_deliver, (void *)sdp);
	sdp->sd_classname = clb;
	sdp->sd_clnsize = clblen;
	sdp->sd_pid = pid;
	sdp->sd_active++;

	/* Add subscription to binding */
	sdp->sd_subnxt = bp->bd_sublst;
	bp->bd_sublst = sdp;

	/* Add subscription to channel */
	evch_dl_add(&chp->ch_subscr, &sdp->sd_link);
	if (chp->ch_holdpend && evch_dl_getnum(&chp->ch_subscr) == 1) {

		/* Let main event queue run in case of HOLDPEND */
		evch_evq_continue(eqp);
	}
	mutex_exit(&chp->ch_mutex);

	return (0);
}

/*
 * If flag == EVCH_SUB_KEEP only non-persistent subscriptions are deleted.
 * When sid == NULL all subscriptions except the ones with EVCH_SUB_KEEP set
 * are removed.
 */
static void
evch_chunsubscribe(evch_bind_t *bp, const char *sid, uint32_t flags)
{
	evch_subd_t *sdp;
	evch_subd_t *next;
	evch_subd_t *prev;
	evch_chan_t *chp = bp->bd_channel;

	mutex_enter(&chp->ch_mutex);
	if (chp->ch_holdpend) {
		evch_evq_stop(chp->ch_queue);	/* Hold main event queue */
	}
	/* Walk the binding's subscription list, unlinking matching entries */
	prev = NULL;
	for (sdp = bp->bd_sublst; sdp; sdp = next) {
		if (sid == NULL || strcmp(sid, sdp->sd_ident) == 0) {
			if (flags == 0 || sdp->sd_persist == 0) {
				/*
				 * Disconnect subscriber queue from main event
				 * queue.
				 */
				evch_evq_unsub(chp->ch_queue, sdp->sd_msub);

				/* Destruct per subscriber queue */
				evch_evq_unsub(sdp->sd_queue, sdp->sd_ssub);
				evch_evq_destroy(sdp->sd_queue);
				/*
				 * Eliminate the subscriber data from channel
				 * list.
				 */
				evch_dl_del(&chp->ch_subscr, &sdp->sd_link);
				kmem_free(sdp->sd_classname, sdp->sd_clnsize);
				if (sdp->sd_type == EVCH_DELDOOR) {
					door_ki_rele(sdp->sd_door);
				}
				next = sdp->sd_subnxt;
				if (prev) {
					prev->sd_subnxt = next;
				} else {
					bp->bd_sublst = next;
				}
				kmem_free(sdp->sd_ident,
				    strlen(sdp->sd_ident) + 1);
				kmem_free(sdp, sizeof (evch_subd_t));
			} else {
				/*
				 * EVCH_SUB_KEEP case: put the persistent
				 * subscription on hold instead of deleting it;
				 * it stays in the channel list for a later
				 * reconnect via evch_chsubscribe().
				 */
				evch_evq_stop(sdp->sd_queue);
				if (sdp->sd_type == EVCH_DELDOOR) {
					door_ki_rele(sdp->sd_door);
				}
				sdp->sd_active--;
				ASSERT(sdp->sd_active == 0);
				next = sdp->sd_subnxt;
				prev = sdp;
			}
			/* A specific sid matches at most one entry */
			if (sid != NULL) {
				break;
			}
		} else {
			next = sdp->sd_subnxt;
			prev = sdp;
		}
	}
	if (!(chp->ch_holdpend && evch_dl_getnum(&chp->ch_subscr) == 0)) {
		/*
		 * Continue dispatch thread except if no subscribers are present
		 * in HOLDPEND mode.
		 */
		evch_evq_continue(chp->ch_queue);
	}
	mutex_exit(&chp->ch_mutex);
}

/*
 * Publish an event.
Returns zero on success and an error code else.
 */
static int
evch_chpublish(evch_bind_t *bp, sysevent_impl_t *ev, int flags)
{
	evch_chan_t *chp = bp->bd_channel;

	DTRACE_SYSEVENT2(post, evch_bind_t *, bp, sysevent_impl_t *, ev);

	mutex_enter(&chp->ch_pubmx);
	if (chp->ch_nevents >= chp->ch_maxev) {
		/* Channel full: fail, or block if the caller allows it */
		if (!(flags & EVCH_QWAIT)) {
			evch_evq_evfree(ev);
			mutex_exit(&chp->ch_pubmx);
			return (EAGAIN);
		} else {
			while (chp->ch_nevents >= chp->ch_maxev) {
				if (cv_wait_sig(&chp->ch_pubcv,
				    &chp->ch_pubmx) == 0) {

					/* Got Signal, return EINTR */
					evch_evq_evfree(ev);
					mutex_exit(&chp->ch_pubmx);
					return (EINTR);
				}
			}
		}
	}
	chp->ch_nevents++;
	mutex_exit(&chp->ch_pubmx);
	SE_TIME(ev) = gethrtime();
	SE_SEQ(ev) = log_sysevent_new_id();
	/*
	 * Add the destructor function to the event structure, now that the
	 * event is accounted for. The only task of the destructor is to
	 * decrement the channel event count. The evq_*() routines (including
	 * the event delivery thread) do not have knowledge of the channel
	 * data. So the anonymous destructor handles the channel data for it.
	 */
	evch_evq_evadd_dest(ev, evch_destr_event, (void *)chp);
	return (evch_evq_pub(chp->ch_queue, ev, flags) == 0 ? 0 : EAGAIN);
}

/*
 * Fills a buffer consecutive with the names of all available channels.
 * Returns the length of all name strings or -1 if buffer size was insufficient.
 */
static int
evch_chgetnames(char *buf, size_t size)
{
	struct evch_globals *eg;
	int len = 0;
	char *addr = buf;
	int max = size;
	evch_chan_t *chp;

	eg = zone_getspecific(evch_zone_key, curproc->p_zone);
	ASSERT(eg != NULL);

	mutex_enter(&eg->evch_list_lock);
	for (chp = evch_dl_next(&eg->evch_list, NULL); chp != NULL;
	    chp = evch_dl_next(&eg->evch_list, chp)) {
		len += chp->ch_namelen;
		/* ">=" keeps one byte free for the final terminator below */
		if (len >= max) {
			mutex_exit(&eg->evch_list_lock);
			return (-1);
		}
		bcopy(chp->ch_name, addr, chp->ch_namelen);
		addr += chp->ch_namelen;
	}
	mutex_exit(&eg->evch_list_lock);
	/* Empty string terminates the name list */
	addr[0] = 0;
	return (len + 1);
}

/*
 * Fills the data of one channel and all subscribers of that channel into
 * a buffer. Returns -1 if the channel name is invalid and 0 on buffer overflow.
 */
static int
evch_chgetchdata(char *chname, void *buf, size_t size)
{
	struct evch_globals *eg;
	char *cpaddr;
	int bufmax;
	int buflen;
	evch_chan_t *chp;
	sev_chinfo_t *p = (sev_chinfo_t *)buf;
	int chdlen;
	evch_subd_t *sdp;
	sev_subinfo_t *subp;
	int idlen;
	int len;

	eg = zone_getspecific(evch_zone_key, curproc->p_zone);
	ASSERT(eg != NULL);

	mutex_enter(&eg->evch_list_lock);
	chp = (evch_chan_t *)evch_dl_search(&eg->evch_list, evch_namecmp,
	    chname);
	if (chp == NULL) {
		mutex_exit(&eg->evch_list_lock);
		return (-1);
	}
	/* Fixed-size channel header precedes the variable subscriber info */
	chdlen = offsetof(sev_chinfo_t, cd_subinfo);
	if (size < chdlen) {
		mutex_exit(&eg->evch_list_lock);
		return (0);
	}
	p->cd_version = 0;
	p->cd_suboffs = chdlen;
	p->cd_uid = chp->ch_uid;
	p->cd_gid = chp->ch_gid;
	p->cd_perms = 0;
	p->cd_ctime = chp->ch_ctime;
	p->cd_maxev = chp->ch_maxev;
	p->cd_evhwm = EVCH_EVQ_HIGHWM(chp->ch_queue);
	p->cd_nevents = EVCH_EVQ_EVCOUNT(chp->ch_queue);
	p->cd_maxsub = chp->ch_maxsubscr;
	p->cd_nsub = evch_dl_getnum(&chp->ch_subscr);
	p->cd_maxbinds = chp->ch_maxbinds;
	p->cd_nbinds = chp->ch_bindings;
	p->cd_holdpend = chp->ch_holdpend;
	p->cd_limev = evch_events_max;
	cpaddr = (char *)p + chdlen;
	bufmax = size - chdlen;
	buflen = 0;

	/* One aligned sev_subinfo_t record per subscriber */
	for (sdp = evch_dl_next(&chp->ch_subscr, NULL); sdp != NULL;
	    sdp = evch_dl_next(&chp->ch_subscr, sdp)) {
		idlen = strlen(sdp->sd_ident) + 1;
		len = SE_ALIGN(offsetof(sev_subinfo_t, sb_strings) + idlen +
		    sdp->sd_clnsize);
		buflen += len;
		if (buflen >= bufmax) {
			mutex_exit(&eg->evch_list_lock);
			return (0);
		}
		subp = (sev_subinfo_t *)cpaddr;
		subp->sb_nextoff = len;
		subp->sb_stroff = offsetof(sev_subinfo_t, sb_strings);
		if (sdp->sd_classname) {
			bcopy(sdp->sd_classname, subp->sb_strings + idlen,
			    sdp->sd_clnsize);
			subp->sb_clnamoff = idlen;
		} else {
			/* No class filter: point at the ident's NUL byte */
			subp->sb_clnamoff = idlen - 1;
		}
		subp->sb_pid = sdp->sd_pid;
		subp->sb_nevents = EVCH_EVQ_EVCOUNT(sdp->sd_queue);
		subp->sb_evhwm = EVCH_EVQ_HIGHWM(sdp->sd_queue);
		subp->sb_persist = sdp->sd_persist;
		subp->sb_status = evch_evq_status(sdp->sd_queue);
		subp->sb_active = sdp->sd_active;
		subp->sb_dump = sdp->sd_dump;
		bcopy(sdp->sd_ident, subp->sb_strings, idlen);
		cpaddr += len;
	}
	mutex_exit(&eg->evch_list_lock);
	return (chdlen + buflen);
}

/*
 * Init iteration of all events of a channel. This function creates a new
 * event queue and puts all events from the channel into that queue.
 * Subsequent calls to evch_chgetnextev will deliver the events from that
 * queue. Only one thread per channel is allowed to read through the events.
 * Returns 0 on success and 1 if there is already someone reading the
 * events.
 * If argument subid == NULL, we look for a subscriber which has
 * flag EVCH_SUB_DUMP set.
 */
/*
 * Static variables that are used to traverse events of a channel in panic case.
 */
static evch_chan_t *evch_chan;
static evch_eventq_t *evch_subq;
static sysevent_impl_t *evch_curev;

static evchanq_t *
evch_chrdevent_init(evch_chan_t *chp, char *subid)
{
	evch_subd_t *sdp;
	void *ev;
	int pmqstat;	/* Prev status of main queue */
	int psqstat;	/* Prev status of subscriber queue */
	evchanq_t *snp;	/* Pointer to q with snapshot of ev */
	compare_f compfunc;

	compfunc = subid == NULL ? evch_dumpflgcmp : evch_subidcmp;
	if (panicstr != NULL) {
		/*
		 * Panic context: no allocation, no locking. Just record the
		 * channel and optional subscriber queue in the static panic
		 * cursor variables and return NULL.
		 */
		evch_chan = chp;
		evch_subq = NULL;
		evch_curev = NULL;
		if ((sdp = (evch_subd_t *)evch_dl_search(&chp->ch_subscr,
		    compfunc, subid)) != NULL) {
			evch_subq = sdp->sd_queue;
		}
		return (NULL);
	}
	mutex_enter(&chp->ch_mutex);
	sdp = (evch_subd_t *)evch_dl_search(&chp->ch_subscr, compfunc, subid);
	/*
	 * Stop main event queue and subscriber queue if not already
	 * in stop mode.
	 */
	pmqstat = evch_evq_status(chp->ch_queue);
	if (pmqstat == 0)
		evch_evq_stop(chp->ch_queue);
	/* psqstat is only set/read when sdp != NULL */
	if (sdp != NULL) {
		psqstat = evch_evq_status(sdp->sd_queue);
		if (psqstat == 0)
			evch_evq_stop(sdp->sd_queue);
	}
	/*
	 * Create event queue to make a snapshot of all events in the
	 * channel.
	 */
	snp = kmem_alloc(sizeof (evchanq_t), KM_SLEEP);
	snp->sn_queue = evch_evq_create();
	evch_evq_stop(snp->sn_queue);
	/*
	 * Make a snapshot of the subscriber queue and the main event queue.
	 */
	if (sdp != NULL) {
		ev = NULL;
		while ((ev = evch_evq_evnext(sdp->sd_queue, ev)) != NULL) {
			(void) evch_evq_pub(snp->sn_queue, ev, EVCH_SLEEP);
		}
	}
	ev = NULL;
	while ((ev = evch_evq_evnext(chp->ch_queue, ev)) != NULL) {
		(void) evch_evq_pub(snp->sn_queue, ev, EVCH_SLEEP);
	}
	snp->sn_nxtev = NULL;
	/*
	 * Restart main and subscriber queue if previously stopped
	 */
	if (sdp != NULL && psqstat == 0)
		evch_evq_continue(sdp->sd_queue);
	if (pmqstat == 0)
		evch_evq_continue(chp->ch_queue);
	mutex_exit(&chp->ch_mutex);
	return (snp);
}

/*
 * Free all resources of the event queue snapshot. In case of panic
 * context snp must be NULL and no resources need to be free'ed.
 */
static void
evch_chrdevent_fini(evchanq_t *snp)
{
	if (snp != NULL) {
		evch_evq_destroy(snp->sn_queue);
		kmem_free(snp, sizeof (evchanq_t));
	}
}

/*
 * Get address of next event from an event channel.
 * This function might be called in a panic context. In that case
 * no resources will be allocated and no locks grabbed.
 * In normal operation context a snapshot of the event queues of the
 * specified event channel will be taken.
 */
static sysevent_impl_t *
evch_chgetnextev(evchanq_t *snp)
{
	if (panicstr != NULL) {
		/* Walk via the static panic cursors set up at init time */
		if (evch_chan == NULL)
			return (NULL);
		if (evch_subq != NULL) {
			/*
			 * We have a subscriber queue. Traverse this queue
			 * first.
			 */
			if ((evch_curev = (sysevent_impl_t *)
			    evch_evq_evnext(evch_subq, evch_curev)) != NULL) {
				return (evch_curev);
			} else {
				/*
				 * All subscriber events traversed. evch_subq
				 * == NULL indicates to take the main event
				 * queue now.
				 */
				evch_subq = NULL;
			}
		}
		/*
		 * Traverse the main event queue.
		 */
		if ((evch_curev = (sysevent_impl_t *)
		    evch_evq_evnext(evch_chan->ch_queue, evch_curev)) ==
		    NULL) {
			evch_chan = NULL;
		}
		return (evch_curev);
	}
	ASSERT(snp != NULL);
	snp->sn_nxtev = (sysevent_impl_t *)evch_evq_evnext(snp->sn_queue,
	    snp->sn_nxtev);
	return (snp->sn_nxtev);
}

/*
 * The functions below build up the interface for the kernel to bind/unbind,
 * subscribe/unsubscribe and publish to event channels. It consists of the
 * following functions:
 *
 * sysevent_evc_bind - Bind to a channel. Create a channel if required
 * sysevent_evc_unbind - Unbind from a channel. Destroy ch. if last unbind
 * sysevent_evc_subscribe - Subscribe to events from a channel
 * sysevent_evc_unsubscribe - Unsubscribe from an event class
 * sysevent_evc_publish - Publish an event to an event channel
 * sysevent_evc_control - Various control operation on event channel
 *
 * The functions below are for evaluating a sysevent:
 *
 * sysevent_get_class_name - Get pointer to event class string
 * sysevent_get_subclass_name - Get pointer to event subclass string
 * sysevent_get_seq - Get unique event sequence number
 * sysevent_get_time - Get hrestime of event publish
 * sysevent_get_size - Get size of event structure
 * sysevent_get_pub - Get publisher string
 * sysevent_get_attr_list - Get copy of attribute list
 *
 * The following interfaces represent stability level project private
 * and allow to save the events of an event channel even in a panic case.
1734 * 1735 * sysevent_evc_walk_init - Take a snapshot of the events in a channel 1736 * sysevent_evc_walk_step - Read next event from snapshot 1737 * sysevent_evc_walk_fini - Free resources from event channel snapshot 1738 * sysevent_evc_event_attr - Get event payload address and size 1739 */ 1740 /* 1741 * allocate sysevent structure with optional space for attributes 1742 */ 1743 static sysevent_impl_t * 1744 sysevent_evc_alloc(const char *class, const char *subclass, const char *pub, 1745 size_t pub_sz, size_t atsz, uint32_t flag) 1746 { 1747 int payload_sz; 1748 int class_sz, subclass_sz; 1749 int aligned_class_sz, aligned_subclass_sz, aligned_pub_sz; 1750 sysevent_impl_t *ev; 1751 1752 /* 1753 * Calculate and reserve space for the class, subclass and 1754 * publisher strings in the event buffer 1755 */ 1756 class_sz = strlen(class) + 1; 1757 subclass_sz = strlen(subclass) + 1; 1758 1759 ASSERT((class_sz <= MAX_CLASS_LEN) && (subclass_sz <= 1760 MAX_SUBCLASS_LEN) && (pub_sz <= MAX_PUB_LEN)); 1761 1762 /* String sizes must be 64-bit aligned in the event buffer */ 1763 aligned_class_sz = SE_ALIGN(class_sz); 1764 aligned_subclass_sz = SE_ALIGN(subclass_sz); 1765 aligned_pub_sz = SE_ALIGN(pub_sz); 1766 1767 /* 1768 * Calculate payload size. Consider the space needed for alignment 1769 * and subtract the size of the uint64_t placeholder variables of 1770 * sysevent_impl_t. 
1771 */ 1772 payload_sz = (aligned_class_sz - sizeof (uint64_t)) + 1773 (aligned_subclass_sz - sizeof (uint64_t)) + 1774 (aligned_pub_sz - sizeof (uint64_t)) - sizeof (uint64_t) + 1775 atsz; 1776 1777 /* 1778 * Allocate event buffer plus additional payload overhead 1779 */ 1780 if ((ev = evch_evq_evzalloc(sizeof (sysevent_impl_t) + 1781 payload_sz, flag)) == NULL) { 1782 return (NULL); 1783 } 1784 1785 /* Initialize the event buffer data */ 1786 SE_VERSION(ev) = SYS_EVENT_VERSION; 1787 bcopy(class, SE_CLASS_NAME(ev), class_sz); 1788 1789 SE_SUBCLASS_OFF(ev) = SE_ALIGN(offsetof(sysevent_impl_t, 1790 se_class_name)) + aligned_class_sz; 1791 bcopy(subclass, SE_SUBCLASS_NAME(ev), subclass_sz); 1792 1793 SE_PUB_OFF(ev) = SE_SUBCLASS_OFF(ev) + aligned_subclass_sz; 1794 bcopy(pub, SE_PUB_NAME(ev), pub_sz); 1795 1796 SE_ATTR_PTR(ev) = (uint64_t)0; 1797 SE_PAYLOAD_SZ(ev) = payload_sz; 1798 1799 return (ev); 1800 } 1801 1802 /* 1803 * Initialize event channel handling queues. 1804 */ 1805 void 1806 sysevent_evc_init() 1807 { 1808 evch_chinit(); 1809 } 1810 1811 /* 1812 * Second initialization step: create threads, if event channels are already 1813 * created 1814 */ 1815 void 1816 sysevent_evc_thrinit() 1817 { 1818 evch_chinitthr(); 1819 } 1820 1821 int 1822 sysevent_evc_bind(const char *ch_name, evchan_t **scpp, uint32_t flags) 1823 { 1824 ASSERT(ch_name != NULL && scpp != NULL); 1825 ASSERT((flags & ~EVCH_B_FLAGS) == 0); 1826 return (evch_chbind(ch_name, (evch_bind_t **)scpp, flags)); 1827 } 1828 1829 int 1830 sysevent_evc_unbind(evchan_t *scp) 1831 { 1832 evch_bind_t *bp = (evch_bind_t *)scp; 1833 1834 ASSERT(scp != NULL); 1835 evch_chunsubscribe(bp, NULL, 0); 1836 evch_chunbind(bp); 1837 1838 return (0); 1839 } 1840 1841 int 1842 sysevent_evc_subscribe(evchan_t *scp, const char *sid, const char *class, 1843 int (*callb)(sysevent_t *ev, void *cookie), 1844 void *cookie, uint32_t flags) 1845 { 1846 ASSERT(scp != NULL && sid != NULL && class != NULL && callb != NULL); 1847 
ASSERT(flags == 0); 1848 if (strlen(sid) > MAX_SUBID_LEN) { 1849 return (EINVAL); 1850 } 1851 if (strcmp(class, EC_ALL) == 0) { 1852 class = NULL; 1853 } 1854 return (evch_chsubscribe((evch_bind_t *)scp, EVCH_DELKERN, sid, class, 1855 (void *)callb, cookie, 0, 0)); 1856 } 1857 1858 int 1859 sysevent_evc_unsubscribe(evchan_t *scp, const char *sid) 1860 { 1861 ASSERT(scp != NULL && sid != NULL); 1862 if (strcmp(sid, EVCH_ALLSUB) == 0) { 1863 sid = NULL; 1864 } 1865 evch_chunsubscribe((evch_bind_t *)scp, sid, 0); 1866 1867 return (0); 1868 } 1869 1870 /* 1871 * Publish kernel event. Returns 0 on success, error code else. 1872 * Optional attribute data is packed into the event structure. 1873 */ 1874 int 1875 sysevent_evc_publish(evchan_t *scp, const char *class, const char *subclass, 1876 const char *vendor, const char *pubs, nvlist_t *attr, uint32_t flags) 1877 { 1878 sysevent_impl_t *evp; 1879 char pub[MAX_PUB_LEN]; 1880 int pub_sz; /* includes terminating 0 */ 1881 int km_flags; 1882 size_t asz = 0; 1883 uint64_t attr_offset; 1884 caddr_t patt; 1885 int err; 1886 1887 ASSERT(scp != NULL && class != NULL && subclass != NULL && 1888 vendor != NULL && pubs != NULL); 1889 1890 ASSERT((flags & ~(EVCH_SLEEP | EVCH_NOSLEEP | EVCH_TRYHARD | 1891 EVCH_QWAIT)) == 0); 1892 1893 km_flags = flags & (EVCH_SLEEP | EVCH_NOSLEEP | EVCH_TRYHARD); 1894 ASSERT(km_flags == EVCH_SLEEP || km_flags == EVCH_NOSLEEP || 1895 km_flags == EVCH_TRYHARD); 1896 1897 pub_sz = snprintf(pub, MAX_PUB_LEN, "%s:kern:%s", vendor, pubs) + 1; 1898 if (pub_sz > MAX_PUB_LEN) 1899 return (EINVAL); 1900 1901 if (attr != NULL) { 1902 if ((err = nvlist_size(attr, &asz, NV_ENCODE_NATIVE)) != 0) { 1903 return (err); 1904 } 1905 } 1906 evp = sysevent_evc_alloc(class, subclass, pub, pub_sz, asz, km_flags); 1907 if (evp == NULL) { 1908 return (ENOMEM); 1909 } 1910 if (attr != NULL) { 1911 /* 1912 * Pack attributes into event buffer. Event buffer already 1913 * has enough room for the packed nvlist. 
1914 */ 1915 attr_offset = SE_ATTR_OFF(evp); 1916 patt = (caddr_t)evp + attr_offset; 1917 1918 err = nvlist_pack(attr, &patt, &asz, NV_ENCODE_NATIVE, 1919 km_flags & EVCH_SLEEP ? KM_SLEEP : KM_NOSLEEP); 1920 1921 ASSERT(err != ENOMEM); 1922 1923 if (err != 0) { 1924 return (EINVAL); 1925 } 1926 1927 evp->seh_attr_off = attr_offset; 1928 SE_FLAG(evp) = SE_PACKED_BUF; 1929 } 1930 return (evch_chpublish((evch_bind_t *)scp, evp, flags)); 1931 } 1932 1933 int 1934 sysevent_evc_control(evchan_t *scp, int cmd, ...) 1935 { 1936 va_list ap; 1937 evch_chan_t *chp = ((evch_bind_t *)scp)->bd_channel; 1938 uint32_t *chlenp; 1939 uint32_t chlen; 1940 uint32_t ochlen; 1941 int rc = 0; 1942 1943 if (scp == NULL) { 1944 return (EINVAL); 1945 } 1946 1947 va_start(ap, cmd); 1948 mutex_enter(&chp->ch_mutex); 1949 switch (cmd) { 1950 case EVCH_GET_CHAN_LEN: 1951 chlenp = va_arg(ap, uint32_t *); 1952 *chlenp = chp->ch_maxev; 1953 break; 1954 case EVCH_SET_CHAN_LEN: 1955 chlen = va_arg(ap, uint32_t); 1956 ochlen = chp->ch_maxev; 1957 chp->ch_maxev = min(chlen, evch_events_max); 1958 if (ochlen < chp->ch_maxev) { 1959 cv_signal(&chp->ch_pubcv); 1960 } 1961 break; 1962 case EVCH_GET_CHAN_LEN_MAX: 1963 *va_arg(ap, uint32_t *) = evch_events_max; 1964 break; 1965 default: 1966 rc = EINVAL; 1967 } 1968 1969 mutex_exit(&chp->ch_mutex); 1970 va_end(ap); 1971 return (rc); 1972 } 1973 1974 /* 1975 * Project private interface to take a snapshot of all events of the 1976 * specified event channel. Argument subscr may be a subscriber id, the empty 1977 * string "", or NULL. The empty string indicates that no subscriber is 1978 * selected, for example if a previous subscriber died. sysevent_evc_walk_next() 1979 * will deliver events from the main event queue in this case. If subscr is 1980 * NULL, the subscriber with the EVCH_SUB_DUMP flag set (subd->sd_dump != 0) 1981 * will be selected. 1982 * 1983 * In panic case this function returns NULL. This is legal. 
The NULL has 1984 * to be delivered to sysevent_evc_walk_step() and sysevent_evc_walk_fini(). 1985 */ 1986 evchanq_t * 1987 sysevent_evc_walk_init(evchan_t *scp, char *subscr) 1988 { 1989 if (panicstr != NULL && scp == NULL) 1990 return (NULL); 1991 ASSERT(scp != NULL); 1992 return (evch_chrdevent_init(((evch_bind_t *)scp)->bd_channel, subscr)); 1993 } 1994 1995 /* 1996 * Project private interface to read events from a previously taken 1997 * snapshot (with sysevent_evc_walk_init). In case of panic events 1998 * are retrieved directly from the channel data structures. No resources 1999 * are allocated and no mutexes are grabbed in panic context. 2000 */ 2001 sysevent_t * 2002 sysevent_evc_walk_step(evchanq_t *evcq) 2003 { 2004 return ((sysevent_t *)evch_chgetnextev(evcq)); 2005 } 2006 2007 /* 2008 * Project private interface to free a previously taken snapshot. 2009 */ 2010 void 2011 sysevent_evc_walk_fini(evchanq_t *evcq) 2012 { 2013 evch_chrdevent_fini(evcq); 2014 } 2015 2016 /* 2017 * Get address and size of an event payload. Returns NULL when no 2018 * payload present. 2019 */ 2020 char * 2021 sysevent_evc_event_attr(sysevent_t *ev, size_t *plsize) 2022 { 2023 char *attrp; 2024 size_t aoff; 2025 size_t asz; 2026 2027 aoff = SE_ATTR_OFF(ev); 2028 attrp = (char *)ev + aoff; 2029 asz = *plsize = SE_SIZE(ev) - aoff; 2030 return (asz ? 
attrp : NULL); 2031 } 2032 2033 /* 2034 * sysevent_get_class_name - Get class name string 2035 */ 2036 char * 2037 sysevent_get_class_name(sysevent_t *ev) 2038 { 2039 return (SE_CLASS_NAME(ev)); 2040 } 2041 2042 /* 2043 * sysevent_get_subclass_name - Get subclass name string 2044 */ 2045 char * 2046 sysevent_get_subclass_name(sysevent_t *ev) 2047 { 2048 return (SE_SUBCLASS_NAME(ev)); 2049 } 2050 2051 /* 2052 * sysevent_get_seq - Get event sequence id 2053 */ 2054 uint64_t 2055 sysevent_get_seq(sysevent_t *ev) 2056 { 2057 return (SE_SEQ(ev)); 2058 } 2059 2060 /* 2061 * sysevent_get_time - Get event timestamp 2062 */ 2063 void 2064 sysevent_get_time(sysevent_t *ev, hrtime_t *etime) 2065 { 2066 *etime = SE_TIME(ev); 2067 } 2068 2069 /* 2070 * sysevent_get_size - Get event buffer size 2071 */ 2072 size_t 2073 sysevent_get_size(sysevent_t *ev) 2074 { 2075 return ((size_t)SE_SIZE(ev)); 2076 } 2077 2078 /* 2079 * sysevent_get_pub - Get publisher name string 2080 */ 2081 char * 2082 sysevent_get_pub(sysevent_t *ev) 2083 { 2084 return (SE_PUB_NAME(ev)); 2085 } 2086 2087 /* 2088 * sysevent_get_attr_list - stores address of a copy of the attribute list 2089 * associated with the given sysevent buffer. The list must be freed by the 2090 * caller. 2091 */ 2092 int 2093 sysevent_get_attr_list(sysevent_t *ev, nvlist_t **nvlist) 2094 { 2095 int error; 2096 caddr_t attr; 2097 size_t attr_len; 2098 uint64_t attr_offset; 2099 2100 *nvlist = NULL; 2101 if (SE_FLAG(ev) != SE_PACKED_BUF) { 2102 return (EINVAL); 2103 } 2104 attr_offset = SE_ATTR_OFF(ev); 2105 if (SE_SIZE(ev) == attr_offset) { 2106 return (EINVAL); 2107 } 2108 2109 /* unpack nvlist */ 2110 attr = (caddr_t)ev + attr_offset; 2111 attr_len = SE_SIZE(ev) - attr_offset; 2112 if ((error = nvlist_unpack(attr, attr_len, nvlist, 0)) != 0) { 2113 error = error != ENOMEM ? 
EINVAL : error; 2114 return (error); 2115 } 2116 return (0); 2117 } 2118 2119 /* 2120 * Functions called by the sysevent driver for general purpose event channels 2121 * 2122 * evch_usrchanopen - Create/Bind to an event channel 2123 * evch_usrchanclose - Unbind/Destroy event channel 2124 * evch_usrallocev - Allocate event data structure 2125 * evch_usrfreeev - Free event data structure 2126 * evch_usrpostevent - Publish event 2127 * evch_usrsubscribe - Subscribe (register callback function) 2128 * evch_usrunsubscribe - Unsubscribe 2129 * evch_usrcontrol_set - Set channel properties 2130 * evch_usrcontrol_get - Get channel properties 2131 * evch_usrgetchnames - Get list of channel names 2132 * evch_usrgetchdata - Get data of an event channel 2133 */ 2134 evchan_t * 2135 evch_usrchanopen(const char *name, uint32_t flags, int *err) 2136 { 2137 evch_bind_t *bp = NULL; 2138 2139 *err = evch_chbind(name, &bp, flags); 2140 return ((evchan_t *)bp); 2141 } 2142 2143 /* 2144 * Unbind from the channel. 2145 */ 2146 void 2147 evch_usrchanclose(evchan_t *cbp) 2148 { 2149 evch_chunbind((evch_bind_t *)cbp); 2150 } 2151 2152 /* 2153 * Allocates log_evch_eventq_t structure but returns the pointer of the embedded 2154 * sysevent_impl_t structure as the opaque sysevent_t * data type 2155 */ 2156 sysevent_impl_t * 2157 evch_usrallocev(size_t evsize, uint32_t flags) 2158 { 2159 return ((sysevent_impl_t *)evch_evq_evzalloc(evsize, flags)); 2160 } 2161 2162 /* 2163 * Free evch_eventq_t structure 2164 */ 2165 void 2166 evch_usrfreeev(sysevent_impl_t *ev) 2167 { 2168 evch_evq_evfree((void *)ev); 2169 } 2170 2171 /* 2172 * Posts an event to the given channel. The event structure has to be 2173 * allocated by evch_usrallocev(). Returns zero on success and an error 2174 * code else. Attributes have to be packed and included in the event structure. 
2175 * 2176 */ 2177 int 2178 evch_usrpostevent(evchan_t *bp, sysevent_impl_t *ev, uint32_t flags) 2179 { 2180 return (evch_chpublish((evch_bind_t *)bp, ev, flags)); 2181 } 2182 2183 /* 2184 * Subscribe function for user land subscriptions 2185 */ 2186 int 2187 evch_usrsubscribe(evchan_t *bp, const char *sid, const char *class, 2188 int d, uint32_t flags) 2189 { 2190 door_handle_t dh = door_ki_lookup(d); 2191 int rv; 2192 2193 if (dh == NULL) { 2194 return (EINVAL); 2195 } 2196 if ((rv = evch_chsubscribe((evch_bind_t *)bp, EVCH_DELDOOR, sid, class, 2197 (void *)dh, NULL, flags, curproc->p_pid)) != 0) { 2198 door_ki_rele(dh); 2199 } 2200 return (rv); 2201 } 2202 2203 /* 2204 * Flag can be EVCH_SUB_KEEP or 0. EVCH_SUB_KEEP preserves persistent 2205 * subscribers 2206 */ 2207 void 2208 evch_usrunsubscribe(evchan_t *bp, const char *subid, uint32_t flags) 2209 { 2210 evch_chunsubscribe((evch_bind_t *)bp, subid, flags); 2211 } 2212 2213 /*ARGSUSED*/ 2214 int 2215 evch_usrcontrol_set(evchan_t *bp, int cmd, uint32_t value) 2216 { 2217 evch_chan_t *chp = ((evch_bind_t *)bp)->bd_channel; 2218 uid_t uid = crgetuid(curthread->t_cred); 2219 int rc = 0; 2220 2221 mutex_enter(&chp->ch_mutex); 2222 switch (cmd) { 2223 case EVCH_SET_CHAN_LEN: 2224 if (uid && uid != chp->ch_uid) { 2225 rc = EACCES; 2226 break; 2227 } 2228 chp->ch_maxev = min(value, evch_events_max); 2229 break; 2230 default: 2231 rc = EINVAL; 2232 } 2233 mutex_exit(&chp->ch_mutex); 2234 return (rc); 2235 } 2236 2237 /*ARGSUSED*/ 2238 int 2239 evch_usrcontrol_get(evchan_t *bp, int cmd, uint32_t *value) 2240 { 2241 evch_chan_t *chp = ((evch_bind_t *)bp)->bd_channel; 2242 int rc = 0; 2243 2244 mutex_enter(&chp->ch_mutex); 2245 switch (cmd) { 2246 case EVCH_GET_CHAN_LEN: 2247 *value = chp->ch_maxev; 2248 break; 2249 case EVCH_GET_CHAN_LEN_MAX: 2250 *value = evch_events_max; 2251 break; 2252 default: 2253 rc = EINVAL; 2254 } 2255 mutex_exit(&chp->ch_mutex); 2256 return (rc); 2257 } 2258 2259 int 2260 
evch_usrgetchnames(char *buf, size_t size) 2261 { 2262 return (evch_chgetnames(buf, size)); 2263 } 2264 2265 int 2266 evch_usrgetchdata(char *chname, void *buf, size_t size) 2267 { 2268 return (evch_chgetchdata(chname, buf, size)); 2269 } 2270