1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * syseventd - The system event daemon 31 * 32 * This daemon dispatches event buffers received from the 33 * kernel to all interested SLM clients. SLMs in turn 34 * deliver the buffers to their particular application 35 * clients. 
36 */ 37 #include <stdio.h> 38 #include <sys/types.h> 39 #include <dirent.h> 40 #include <stdarg.h> 41 #include <stddef.h> 42 #include <stdlib.h> 43 #include <dlfcn.h> 44 #include <door.h> 45 #include <errno.h> 46 #include <fcntl.h> 47 #include <signal.h> 48 #include <strings.h> 49 #include <unistd.h> 50 #include <synch.h> 51 #include <syslog.h> 52 #include <thread.h> 53 #include <libsysevent.h> 54 #include <limits.h> 55 #include <locale.h> 56 #include <sys/sysevent.h> 57 #include <sys/sysevent_impl.h> 58 #include <sys/modctl.h> 59 #include <sys/stat.h> 60 #include <sys/systeminfo.h> 61 #include <sys/wait.h> 62 63 #include "sysevent_signal.h" 64 #include "syseventd.h" 65 #include "message.h" 66 67 extern int insert_client(void *client, int client_type, int retry_limit); 68 extern void delete_client(int id); 69 extern void initialize_client_tbl(void); 70 71 extern struct sysevent_client *sysevent_client_tbl[]; 72 extern mutex_t client_tbl_lock; 73 74 #define DEBUG_LEVEL_FORK 9 /* will run in background at all */ 75 /* levels less than DEBUG_LEVEL_FORK */ 76 77 int debug_level = 0; 78 char *root_dir = ""; /* Relative root for lock and door */ 79 80 /* Maximum number of outstanding events dispatched */ 81 #define SE_EVENT_DISPATCH_CNT 100 82 83 static int upcall_door; /* Kernel event door */ 84 static int door_upcall_retval; /* Kernel event posting return value */ 85 static int fini_pending = 0; /* fini pending flag */ 86 static int deliver_buf = 0; /* Current event buffer from kernel */ 87 static int dispatch_buf = 0; /* Current event buffer dispatched */ 88 static sysevent_t **eventbuf; /* Global array of event buffers */ 89 static struct ev_completion *event_compq; /* Event completion queue */ 90 static mutex_t ev_comp_lock; /* Event completion queue lock */ 91 static mutex_t err_mutex; /* error logging lock */ 92 static mutex_t door_lock; /* sync door return access */ 93 static rwlock_t mod_unload_lock; /* sync module unloading */ 94 95 /* declarations and 
definitions for avoiding multiple daemons running */ 96 #define DAEMON_LOCK_FILE "/etc/sysevent/syseventd_lock" 97 char local_lock_file[PATH_MAX + 1]; 98 static int hold_daemon_lock; 99 static int daemon_lock_fd; 100 101 /* 102 * sema_eventbuf - guards against the global buffer eventbuf 103 * being written to before it has been dispatched to clients 104 * 105 * sema_dispatch - synchronizes between the kernel uploading thread 106 * (producer) and the userland dispatch_message thread (consumer). 107 * 108 * sema_resource - throttles outstanding event consumption. 109 * 110 * event_comp_cv - synchronizes threads waiting for the event completion queue 111 * to empty or become active. 112 */ 113 static sema_t sema_eventbuf, sema_dispatch, sema_resource; 114 static cond_t event_comp_cv; 115 116 /* Self-tuning concurrency level */ 117 #define MIN_CONCURRENCY_LEVEL 4 118 static int concurrency_level = MIN_CONCURRENCY_LEVEL; 119 120 121 /* SLM defines */ 122 #define MODULE_SUFFIX ".so" 123 #define EVENT_FINI "slm_fini" 124 #define EVENT_INIT "slm_init" 125 126 #define SE_TIMEOUT 60 /* Client dispatch timeout (seconds) */ 127 128 /* syslog message related */ 129 static int logflag = 0; 130 static char *prog; 131 132 /* function prototypes */ 133 static void door_upcall(void *cookie, char *args, size_t alen, door_desc_t *ddp, 134 uint_t ndid); 135 static void dispatch_message(void); 136 static int dispatch(void); 137 static void event_completion_thr(void); 138 static void usage(void); 139 140 static void syseventd_init(void); 141 static void syseventd_fini(int sig); 142 143 static pid_t enter_daemon_lock(void); 144 static void exit_daemon_lock(void); 145 146 static void 147 usage() { 148 (void) fprintf(stderr, "usage: syseventd [-d <debug_level>] " 149 "[-r <root_dir>]\n"); 150 (void) fprintf(stderr, "higher debug levels get progressively "); 151 (void) fprintf(stderr, "more detailed debug information.\n"); 152 (void) fprintf(stderr, "syseventd will run in background if "); 
153 (void) fprintf(stderr, "run with a debug_level less than %d.\n", 154 DEBUG_LEVEL_FORK); 155 exit(2); 156 } 157 158 159 /* common exit function which ensures releasing locks */ 160 void 161 syseventd_exit(int status) 162 { 163 syseventd_print(1, "exit status = %d\n", status); 164 165 if (hold_daemon_lock) { 166 exit_daemon_lock(); 167 } 168 169 exit(status); 170 } 171 172 173 /* 174 * hup_handler - SIGHUP handler. SIGHUP is used to force a reload of 175 * all SLMs. During fini, events are drained from all 176 * client event queues. The events that have been consumed 177 * by all clients are freed from the kernel event queue. 178 * 179 * Events that have not yet been delivered to all clients 180 * are not freed and will be replayed after all SLMs have 181 * been (re)loaded. 182 * 183 * After all client event queues have been drained, each 184 * SLM client is unloaded. The init phase will (re)load 185 * each SLM and initiate event replay and delivery from 186 * the kernel. 187 * 188 */ 189 /*ARGSUSED*/ 190 static void 191 hup_handler(int sig) 192 { 193 syseventd_err_print(SIGHUP_CAUGHT); 194 (void) fflush(0); 195 syseventd_fini(sig); 196 syseventd_init(); 197 syseventd_err_print(DAEMON_RESTARTED); 198 (void) fflush(0); 199 } 200 201 /* 202 * Fault handler for other signals caught 203 */ 204 /*ARGSUSED*/ 205 static void 206 flt_handler(int sig) 207 { 208 char signame[SIG2STR_MAX]; 209 210 if (sig2str(sig, signame) == -1) { 211 syseventd_err_print(UNKNOWN_SIGNAL_CAUGHT, sig); 212 } 213 214 (void) se_signal_sethandler(sig, SIG_DFL, NULL); 215 216 switch (sig) { 217 case SIGINT: 218 case SIGSTOP: 219 case SIGTERM: 220 /* Close kernel door */ 221 (void) door_revoke(upcall_door); 222 223 /* Gracefully exit current event delivery threads */ 224 syseventd_fini(sig); 225 226 (void) fflush(0); 227 (void) se_signal_unblockall(); 228 syseventd_exit(1); 229 /*NOTREACHED*/ 230 default: 231 syseventd_err_print(FATAL_ERROR); 232 (void) fflush(0); 233 234 } 235 } 236 237 static 
void 238 sigwait_thr() 239 { 240 int sig; 241 int err; 242 sigset_t signal_set; 243 244 for (;;) { 245 syseventd_print(3, "sigwait thread waiting for signal\n"); 246 (void) sigfillset(&signal_set); 247 err = sigwait(&signal_set, &sig); 248 if (err) { 249 syseventd_exit(2); 250 } 251 252 /* 253 * Block all signals until the signal handler completes 254 */ 255 if (sig == SIGHUP) { 256 hup_handler(sig); 257 } else { 258 flt_handler(sig); 259 } 260 } 261 /* NOTREACHED */ 262 } 263 264 static void 265 set_root_dir(char *dir) 266 { 267 root_dir = malloc(strlen(dir) + 1); 268 if (root_dir == NULL) { 269 syseventd_err_print(INIT_ROOT_DIR_ERR, strerror(errno)); 270 syseventd_exit(2); 271 } 272 (void) strcpy(root_dir, dir); 273 } 274 275 int 276 main(int argc, char **argv) 277 { 278 int i, c; 279 int fd; 280 pid_t pid; 281 extern char *optarg; 282 283 (void) setlocale(LC_ALL, ""); 284 (void) textdomain(TEXT_DOMAIN); 285 286 if (getuid() != 0) { 287 (void) fprintf(stderr, "Must be root to run syseventd\n"); 288 syseventd_exit(1); 289 } 290 291 if (argc > 5) { 292 usage(); 293 } 294 295 if ((prog = strrchr(argv[0], '/')) == NULL) { 296 prog = argv[0]; 297 } else { 298 prog++; 299 } 300 301 if ((c = getopt(argc, argv, "d:r:")) != EOF) { 302 switch (c) { 303 case 'd': 304 debug_level = atoi(optarg); 305 break; 306 case 'r': 307 /* 308 * Private flag for suninstall to run 309 * daemon during install. 
310 */ 311 set_root_dir(optarg); 312 break; 313 case '?': 314 default: 315 usage(); 316 } 317 } 318 319 /* demonize ourselves */ 320 if (debug_level < DEBUG_LEVEL_FORK) { 321 322 if (fork()) { 323 syseventd_exit(0); 324 } 325 326 /* child */ 327 328 (void) chdir("/"); 329 (void) setsid(); 330 if (debug_level <= 1) { 331 closefrom(0); 332 fd = open("/dev/null", 0); 333 (void) dup2(fd, 1); 334 (void) dup2(fd, 2); 335 logflag = 1; 336 } 337 } 338 339 openlog("syseventd", LOG_PID, LOG_DAEMON); 340 341 (void) mutex_init(&err_mutex, USYNC_THREAD, NULL); 342 343 syseventd_print(8, 344 "syseventd started, debug level = %d\n", debug_level); 345 346 /* only one instance of syseventd can run at a time */ 347 if ((pid = enter_daemon_lock()) != getpid()) { 348 syseventd_print(1, 349 "event daemon pid %ld already running\n", pid); 350 exit(3); 351 } 352 353 /* initialize semaphores and eventbuf */ 354 (void) sema_init(&sema_eventbuf, SE_EVENT_DISPATCH_CNT, 355 USYNC_THREAD, NULL); 356 (void) sema_init(&sema_dispatch, 0, USYNC_THREAD, NULL); 357 (void) sema_init(&sema_resource, SE_EVENT_DISPATCH_CNT, 358 USYNC_THREAD, NULL); 359 (void) cond_init(&event_comp_cv, USYNC_THREAD, NULL); 360 eventbuf = (sysevent_t **)calloc(SE_EVENT_DISPATCH_CNT, 361 sizeof (sysevent_t *)); 362 if (eventbuf == NULL) { 363 syseventd_print(1, "Unable to allocate event buffer array\n"); 364 exit(2); 365 } 366 for (i = 0; i < SE_EVENT_DISPATCH_CNT; ++i) { 367 eventbuf[i] = malloc(LOGEVENT_BUFSIZE); 368 if (eventbuf[i] == NULL) { 369 syseventd_print(1, "Unable to allocate event " 370 "buffers\n"); 371 exit(2); 372 } 373 } 374 375 (void) mutex_init(&client_tbl_lock, USYNC_THREAD, NULL); 376 (void) mutex_init(&ev_comp_lock, USYNC_THREAD, NULL); 377 (void) mutex_init(&door_lock, USYNC_THREAD, NULL); 378 (void) rwlock_init(&mod_unload_lock, USYNC_THREAD, NULL); 379 380 event_compq = NULL; 381 382 syseventd_print(8, "start the message thread running\n"); 383 384 /* 385 * Block all signals to all threads include 
the main thread. 386 * The sigwait_thr thread will process any signals and initiate 387 * a graceful recovery if possible. 388 */ 389 if (se_signal_blockall() < 0) { 390 syseventd_err_print(INIT_SIG_BLOCK_ERR); 391 syseventd_exit(2); 392 } 393 394 if (thr_create(NULL, NULL, (void *(*)(void *))dispatch_message, 395 (void *)0, 0, NULL) < 0) { 396 syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno)); 397 syseventd_exit(2); 398 } 399 if (thr_create(NULL, NULL, 400 (void *(*)(void *))event_completion_thr, NULL, 401 THR_BOUND, NULL) != 0) { 402 syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno)); 403 syseventd_exit(2); 404 } 405 /* Create signal catching thread */ 406 if (thr_create(NULL, NULL, (void *(*)(void *))sigwait_thr, 407 NULL, 0, NULL) < 0) { 408 syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno)); 409 syseventd_exit(2); 410 } 411 412 setbuf(stdout, (char *)NULL); 413 414 /* Initialize and load SLM clients */ 415 initialize_client_tbl(); 416 syseventd_init(); 417 418 syseventd_print(8, "Pausing\n"); 419 420 for (;;) { 421 (void) pause(); 422 } 423 /* NOTREACHED */ 424 return (0); 425 } 426 427 /* 428 * door_upcall - called from the kernel via kernel sysevent door 429 * to upload event(s). 430 * 431 * This routine should never block. If resources are 432 * not available to immediately accept the event buffer 433 * EAGAIN is returned to the kernel. 434 * 435 * Once resources are available, the kernel is notified 436 * via a modctl interface to resume event delivery to 437 * syseventd. 
438 * 439 */ 440 /*ARGSUSED*/ 441 static void 442 door_upcall(void *cookie, char *args, size_t alen, 443 door_desc_t *ddp, uint_t ndid) 444 { 445 sysevent_t *ev; 446 int rval; 447 448 449 (void) mutex_lock(&door_lock); 450 if (args == NULL) { 451 rval = EINVAL; 452 } else if (sema_trywait(&sema_eventbuf)) { 453 ev = (sysevent_t *) 454 &((log_event_upcall_arg_t *)(void *)args)->buf; 455 syseventd_print(2, "door_upcall: busy event %llx " 456 "retry\n", sysevent_get_seq(ev)); 457 rval = door_upcall_retval = EAGAIN; 458 } else { 459 /* 460 * Copy received message to local buffer. 461 */ 462 size_t size; 463 ev = (sysevent_t *) 464 &((log_event_upcall_arg_t *)(void *)args)->buf; 465 466 syseventd_print(2, "door_upcall: event %llx in eventbuf %d\n", 467 sysevent_get_seq(ev), deliver_buf); 468 size = sysevent_get_size(ev) > LOGEVENT_BUFSIZE ? 469 LOGEVENT_BUFSIZE : sysevent_get_size(ev); 470 (void) bcopy(ev, eventbuf[deliver_buf], size); 471 deliver_buf = (deliver_buf + 1) % SE_EVENT_DISPATCH_CNT; 472 rval = 0; 473 (void) sema_post(&sema_dispatch); 474 } 475 476 (void) mutex_unlock(&door_lock); 477 478 /* 479 * Filling in return values for door_return 480 */ 481 (void) door_return((void *)&rval, sizeof (rval), NULL, 0); 482 (void) door_return(NULL, 0, NULL, 0); 483 } 484 485 /* 486 * dispatch_message - dispatch message thread 487 * This thread spins until an event buffer is delivered 488 * delivered from the kernel. 489 * 490 * It will wait to dispatch an event to any clients 491 * until adequate resources are available to process 492 * the event buffer. 
 */
static void
dispatch_message(void)
{
	int error;

	for (;;) {
		syseventd_print(3, "dispatch_message: thread started\n");
		/*
		 * Spin till a message comes
		 * (a nonzero sema_wait return means the wait was
		 * interrupted; just log and try again)
		 */
		while (sema_wait(&sema_dispatch) != 0) {
			syseventd_print(1,
			    "dispatch_message: sema_wait failed\n");
			(void) sleep(1);
		}

		syseventd_print(3, "dispatch_message: sema_dispatch\n");

		/*
		 * Wait for available resources
		 * (sema_resource throttles how many events may be
		 * outstanding; event_completion_thr posts it back)
		 */
		while (sema_wait(&sema_resource) != 0) {
			syseventd_print(1, "dispatch_message: sema_wait "
			    "failed\n");
			(void) sleep(1);
		}

		syseventd_print(2, "dispatch_message: eventbuf %d\n",
		    dispatch_buf);

		/*
		 * Client dispatch
		 * (dispatch() returns EAGAIN on transient allocation or
		 * modctl failures; keep retrying the same buffer)
		 */
		do {
			error = dispatch();
		} while (error == EAGAIN);

		syseventd_print(2, "eventbuf %d dispatched\n", dispatch_buf);
		dispatch_buf = (dispatch_buf + 1) % SE_EVENT_DISPATCH_CNT;

		/*
		 * kernel received a busy signal -
		 * kickstart the kernel delivery thread
		 * door_lock blocks the kernel so we hold it for the
		 * shortest time possible.
		 */
		(void) mutex_lock(&door_lock);
		if (door_upcall_retval == EAGAIN && !fini_pending) {
			syseventd_print(3, "dispatch_message: retrigger "
			    "door_upcall_retval = %d\n",
			    door_upcall_retval);
			(void) modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH,
			    NULL, NULL, NULL, 0);
			door_upcall_retval = 0;
		}
		(void) mutex_unlock(&door_lock);
	}
	/* NOTREACHED */
}

/*
 * drain_eventq - Called to drain all pending events from the client's
 *		event queue.
 *		Events not yet dispatched are completed with the given
 *		error status; every queue entry is freed.
 *		NOTE(review): both visible callers in this file hold
 *		scp->client_lock across this call — confirm any other
 *		callers do the same.
 */
static void
drain_eventq(struct sysevent_client *scp, int status)
{
	struct event_dispatch_pkg *d_pkg;
	struct event_dispatchq *eventq, *eventq_next;

	syseventd_print(3, "Draining eventq for client %d\n",
	    scp->client_num);

	eventq = scp->eventq;
	while (eventq) {
		/*
		 * Mark all dispatched events as completed, but indicate the
		 * error status
		 */
		d_pkg = eventq->d_pkg;

		syseventd_print(4, "drain event 0X%llx for client %d\n",
		    sysevent_get_seq(d_pkg->ev), scp->client_num);

		/*
		 * Only events never handed to the delivery loop are
		 * completed here; in-flight ones are completed by the
		 * delivery thread itself.
		 */
		if (d_pkg->completion_state == SE_NOT_DISPATCHED) {
			d_pkg->completion_status = status;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);
		}

		eventq_next = eventq->next;
		free(eventq);
		eventq = eventq_next;
		scp->eventq = eventq;
	}
}

/*
 * client_deliver_event_thr - Client delivery thread
 *				This thread will process any events on this
 *				client's eventq.
 */
static void
client_deliver_event_thr(void *arg)
{
	int flag, error, i;
	sysevent_t *ev;
	hrtime_t now;
	module_t *mod;
	struct event_dispatchq *eventq;
	struct sysevent_client *scp;
	struct event_dispatch_pkg *d_pkg;

	scp = (struct sysevent_client *)arg;
	mod = (module_t *)scp->client_data;

	(void) mutex_lock(&scp->client_lock);
	for (;;) {
		/* Sleep until client_deliver_event() queues work */
		while (scp->eventq == NULL) {

			/*
			 * Client has been suspended or unloaded, go no further.
			 */
			if (fini_pending) {
				scp->client_flags &= ~SE_CLIENT_THR_RUNNING;
				syseventd_print(3, "Client %d delivery thread "
				    "exiting flags: 0X%x\n",
				    scp->client_num, scp->client_flags);
				(void) mutex_unlock(&scp->client_lock);
				return;
			}

			(void) cond_wait(&scp->client_cv, &scp->client_lock);

		}

		/*
		 * Process events from the head of the eventq, eventq is locked
		 * going into the processing.
		 */
		eventq = scp->eventq;
		while (eventq != NULL) {
			d_pkg = eventq->d_pkg;
			d_pkg->completion_state = SE_OUTSTANDING;
			/* Drop the lock while calling into the SLM */
			(void) mutex_unlock(&scp->client_lock);


			flag = error = 0;
			ev = d_pkg->ev;

			syseventd_print(3, "Start delivery for client %d "
			    "with retry count %d\n",
			    scp->client_num, d_pkg->retry_count);

			/*
			 * Retry limit has been reached by this client, indicate
			 * that no further retries are allowed
			 */
			for (i = 0; i <= scp->retry_limit; ++i) {
				if (i == scp->retry_limit)
					flag = SE_NO_RETRY;

				/* Start the clock for the event delivery */
				d_pkg->start_time = gethrtime();

				syseventd_print(9, "Deliver to module client "
				    "%s\n", mod->name);

				error = mod->deliver_event(ev, flag);

				/* Can not allow another retry */
				if (i == scp->retry_limit)
					error = 0;

				/* Stop the clock */
				now = gethrtime();

				/*
				 * Suspend event processing and drain the
				 * event q for latent clients
				 */
				if (now - d_pkg->start_time >
				    ((hrtime_t)SE_TIMEOUT * NANOSEC)) {
					syseventd_print(1, "Unresponsive "
					    "client %d: Draining eventq and "
					    "suspending event delivery\n",
					    scp->client_num);
					(void) mutex_lock(&scp->client_lock);
					scp->client_flags &=
					    ~SE_CLIENT_THR_RUNNING;
					scp->client_flags |=
					    SE_CLIENT_SUSPENDED;

					/* Cleanup current event */
					d_pkg->completion_status = EFAULT;
					d_pkg->completion_state = SE_COMPLETE;
					(void) sema_post(
					    d_pkg->completion_sema);

					/*
					 * Drain the remaining events from the
					 * queue.
					 */
					drain_eventq(scp, EINVAL);
					(void) mutex_unlock(&scp->client_lock);
					return;
				}

				/* Event delivery retry requested */
				if (fini_pending || error != EAGAIN) {
					break;
				} else {
					(void) sleep(SE_RETRY_TIME);
				}
			}

			/* Reacquire the lock to record the final status */
			(void) mutex_lock(&scp->client_lock);
			d_pkg->completion_status = error;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);

			/*
			 * Update eventq pointer
			 * (scp->eventq may have been emptied by
			 * drain_eventq while the lock was dropped)
			 */
			if (scp->eventq != NULL) {
				scp->eventq = eventq->next;
				free(eventq);
				eventq = scp->eventq;
			} else {
				free(eventq);
				break;
			}

			syseventd_print(3, "Completed delivery with "
			    "error %d\n", error);
		}

		syseventd_print(3, "No more events to process for client %d\n",
		    scp->client_num);

		/* Return if this was a synchronous delivery */
		if (!SE_CLIENT_IS_THR_RUNNING(scp)) {
			(void) mutex_unlock(&scp->client_lock);
			return;
		}

	}
}

/*
 * client_deliver_event - Client specific event delivery
 *			This routine will allocate and initialize the
 *			neccessary per-client dispatch data.
 *
 *			If the eventq is not empty, it may be assumed that
 *			a delivery thread exists for this client and the
 *			dispatch data is appended to the eventq.
 *
 *			The dispatch package is freed by the event completion
 *			thread (event_completion_thr) and the eventq entry
 *			is freed by the event delivery thread.
753 */ 754 static struct event_dispatch_pkg * 755 client_deliver_event(struct sysevent_client *scp, sysevent_t *ev, 756 sema_t *completion_sema) 757 { 758 size_t ev_sz = sysevent_get_size(ev); 759 struct event_dispatchq *newq, *tmp; 760 struct event_dispatch_pkg *d_pkg; 761 762 syseventd_print(3, "client_deliver_event: id 0x%llx size %d\n", 763 (longlong_t)sysevent_get_seq(ev), ev_sz); 764 if (debug_level == 9) { 765 se_print(stdout, ev); 766 } 767 768 /* 769 * Check for suspended client 770 */ 771 (void) mutex_lock(&scp->client_lock); 772 if (SE_CLIENT_IS_SUSPENDED(scp) || !SE_CLIENT_IS_THR_RUNNING(scp)) { 773 (void) mutex_unlock(&scp->client_lock); 774 return (NULL); 775 } 776 777 /* 778 * Allocate a new dispatch package and eventq entry 779 */ 780 newq = (struct event_dispatchq *)malloc( 781 sizeof (struct event_dispatchq)); 782 if (newq == NULL) { 783 (void) mutex_unlock(&scp->client_lock); 784 return (NULL); 785 } 786 787 d_pkg = (struct event_dispatch_pkg *)malloc( 788 sizeof (struct event_dispatch_pkg)); 789 if (d_pkg == NULL) { 790 free(newq); 791 (void) mutex_unlock(&scp->client_lock); 792 return (NULL); 793 } 794 795 /* Initialize the dispatch package */ 796 d_pkg->scp = scp; 797 d_pkg->retry_count = 0; 798 d_pkg->completion_status = 0; 799 d_pkg->completion_state = SE_NOT_DISPATCHED; 800 d_pkg->completion_sema = completion_sema; 801 d_pkg->ev = ev; 802 newq->d_pkg = d_pkg; 803 newq->next = NULL; 804 805 if (scp->eventq != NULL) { 806 807 /* Add entry to the end of the eventq */ 808 tmp = scp->eventq; 809 while (tmp->next != NULL) 810 tmp = tmp->next; 811 tmp->next = newq; 812 } else { 813 /* event queue empty, wakeup delivery thread */ 814 scp->eventq = newq; 815 (void) cond_signal(&scp->client_cv); 816 } 817 (void) mutex_unlock(&scp->client_lock); 818 819 return (d_pkg); 820 } 821 822 /* 823 * event_completion_thr - Event completion thread. This thread routine 824 * waits for all client delivery thread to complete 825 * delivery of a particular event. 
 *			Once every client has posted client_sema, the
 *			per-client dispatch packages are freed and, if no
 *			client needs a replay (EAGAIN), the event is freed
 *			from the kernel queue via modctl.
 */
static void
event_completion_thr()
{
	int ret, i, client_count, ok_to_free;
	sysevent_id_t eid;
	struct sysevent_client *scp;
	struct ev_completion *ev_comp;
	struct event_dispatchq *dispatchq;
	struct event_dispatch_pkg *d_pkg;

	(void) mutex_lock(&ev_comp_lock);
	for (;;) {
		/* Sleep until dispatch() queues a completion record */
		while (event_compq == NULL) {
			(void) cond_wait(&event_comp_cv, &ev_comp_lock);
		}

		/*
		 * Process event completions from the head of the
		 * completion queue
		 */
		ev_comp = event_compq;
		while (ev_comp) {
			/* Drop the lock while blocking on client delivery */
			(void) mutex_unlock(&ev_comp_lock);
			eid.eid_seq = sysevent_get_seq(ev_comp->ev);
			sysevent_get_time(ev_comp->ev, &eid.eid_ts);
			client_count = ev_comp->client_count;
			ok_to_free = 1;

			syseventd_print(3, "Wait for event completion of "
			    "event 0X%llx on %d clients\n",
			    eid.eid_seq, client_count);

			/* Each client posts client_sema exactly once */
			while (client_count) {
				syseventd_print(9, "Waiting for %d clients on "
				    "event id 0X%llx\n", client_count,
				    eid.eid_seq);

				(void) sema_wait(&ev_comp->client_sema);
				--client_count;
			}

			syseventd_print(3, "Cleaning up clients for event "
			    "0X%llx\n", eid.eid_seq);
			dispatchq = ev_comp->dispatch_list;
			while (dispatchq != NULL) {
				d_pkg = dispatchq->d_pkg;
				scp = d_pkg->scp;

				/*
				 * EAGAIN means this client wants the event
				 * replayed; keep it in the kernel queue.
				 */
				if (d_pkg->completion_status == EAGAIN)
					ok_to_free = 0;

				syseventd_print(4, "Delivery of 0X%llx "
				    "complete for client %d retry count %d "
				    "status %d\n", eid.eid_seq,
				    scp->client_num,
				    d_pkg->retry_count,
				    d_pkg->completion_status);

				free(d_pkg);
				ev_comp->dispatch_list = dispatchq->next;
				free(dispatchq);
				dispatchq = ev_comp->dispatch_list;
			}

			if (ok_to_free) {
				/* Release the event from the kernel queue */
				for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
					if ((ret = modctl(MODEVENTS,
					    (uintptr_t)MODEVENTS_FREEDATA,
					    (uintptr_t)&eid, NULL,
					    NULL, 0)) != 0) {
						syseventd_print(1, "attempting "
						    "to free event 0X%llx\n",
						    eid.eid_seq);

						/*
						 * Kernel may need time to
						 * move this event buffer to
						 * the sysevent sent queue
						 */
						(void) sleep(1);
					} else {
						break;
					}
				}
				if (ret) {
					syseventd_print(1, "Unable to free "
					    "event 0X%llx from the "
					    "kernel\n", eid.eid_seq);
				}
			} else {
				syseventd_print(1, "Not freeing event 0X%llx\n",
				    eid.eid_seq);
			}

			syseventd_print(2, "Event delivery complete for id "
			    "0X%llx\n", eid.eid_seq);

			/* Reacquire the lock to pop this completion record */
			(void) mutex_lock(&ev_comp_lock);
			event_compq = ev_comp->next;
			free(ev_comp->ev);
			free(ev_comp);
			ev_comp = event_compq;
			/* One more dispatch slot is available again */
			(void) sema_post(&sema_resource);
		}

		/*
		 * Event completion queue is empty, signal possible unload
		 * operation
		 */
		(void) cond_signal(&event_comp_cv);

		syseventd_print(3, "No more events\n");
	}
}

/*
 * dispatch - Dispatch the current event buffer to all valid SLM clients.
 */
static int
dispatch(void)
{
	int ev_sz, i, client_count = 0;
	sysevent_t *new_ev;
	sysevent_id_t eid;
	struct ev_completion *ev_comp, *tmp;
	struct event_dispatchq *dispatchq, *client_list;
	struct event_dispatch_pkg *d_pkg;

	/* Check for module unload operation */
	if (rw_tryrdlock(&mod_unload_lock) != 0) {
		syseventd_print(2, "unload in progress abort delivery\n");
		(void) sema_post(&sema_eventbuf);
		(void) sema_post(&sema_resource);
		return (0);
	}

	syseventd_print(3, "deliver dispatch buffer %d", dispatch_buf);
	eid.eid_seq = sysevent_get_seq(eventbuf[dispatch_buf]);
	sysevent_get_time(eventbuf[dispatch_buf], &eid.eid_ts);
	syseventd_print(3, "deliver msg id: 0x%llx\n", eid.eid_seq);

	/*
	 * ev_comp is used to hold event completion data.  It is freed
	 * by the event completion thread (event_completion_thr).
	 */
	ev_comp = (struct ev_completion *)
	    malloc(sizeof (struct ev_completion));
	if (ev_comp == NULL) {
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate event completion buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}
	ev_comp->dispatch_list = NULL;
	ev_comp->next = NULL;
	(void) sema_init(&ev_comp->client_sema, 0, USYNC_THREAD, NULL);

	ev_sz = sysevent_get_size(eventbuf[dispatch_buf]);
	new_ev = calloc(1, ev_sz);
	if (new_ev == NULL) {
		free(ev_comp);
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate new event buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}


	/*
	 * For long messages, copy additional data from kernel
	 * (the eventbuf slot only holds the first LOGEVENT_BUFSIZE bytes)
	 */
	if (ev_sz > LOGEVENT_BUFSIZE) {
		int ret = 0;

		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);

		for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
			if ((ret = modctl(MODEVENTS,
			    (uintptr_t)MODEVENTS_GETDATA,
			    (uintptr_t)&eid,
			    (uintptr_t)ev_sz,
			    (uintptr_t)new_ev, 0))
			    == 0)
				break;
			else
				(void) sleep(1);
		}
		if (ret) {
			syseventd_print(1, "GET_DATA failed for 0X%llx:%llx\n",
			    eid.eid_ts, eid.eid_seq);
			free(new_ev);
			free(ev_comp);
			(void) rw_unlock(&mod_unload_lock);
			return (EAGAIN);
		}
	} else {
		(void) bcopy(eventbuf[dispatch_buf], new_ev, ev_sz);
		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);
	}


	/*
	 * Deliver a copy of eventbuf to clients so
	 * eventbuf can be used for the next message
	 */
	for (i = 0; i < MAX_SLM; ++i) {

		/* Don't bother for suspended or unloaded clients */
		if (!SE_CLIENT_IS_LOADED(sysevent_client_tbl[i]) ||
		    SE_CLIENT_IS_SUSPENDED(sysevent_client_tbl[i]))
			continue;

		/*
		 * Allocate event dispatch queue entry.  All queue entries
		 * are freed by the event completion thread as client
		 * delivery completes.
		 */
		dispatchq = (struct event_dispatchq *)malloc(
		    sizeof (struct event_dispatchq));
		if (dispatchq == NULL) {
			syseventd_print(1, "Can not allocate dispatch q "
			    "for event id 0X%llx client %d\n", eid.eid_seq, i);
			continue;
		}
		dispatchq->next = NULL;

		/* Initiate client delivery */
		d_pkg = client_deliver_event(sysevent_client_tbl[i],
		    new_ev, &ev_comp->client_sema);
		if (d_pkg == NULL) {
			syseventd_print(1, "Can not allocate dispatch "
			    "package for event id 0X%llx client %d\n",
			    eid.eid_seq, i);
			free(dispatchq);
			continue;
		}
		dispatchq->d_pkg = d_pkg;
		++client_count;

		/*
		 * client_list tracks the tail; it is only read after
		 * dispatch_list is non-NULL, so it is always set before
		 * its first use.
		 */
		if (ev_comp->dispatch_list == NULL) {
			ev_comp->dispatch_list = dispatchq;
			client_list = dispatchq;
		} else {
			client_list->next = dispatchq;
			client_list = client_list->next;
		}
	}

	ev_comp->client_count = client_count;
	ev_comp->ev = new_ev;

	(void) mutex_lock(&ev_comp_lock);

	if (event_compq == NULL) {
		syseventd_print(3, "Wakeup event completion thread for "
		    "id 0X%llx\n", eid.eid_seq);
		event_compq = ev_comp;
		(void) cond_signal(&event_comp_cv);
	} else {

		/* Add entry to the end of the event completion queue */
		tmp = event_compq;
		while (tmp->next != NULL)
			tmp = tmp->next;
		tmp->next = ev_comp;
		syseventd_print(3, "event added to completion queue for "
		    "id 0X%llx\n", eid.eid_seq);
	}
	(void) mutex_unlock(&ev_comp_lock);
	(void) rw_unlock(&mod_unload_lock);

	return (0);
}

/* SLM module directories: platform, machine, then generic */
#define	MODULE_DIR_HW	"/usr/platform/%s/lib/sysevent/modules/"
#define	MODULE_DIR_GEN	"/usr/lib/sysevent/modules/"
#define	MOD_DIR_NUM	3
static char dirname[MOD_DIR_NUM][MAXPATHLEN];

/*
 * dir_num2name - map a directory index (0..MOD_DIR_NUM-1) to its module
 *		directory path, lazily filling the dirname table from
 *		sysinfo() on first use.  Returns NULL on bad index or
 *		sysinfo/snprintf failure.
 */
static char *
dir_num2name(int dirnum)
{
	char infobuf[MAXPATHLEN];

	if (dirnum >= MOD_DIR_NUM)
		return (NULL);

	/* dirname[0][0] == '\0' means the table is not yet populated */
	if (dirname[0][0] == '\0') {
		if (sysinfo(SI_PLATFORM, infobuf, MAXPATHLEN) == -1) {
			syseventd_print(1, "dir_num2name: "
			    "sysinfo error %s\n", strerror(errno));
			return (NULL);
		} else if (snprintf(dirname[0], sizeof (dirname[0]),
		    MODULE_DIR_HW, infobuf) >= sizeof (dirname[0])) {
			syseventd_print(1, "dir_num2name: "
			    "platform name too long: %s\n",
			    infobuf);
			return (NULL);
		}
		if (sysinfo(SI_MACHINE, infobuf, MAXPATHLEN) == -1) {
			syseventd_print(1, "dir_num2name: "
			    "sysinfo error %s\n", strerror(errno));
			return (NULL);
		} else if (snprintf(dirname[1], sizeof (dirname[1]),
		    MODULE_DIR_HW, infobuf) >= sizeof (dirname[1])) {
			syseventd_print(1, "dir_num2name: "
			    "machine name too long: %s\n",
			    infobuf);
			return (NULL);
		}
		(void) strcpy(dirname[2], MODULE_DIR_GEN);
	}

	return (dirname[dirnum]);
}


/*
 * load_modules - Load modules found in the common syseventd module directories
 *		Modules that do not provide valid interfaces are rejected.
1149 */ 1150 static void 1151 load_modules(char *dirname) 1152 { 1153 int client_id; 1154 DIR *mod_dir; 1155 module_t *mod; 1156 struct dirent *entp; 1157 struct slm_mod_ops *mod_ops; 1158 struct sysevent_client *scp; 1159 1160 if (dirname == NULL) 1161 return; 1162 1163 /* Return silently if module directory does not exist */ 1164 if ((mod_dir = opendir(dirname)) == NULL) { 1165 syseventd_print(1, "Unable to open module directory %s: %s\n", 1166 dirname, strerror(errno)); 1167 return; 1168 } 1169 1170 syseventd_print(3, "loading modules from %s\n", dirname); 1171 1172 /* 1173 * Go through directory, looking for files ending with .so 1174 */ 1175 while ((entp = readdir(mod_dir)) != NULL) { 1176 void *dlh, *f; 1177 char *tmp, modpath[MAXPATHLEN]; 1178 1179 if (((tmp = strstr(entp->d_name, MODULE_SUFFIX)) == NULL) || 1180 (tmp[strlen(MODULE_SUFFIX)] != '\0')) { 1181 continue; 1182 } 1183 1184 if (snprintf(modpath, sizeof (modpath), "%s%s", 1185 dirname, entp->d_name) >= sizeof (modpath)) { 1186 syseventd_err_print(INIT_PATH_ERR, modpath); 1187 continue; 1188 } 1189 if ((dlh = dlopen(modpath, RTLD_LAZY)) == NULL) { 1190 syseventd_err_print(LOAD_MOD_DLOPEN_ERR, 1191 modpath, dlerror()); 1192 continue; 1193 } else if ((f = dlsym(dlh, EVENT_INIT)) == NULL) { 1194 syseventd_err_print(LOAD_MOD_NO_INIT, 1195 modpath, dlerror()); 1196 (void) dlclose(dlh); 1197 continue; 1198 } 1199 1200 mod = malloc(sizeof (*mod)); 1201 if (mod == NULL) { 1202 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod", 1203 strerror(errno)); 1204 (void) dlclose(dlh); 1205 continue; 1206 } 1207 1208 mod->name = strdup(entp->d_name); 1209 if (mod->name == NULL) { 1210 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod->name", 1211 strerror(errno)); 1212 (void) dlclose(dlh); 1213 free(mod); 1214 continue; 1215 } 1216 1217 mod->dlhandle = dlh; 1218 mod->event_mod_init = (struct slm_mod_ops *(*)())f; 1219 1220 /* load in other module functions */ 1221 mod->event_mod_fini = (void (*)())dlsym(dlh, EVENT_FINI); 1222 
if (mod->event_mod_fini == NULL) { 1223 syseventd_err_print(LOAD_MOD_DLSYM_ERR, mod->name, 1224 dlerror()); 1225 free(mod->name); 1226 free(mod); 1227 (void) dlclose(dlh); 1228 continue; 1229 } 1230 1231 /* Call module init routine */ 1232 if ((mod_ops = mod->event_mod_init()) == NULL) { 1233 syseventd_err_print(LOAD_MOD_EINVAL, mod->name); 1234 free(mod->name); 1235 free(mod); 1236 (void) dlclose(dlh); 1237 continue; 1238 } 1239 if (mod_ops->major_version != SE_MAJOR_VERSION) { 1240 syseventd_err_print(LOAD_MOD_VERSION_MISMATCH, 1241 mod->name, SE_MAJOR_VERSION, 1242 mod_ops->major_version); 1243 mod->event_mod_fini(); 1244 free(mod->name); 1245 free(mod); 1246 (void) dlclose(dlh); 1247 continue; 1248 } 1249 1250 mod->deliver_event = mod_ops->deliver_event; 1251 /* Add module entry to client list */ 1252 if ((client_id = insert_client((void *)mod, SLM_CLIENT, 1253 (mod_ops->retry_limit <= SE_MAX_RETRY_LIMIT ? 1254 mod_ops->retry_limit : SE_MAX_RETRY_LIMIT))) 1255 < 0) {; 1256 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client", 1257 strerror(errno)); 1258 mod->event_mod_fini(); 1259 free(mod->name); 1260 free(mod); 1261 (void) dlclose(dlh); 1262 continue; 1263 } 1264 1265 scp = sysevent_client_tbl[client_id]; 1266 ++concurrency_level; 1267 (void) thr_setconcurrency(concurrency_level); 1268 if (thr_create(NULL, 0, 1269 (void *(*)(void *))client_deliver_event_thr, 1270 (void *)scp, THR_BOUND, &scp->tid) != 0) { 1271 1272 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client", 1273 strerror(errno)); 1274 mod->event_mod_fini(); 1275 free(mod->name); 1276 free(mod); 1277 (void) dlclose(dlh); 1278 continue; 1279 } 1280 scp->client_flags |= SE_CLIENT_THR_RUNNING; 1281 1282 syseventd_print(3, "loaded module %s\n", entp->d_name); 1283 } 1284 1285 (void) closedir(mod_dir); 1286 syseventd_print(3, "modules loaded\n"); 1287 } 1288 1289 /* 1290 * unload_modules - modules are unloaded prior to graceful shutdown or 1291 * before restarting the daemon upon receipt of 1292 * 
 *			SIGHUP.
 */
static void
unload_modules(int sig)
{
	int i, count, done;
	module_t *mod;
	struct sysevent_client *scp;

	/*
	 * unload modules that are ready, skip those that have not
	 * drained their event queues.  Poll once per second until all
	 * MAX_SLM slots have been processed (or, for non-SIGHUP
	 * shutdown, until the SE_TIMEOUT iteration budget is spent).
	 */
	count = done = 0;
	while (done < MAX_SLM) {
		/* Don't wait indefinitely for unresponsive clients */
		if (sig != SIGHUP && count > SE_TIMEOUT) {
			break;
		}

		/* Recount from scratch each pass */
		done = 0;

		/* Shutdown clients */
		for (i = 0; i < MAX_SLM; ++i) {
			scp = sysevent_client_tbl[i];
			/*
			 * trylock (not lock): a client busy delivering an
			 * event is skipped this pass and retried later.
			 * Empty or non-SLM slots count as already done.
			 */
			if (mutex_trylock(&scp->client_lock) == 0) {
				if (scp->client_type != SLM_CLIENT ||
				    scp->client_data == NULL) {
					(void) mutex_unlock(&scp->client_lock);
					done++;
					continue;
				}
			} else {
				syseventd_print(3, "Skipping unload of "
				    "client %d: client locked\n",
				    scp->client_num);
				continue;
			}

			/*
			 * Drain the eventq and wait for delivery thread to
			 * cleanly exit
			 */
			drain_eventq(scp, EAGAIN);
			(void) cond_signal(&scp->client_cv);
			(void) mutex_unlock(&scp->client_lock);
			(void) thr_join(scp->tid, NULL, NULL);

			/*
			 * It is now safe to unload the module
			 */
			mod = (module_t *)scp->client_data;
			syseventd_print(2, "Unload %s\n", mod->name);
			mod->event_mod_fini();
			(void) dlclose(mod->dlhandle);
			free(mod->name);
			/* Remove the table entry under the table lock */
			(void) mutex_lock(&client_tbl_lock);
			delete_client(i);
			(void) mutex_unlock(&client_tbl_lock);
			++done;

		}
		++count;
		(void) sleep(1);
	}

	/*
	 * Wait for event completions
	 */
	syseventd_print(2, "waiting for event completions\n");
	(void) mutex_lock(&ev_comp_lock);
	while (event_compq != NULL) {
		(void) cond_wait(&event_comp_cv, &ev_comp_lock);
	}
	(void) mutex_unlock(&ev_comp_lock);
}

/*
 * syseventd_init - Called at daemon (re)start-up time to load modules
 *			and
 *			kickstart the kernel delivery engine.
 */
static void
syseventd_init()
{
	int i, fd;
	char local_door_file[PATH_MAX + 1];

	/* Re-enable event acceptance after a prior syseventd_fini() */
	fini_pending = 0;

	concurrency_level = MIN_CONCURRENCY_LEVEL;
	(void) thr_setconcurrency(concurrency_level);

	/*
	 * Load client modules for event delivering
	 */
	for (i = 0; i < MOD_DIR_NUM; ++i) {
		load_modules(dir_num2name(i));
	}

	/*
	 * Create kernel delivery door service
	 */
	syseventd_print(8, "Create a door for kernel upcalls\n");
	if (snprintf(local_door_file, sizeof (local_door_file), "%s%s",
	    root_dir, LOGEVENT_DOOR_UPCALL) >= sizeof (local_door_file)) {
		syseventd_err_print(INIT_PATH_ERR, local_door_file);
		syseventd_exit(5);
	}

	/*
	 * Remove door file for robustness.
	 */
	if (unlink(local_door_file) != 0)
		syseventd_print(8, "Unlink of %s failed.\n", local_door_file);

	/*
	 * (Re)create the attach point; a racing EEXIST is acceptable
	 * since only the file's existence matters for fattach below.
	 */
	fd = open(local_door_file, O_CREAT|O_RDWR, S_IREAD|S_IWRITE);
	if ((fd == -1) && (errno != EEXIST)) {
		syseventd_err_print(INIT_OPEN_DOOR_ERR, strerror(errno));
		syseventd_exit(5);
	}
	(void) close(fd);

	upcall_door = door_create(door_upcall, NULL,
	    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	if (upcall_door == -1) {
		syseventd_err_print(INIT_CREATE_DOOR_ERR, strerror(errno));
		syseventd_exit(5);
	}

	/* Detach any stale door from a previous incarnation first */
	(void) fdetach(local_door_file);
retry:
	/*
	 * NOTE(review): EBUSY retries in a tight loop with no backoff;
	 * confirm the busy condition is always transient here.
	 */
	if (fattach(upcall_door, local_door_file) != 0) {
		if (errno == EBUSY)
			goto retry;
		syseventd_err_print(INIT_FATTACH_ERR, strerror(errno));
		(void) door_revoke(upcall_door);
		syseventd_exit(5);
	}

	/*
	 * Tell kernel the door name and start delivery
	 */
	syseventd_print(2,
	    "local_door_file = %s\n", local_door_file);
	if (modctl(MODEVENTS,
	    (uintptr_t)MODEVENTS_SET_DOOR_UPCALL_FILENAME,
	    (uintptr_t)local_door_file, NULL, NULL, 0) < 0) {
		syseventd_err_print(INIT_DOOR_NAME_ERR, strerror(errno));
		syseventd_exit(6);
	}

	door_upcall_retval = 0;

	/* Ask the kernel to replay events buffered while we were down */
	if (modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0)
	    < 0) {
		syseventd_err_print(KERNEL_REPLAY_ERR, strerror(errno));
		syseventd_exit(7);
	}
}

/*
 * syseventd_fini - shut down daemon, but do not exit
 */
static void
syseventd_fini(int sig)
{
	/*
	 * Indicate that event queues should be drained and no
	 * additional events be accepted
	 */
	fini_pending = 1;

	/* Close the kernel event door to halt delivery */
	(void) door_revoke(upcall_door);

	/* sig is forwarded so unload_modules can relax its timeout on SIGHUP */
	syseventd_print(1, "Unloading modules\n");
	(void) rw_wrlock(&mod_unload_lock);
	unload_modules(sig);
	(void) rw_unlock(&mod_unload_lock);

}

/*
 * enter_daemon_lock - lock the daemon file lock
 *
 * Use an advisory lock to ensure that only one daemon process is active
 * in the system at any point in time. If the lock is held by another
 * process, do not block but return the pid owner of the lock to the
 * caller immediately. The lock is cleared if the holding daemon process
 * exits for any reason even if the lock file remains, so the daemon can
 * be restarted if necessary. The lock file is DAEMON_LOCK_FILE.
1483 */ 1484 static pid_t 1485 enter_daemon_lock(void) 1486 { 1487 struct flock lock; 1488 1489 syseventd_print(8, "enter_daemon_lock: lock file = %s\n", 1490 DAEMON_LOCK_FILE); 1491 1492 if (snprintf(local_lock_file, sizeof (local_lock_file), "%s%s", 1493 root_dir, DAEMON_LOCK_FILE) >= sizeof (local_lock_file)) { 1494 syseventd_err_print(INIT_PATH_ERR, local_lock_file); 1495 syseventd_exit(8); 1496 } 1497 daemon_lock_fd = open(local_lock_file, O_CREAT|O_RDWR, 0644); 1498 if (daemon_lock_fd < 0) { 1499 syseventd_err_print(INIT_LOCK_OPEN_ERR, 1500 local_lock_file, strerror(errno)); 1501 syseventd_exit(8); 1502 } 1503 1504 lock.l_type = F_WRLCK; 1505 lock.l_whence = SEEK_SET; 1506 lock.l_start = 0; 1507 lock.l_len = 0; 1508 1509 if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) { 1510 if (fcntl(daemon_lock_fd, F_GETLK, &lock) == -1) { 1511 syseventd_err_print(INIT_LOCK_ERR, 1512 local_lock_file, strerror(errno)); 1513 exit(2); 1514 } 1515 return (lock.l_pid); 1516 } 1517 hold_daemon_lock = 1; 1518 1519 return (getpid()); 1520 } 1521 1522 /* 1523 * exit_daemon_lock - release the daemon file lock 1524 */ 1525 static void 1526 exit_daemon_lock(void) 1527 { 1528 struct flock lock; 1529 1530 lock.l_type = F_UNLCK; 1531 lock.l_whence = SEEK_SET; 1532 lock.l_start = 0; 1533 lock.l_len = 0; 1534 1535 if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) { 1536 syseventd_err_print(INIT_UNLOCK_ERR, 1537 local_lock_file, strerror(errno)); 1538 } 1539 1540 if (close(daemon_lock_fd) == -1) { 1541 syseventd_err_print(INIT_LOCK_CLOSE_ERR, 1542 local_lock_file, strerror(errno)); 1543 exit(-1); 1544 } 1545 } 1546 1547 /* 1548 * syseventd_err_print - print error messages to the terminal if not 1549 * yet daemonized or to syslog. 1550 */ 1551 /*PRINTFLIKE1*/ 1552 void 1553 syseventd_err_print(char *message, ...) 
1554 { 1555 va_list ap; 1556 1557 (void) mutex_lock(&err_mutex); 1558 va_start(ap, message); 1559 1560 if (logflag) { 1561 (void) vsyslog(LOG_ERR, message, ap); 1562 } else { 1563 (void) fprintf(stderr, "%s: ", prog); 1564 (void) vfprintf(stderr, message, ap); 1565 } 1566 va_end(ap); 1567 (void) mutex_unlock(&err_mutex); 1568 } 1569 1570 /* 1571 * syseventd_print - print messages to the terminal or to syslog 1572 * the following levels are implemented: 1573 * 1574 * 1 - transient errors that does not affect normal program flow 1575 * 2 - upcall/dispatch interaction 1576 * 3 - program flow trace as each message goes through the daemon 1577 * 8 - all the nit-gritty details of startup and shutdown 1578 * 9 - very verbose event flow tracing (no daemonization of syseventd) 1579 * 1580 */ 1581 /*PRINTFLIKE2*/ 1582 void 1583 syseventd_print(int level, char *message, ...) 1584 { 1585 va_list ap; 1586 static int newline = 1; 1587 1588 if (level > debug_level) { 1589 return; 1590 } 1591 1592 (void) mutex_lock(&err_mutex); 1593 va_start(ap, message); 1594 if (logflag) { 1595 (void) syslog(LOG_DEBUG, "%s[%ld]: ", 1596 prog, getpid()); 1597 (void) vsyslog(LOG_DEBUG, message, ap); 1598 } else { 1599 if (newline) { 1600 (void) fprintf(stdout, "%s[%ld]: ", 1601 prog, getpid()); 1602 (void) vfprintf(stdout, message, ap); 1603 } else { 1604 (void) vfprintf(stdout, message, ap); 1605 } 1606 } 1607 if (message[strlen(message)-1] == '\n') { 1608 newline = 1; 1609 } else { 1610 newline = 0; 1611 } 1612 va_end(ap); 1613 (void) mutex_unlock(&err_mutex); 1614 } 1615