1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * syseventd - The system event daemon 29 * 30 * This daemon dispatches event buffers received from the 31 * kernel to all interested SLM clients. SLMs in turn 32 * deliver the buffers to their particular application 33 * clients. 
 */
#include <stdio.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <door.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <strings.h>
#include <unistd.h>
#include <synch.h>
#include <syslog.h>
#include <thread.h>
#include <libsysevent.h>
#include <limits.h>
#include <locale.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>
#include <sys/modctl.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/wait.h>

#include "sysevent_signal.h"
#include "syseventd.h"
#include "message.h"

/* Client table management, implemented elsewhere in the daemon */
extern int insert_client(void *client, int client_type, int retry_limit);
extern void delete_client(int id);
extern void initialize_client_tbl(void);

extern struct sysevent_client *sysevent_client_tbl[];
extern mutex_t client_tbl_lock;

#define	DEBUG_LEVEL_FORK	9	/* will run in background at all */
					/* levels less than DEBUG_LEVEL_FORK */

int debug_level = 0;
char *root_dir = "";	/* Relative root for lock and door */

/* Maximum number of outstanding events dispatched */
#define	SE_EVENT_DISPATCH_CNT	100

static int upcall_door;		/* Kernel event door */
static int door_upcall_retval;	/* Kernel event posting return value */
static int fini_pending = 0;	/* fini pending flag */
static int deliver_buf = 0;	/* Current event buffer from kernel */
static int dispatch_buf = 0;	/* Current event buffer dispatched */
static sysevent_t **eventbuf;	/* Global array of event buffers */
static struct ev_completion *event_compq;	/* Event completion queue */
static mutex_t ev_comp_lock;	/* Event completion queue lock */
static mutex_t err_mutex;	/* error logging lock */
static mutex_t door_lock;	/* sync door return access */
static rwlock_t mod_unload_lock;	/* sync module unloading */

/* declarations and
definitions for avoiding multiple daemons running */
#define	DAEMON_LOCK_FILE "/var/run/syseventd.lock"
char local_lock_file[PATH_MAX + 1];
static int hold_daemon_lock;
static int daemon_lock_fd;

/*
 * sema_eventbuf - guards against the global buffer eventbuf
 *	being written to before it has been dispatched to clients
 *
 * sema_dispatch - synchronizes between the kernel uploading thread
 *	(producer) and the userland dispatch_message thread (consumer).
 *
 * sema_resource - throttles outstanding event consumption.
 *
 * event_comp_cv - synchronizes threads waiting for the event completion queue
 *			to empty or become active.
 */
static sema_t sema_eventbuf, sema_dispatch, sema_resource;
static cond_t event_comp_cv;

/* Self-tuning concurrency level */
#define	MIN_CONCURRENCY_LEVEL	4
static int concurrency_level = MIN_CONCURRENCY_LEVEL;


/* SLM defines */
#define	MODULE_SUFFIX	".so"
#define	EVENT_FINI	"slm_fini"
#define	EVENT_INIT	"slm_init"

#define	SE_TIMEOUT	60	/* Client dispatch timeout (seconds) */

/* syslog message related */
static int logflag = 0;
static char *prog;

/* function prototypes */
static void door_upcall(void *cookie, char *args, size_t alen, door_desc_t *ddp,
	uint_t ndid);
static void dispatch_message(void);
static int dispatch(void);
static void event_completion_thr(void);
static void usage(void);

static void syseventd_init(void);
static void syseventd_fini(int sig);

static pid_t enter_daemon_lock(void);
static void exit_daemon_lock(void);

/* Print a usage synopsis on stderr and exit with status 2. */
static void
usage() {
	(void) fprintf(stderr, "usage: syseventd [-d <debug_level>] "
	    "[-r <root_dir>]\n");
	(void) fprintf(stderr, "higher debug levels get progressively ");
	(void) fprintf(stderr, "more detailed debug information.\n");
	(void) fprintf(stderr, "syseventd will run in background if ");
	(void) fprintf(stderr, "run with a debug_level less than %d.\n",
	    DEBUG_LEVEL_FORK);
	exit(2);
}


/* common exit function which ensures releasing locks */
void
syseventd_exit(int status)
{
	syseventd_print(1, "exit status = %d\n", status);

	/* Release the single-instance daemon lock if we hold it */
	if (hold_daemon_lock) {
		exit_daemon_lock();
	}

	exit(status);
}


/*
 * hup_handler - SIGHUP handler.  SIGHUP is used to force a reload of
 *		 all SLMs.  During fini, events are drained from all
 *		 client event queues.  The events that have been consumed
 *		 by all clients are freed from the kernel event queue.
 *
 *		 Events that have not yet been delivered to all clients
 *		 are not freed and will be replayed after all SLMs have
 *		 been (re)loaded.
 *
 *		 After all client event queues have been drained, each
 *		 SLM client is unloaded.  The init phase will (re)load
 *		 each SLM and initiate event replay and delivery from
 *		 the kernel.
 *
 */
/*ARGSUSED*/
static void
hup_handler(int sig)
{
	syseventd_err_print(SIGHUP_CAUGHT);
	(void) fflush(0);
	syseventd_fini(sig);
	syseventd_init();
	syseventd_err_print(DAEMON_RESTARTED);
	(void) fflush(0);
}

/*
 * Fault handler for other signals caught
 */
/*ARGSUSED*/
static void
flt_handler(int sig)
{
	char signame[SIG2STR_MAX];

	if (sig2str(sig, signame) == -1) {
		syseventd_err_print(UNKNOWN_SIGNAL_CAUGHT, sig);
	}

	/* Restore default disposition before acting on the signal */
	(void) se_signal_sethandler(sig, SIG_DFL, NULL);

	switch (sig) {
	case SIGINT:
	case SIGSTOP:
	case SIGTERM:
		/* Close kernel door */
		(void) door_revoke(upcall_door);

		/* Gracefully exit current event delivery threads */
		syseventd_fini(sig);

		(void) fflush(0);
		(void) se_signal_unblockall();
		syseventd_exit(1);
		/*NOTREACHED*/
	case SIGCLD:
	case SIGPWR:
	case SIGWINCH:
	case SIGURG:
	case SIGCONT:
	case SIGWAITING:
	case SIGLWP:
	case SIGFREEZE:
	case SIGTHAW:
	case SIGCANCEL:
	case SIGXRES:
	case SIGJVM1:
	case SIGJVM2:
		/* No need to abort */
		break;
	default:
		/* Any other signal is treated as fatal */
		syseventd_err_print(FATAL_ERROR);
		abort();

	}
}

/*
 * Daemon parent process only.
 * Child process signal to indicate successful daemon initialization.
 * This is the normal and expected exit path of the daemon parent.
 */
/*ARGSUSED*/
static void
sigusr1(int sig)
{
	syseventd_exit(0);
}

/*
 * sigwait_thr - dedicated signal-catching thread.  Loops forever waiting
 * for any signal; SIGHUP triggers an SLM reload via hup_handler(), all
 * other signals are routed to flt_handler().  Other threads run with all
 * signals blocked (see main()).
 */
static void
sigwait_thr()
{
	int sig;
	int err;
	sigset_t signal_set;

	for (;;) {
		syseventd_print(3, "sigwait thread waiting for signal\n");
		(void) sigfillset(&signal_set);
		err = sigwait(&signal_set, &sig);
		if (err) {
			syseventd_exit(2);
		}

		/*
		 * Block all signals until the signal handler completes
		 */
		if (sig == SIGHUP) {
			hup_handler(sig);
		} else {
			flt_handler(sig);
		}
	}
	/* NOTREACHED */
}

/*
 * set_root_dir - record the alternate root for the lock file and door
 * (-r option).  Exits the daemon on allocation failure.
 */
static void
set_root_dir(char *dir)
{
	root_dir = malloc(strlen(dir) + 1);
	if (root_dir == NULL) {
		syseventd_err_print(INIT_ROOT_DIR_ERR, strerror(errno));
		syseventd_exit(2);
	}
	(void) strcpy(root_dir, dir);
}

/*
 * main - parse options, optionally daemonize, take the single-instance
 * lock, set up event buffers and synchronization objects, start the
 * dispatch/completion/signal threads, load the SLMs, then park in pause().
 */
int
main(int argc, char **argv)
{
	int i, c;
	int fd;
	pid_t pid;
	int has_forked = 0;
	extern char *optarg;

	(void) setlocale(LC_ALL, "");
	(void) textdomain(TEXT_DOMAIN);

	if (getuid() != 0) {
		(void) fprintf(stderr, "Must be root to run syseventd\n");
		syseventd_exit(1);
	}

	if (argc > 5) {
		usage();
	}

	if ((prog = strrchr(argv[0], '/')) == NULL) {
		prog = argv[0];
	} else {
		prog++;
	}

	while ((c = getopt(argc, argv, "d:r:")) != EOF) {
		switch (c) {
		case 'd':
			debug_level = atoi(optarg);
			break;
		case 'r':
			/*
			 * Private flag for suninstall to run
			 * daemon during install.
			 */
			set_root_dir(optarg);
			break;
		case '?':
		default:
			usage();
		}
	}

	/* demonize ourselves */
	if (debug_level < DEBUG_LEVEL_FORK) {

		sigset_t mask;

		(void) sigset(SIGUSR1, sigusr1);

		(void) sigemptyset(&mask);
		(void) sigaddset(&mask, SIGUSR1);
		(void) sigprocmask(SIG_BLOCK, &mask, NULL);

		if ((pid = fork()) == (pid_t)-1) {
			(void) fprintf(stderr,
			    "syseventd: fork failed - %s\n", strerror(errno));
			syseventd_exit(1);
		}

		if (pid != 0) {
			/*
			 * parent
			 * handshake with the daemon so that dependents
			 * of the syseventd service don't start up until
			 * the service is actually functional
			 */
			int status;
			(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);

			/*
			 * The child's SIGUSR1 interrupts waitpid(), so a
			 * non-pid return here means successful startup.
			 */
			if (waitpid(pid, &status, 0) != pid) {
				/*
				 * child process signal indicating
				 * successful daemon initialization
				 */
				syseventd_exit(0);
			}
			/* child exited implying unsuccessful startup */
			syseventd_exit(1);
		}

		/* child */

		has_forked = 1;
		(void) sigset(SIGUSR1, SIG_DFL);
		(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);

		(void) chdir("/");
		(void) setsid();
		if (debug_level <= 1) {
			/* Detach stdio from the controlling terminal */
			closefrom(0);
			fd = open("/dev/null", 0);
			(void) dup2(fd, 1);
			(void) dup2(fd, 2);
			logflag = 1;
		}
	}

	openlog("syseventd", LOG_PID, LOG_DAEMON);

	(void) mutex_init(&err_mutex, USYNC_THREAD, NULL);

	syseventd_print(8,
	    "syseventd started, debug level = %d\n", debug_level);

	/* only one instance of syseventd can run at a time */
	if ((pid = enter_daemon_lock()) != getpid()) {
		syseventd_print(1,
		    "event daemon pid %ld already running\n", pid);
		exit(3);
	}

	/* initialize semaphores and eventbuf */
	(void) sema_init(&sema_eventbuf, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) sema_init(&sema_dispatch, 0, USYNC_THREAD, NULL);
	(void) sema_init(&sema_resource, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) cond_init(&event_comp_cv, USYNC_THREAD, NULL);
	eventbuf = (sysevent_t **)calloc(SE_EVENT_DISPATCH_CNT,
	    sizeof (sysevent_t *));
	if (eventbuf == NULL) {
		syseventd_print(1, "Unable to allocate event buffer array\n");
		exit(2);
	}
	for (i = 0; i < SE_EVENT_DISPATCH_CNT; ++i) {
		eventbuf[i] = malloc(LOGEVENT_BUFSIZE);
		if (eventbuf[i] == NULL) {
			syseventd_print(1, "Unable to allocate event "
			    "buffers\n");
			exit(2);
		}
	}

	(void) mutex_init(&client_tbl_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&ev_comp_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&door_lock, USYNC_THREAD, NULL);
	(void) rwlock_init(&mod_unload_lock, USYNC_THREAD, NULL);

	event_compq = NULL;

	syseventd_print(8, "start the message thread running\n");

	/*
	 * Block all signals to all threads include the main thread.
	 * The sigwait_thr thread will process any signals and initiate
	 * a graceful recovery if possible.
	 */
	if (se_signal_blockall() < 0) {
		syseventd_err_print(INIT_SIG_BLOCK_ERR);
		syseventd_exit(2);
	}

	if (thr_create(NULL, NULL, (void *(*)(void *))dispatch_message,
	    (void *)0, 0, NULL) < 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}
	if (thr_create(NULL, NULL,
	    (void *(*)(void *))event_completion_thr, NULL,
	    THR_BOUND, NULL) != 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}
	/* Create signal catching thread */
	if (thr_create(NULL, NULL, (void *(*)(void *))sigwait_thr,
	    NULL, 0, NULL) < 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}

	setbuf(stdout, (char *)NULL);

	/* Initialize and load SLM clients */
	initialize_client_tbl();
	syseventd_init();

	/* signal parent to indicate successful daemon initialization */
	if (has_forked) {
		if (kill(getppid(), SIGUSR1) != 0) {
			syseventd_err_print(
			    "signal to the parent failed - %s\n",
			    strerror(errno));
			syseventd_exit(2);
		}
	}

	syseventd_print(8, "Pausing\n");

	/* All work happens on the worker threads from here on */
	for (;;) {
		(void) pause();
	}
	/* NOTREACHED */
	return (0);
}

/*
 * door_upcall - called from the kernel via kernel sysevent door
 *		to upload event(s).
 *
 *		This routine should never block.  If resources are
 *		not available to immediately accept the event buffer
 *		EAGAIN is returned to the kernel.
 *
 *		Once resources are available, the kernel is notified
 *		via a modctl interface to resume event delivery to
 *		syseventd.
 *
 */
/*ARGSUSED*/
static void
door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid)
{
	sysevent_t *ev;
	int rval;


	/* door_lock serializes buffer handoff with dispatch_message() */
	(void) mutex_lock(&door_lock);
	if (args == NULL) {
		rval = EINVAL;
	} else if (sema_trywait(&sema_eventbuf)) {
		/*
		 * No free slot in eventbuf; record EAGAIN in
		 * door_upcall_retval so dispatch_message() will
		 * kickstart kernel delivery once a slot frees up.
		 */
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;
		syseventd_print(2, "door_upcall: busy event %llx "
		    "retry\n", sysevent_get_seq(ev));
		rval = door_upcall_retval = EAGAIN;
	} else {
		/*
		 * Copy received message to local buffer.
		 */
		size_t size;
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;

		syseventd_print(2, "door_upcall: event %llx in eventbuf %d\n",
		    sysevent_get_seq(ev), deliver_buf);
		/* Truncate to the local buffer size; long events are
		 * re-fetched via MODEVENTS_GETDATA in dispatch() */
		size = sysevent_get_size(ev) > LOGEVENT_BUFSIZE ?
		    LOGEVENT_BUFSIZE : sysevent_get_size(ev);
		(void) bcopy(ev, eventbuf[deliver_buf], size);
		deliver_buf = (deliver_buf + 1) % SE_EVENT_DISPATCH_CNT;
		rval = 0;
		(void) sema_post(&sema_dispatch);
	}

	(void) mutex_unlock(&door_lock);

	/*
	 * Filling in return values for door_return
	 */
	(void) door_return((void *)&rval, sizeof (rval), NULL, 0);
	(void) door_return(NULL, 0, NULL, 0);
}

/*
 * dispatch_message - dispatch message thread
 *			This thread spins until an event buffer is
 *			delivered from the kernel.
 *
 *			It will wait to dispatch an event to any clients
 *			until adequate resources are available to process
 *			the event buffer.
 */
static void
dispatch_message(void)
{
	int error;

	for (;;) {
		syseventd_print(3, "dispatch_message: thread started\n");
		/*
		 * Spin till a message comes
		 */
		while (sema_wait(&sema_dispatch) != 0) {
			syseventd_print(1,
			    "dispatch_message: sema_wait failed\n");
			(void) sleep(1);
		}

		syseventd_print(3, "dispatch_message: sema_dispatch\n");

		/*
		 * Wait for available resources
		 */
		while (sema_wait(&sema_resource) != 0) {
			syseventd_print(1, "dispatch_message: sema_wait "
			    "failed\n");
			(void) sleep(1);
		}

		syseventd_print(2, "dispatch_message: eventbuf %d\n",
		    dispatch_buf);

		/*
		 * Client dispatch; retry until the transient allocation
		 * failures inside dispatch() clear up.
		 */
		do {
			error = dispatch();
		} while (error == EAGAIN);

		syseventd_print(2, "eventbuf %d dispatched\n", dispatch_buf);
		dispatch_buf = (dispatch_buf + 1) % SE_EVENT_DISPATCH_CNT;

		/*
		 * kernel received a busy signal -
		 * kickstart the kernel delivery thread
		 * door_lock blocks the kernel so we hold it for the
		 * shortest time possible.
		 */
		(void) mutex_lock(&door_lock);
		if (door_upcall_retval == EAGAIN && !fini_pending) {
			syseventd_print(3, "dispatch_message: retrigger "
			    "door_upcall_retval = %d\n",
			    door_upcall_retval);
			(void) modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH,
			    NULL, NULL, NULL, 0);
			door_upcall_retval = 0;
		}
		(void) mutex_unlock(&door_lock);
	}
	/* NOTREACHED */
}

/*
 * drain_eventq - Called to drain all pending events from the client's
 *		  event queue.
 */
static void
drain_eventq(struct sysevent_client *scp, int status)
{
	struct event_dispatch_pkg *d_pkg;
	struct event_dispatchq *eventq, *eventq_next;

	/* NOTE(review): caller is expected to hold scp->client_lock */
	syseventd_print(3, "Draining eventq for client %d\n",
	    scp->client_num);

	eventq = scp->eventq;
	while (eventq) {
		/*
		 * Mark all dispatched events as completed, but indicate the
		 * error status
		 */
		d_pkg = eventq->d_pkg;

		syseventd_print(4, "drain event 0X%llx for client %d\n",
		    sysevent_get_seq(d_pkg->ev), scp->client_num);

		/* Only undelivered packages still need their waiter woken */
		if (d_pkg->completion_state == SE_NOT_DISPATCHED) {
			d_pkg->completion_status = status;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);
		}

		eventq_next = eventq->next;
		free(eventq);
		eventq = eventq_next;
		scp->eventq = eventq;
	}
}

/*
 * client_deliver_event_thr - Client delivery thread
 *				This thread will process any events on this
 *				client's eventq.
 */
static void
client_deliver_event_thr(void *arg)
{
	int flag, error, i;
	sysevent_t *ev;
	hrtime_t now;
	module_t *mod;
	struct event_dispatchq *eventq;
	struct sysevent_client *scp;
	struct event_dispatch_pkg *d_pkg;

	scp = (struct sysevent_client *)arg;
	mod = (module_t *)scp->client_data;

	(void) mutex_lock(&scp->client_lock);
	for (;;) {
		while (scp->eventq == NULL) {

			/*
			 * Client has been suspended or unloaded, go no further.
			 */
			if (fini_pending) {
				scp->client_flags &= ~SE_CLIENT_THR_RUNNING;
				syseventd_print(3, "Client %d delivery thread "
				    "exiting flags: 0X%x\n",
				    scp->client_num, scp->client_flags);
				(void) mutex_unlock(&scp->client_lock);
				return;
			}

			(void) cond_wait(&scp->client_cv, &scp->client_lock);

		}

		/*
		 * Process events from the head of the eventq, eventq is locked
		 * going into the processing.
		 */
		eventq = scp->eventq;
		while (eventq != NULL) {
			d_pkg = eventq->d_pkg;
			d_pkg->completion_state = SE_OUTSTANDING;
			scp->eventq = eventq->next;
			free(eventq);
			/* Drop the lock while calling into the SLM */
			(void) mutex_unlock(&scp->client_lock);


			flag = error = 0;
			ev = d_pkg->ev;

			syseventd_print(3, "Start delivery for client %d "
			    "with retry count %d\n",
			    scp->client_num, d_pkg->retry_count);

			/*
			 * Retry limit has been reached by this client, indicate
			 * that no further retries are allowed
			 */
			for (i = 0; i <= scp->retry_limit; ++i) {
				if (i == scp->retry_limit)
					flag = SE_NO_RETRY;

				/* Start the clock for the event delivery */
				d_pkg->start_time = gethrtime();

				syseventd_print(9, "Deliver to module client "
				    "%s\n", mod->name);

				error = mod->deliver_event(ev, flag);

				/* Can not allow another retry */
				if (i == scp->retry_limit)
					error = 0;

				/* Stop the clock */
				now = gethrtime();

				/*
				 * Suspend event processing and drain the
				 * event q for latent clients
				 */
				if (now - d_pkg->start_time >
				    ((hrtime_t)SE_TIMEOUT * NANOSEC)) {
					syseventd_print(1, "Unresponsive "
					    "client %d: Draining eventq and "
					    "suspending event delivery\n",
					    scp->client_num);
					(void) mutex_lock(&scp->client_lock);
					scp->client_flags &=
					    ~SE_CLIENT_THR_RUNNING;
					scp->client_flags |=
					    SE_CLIENT_SUSPENDED;

					/* Cleanup current event */
					d_pkg->completion_status = EFAULT;
					d_pkg->completion_state = SE_COMPLETE;
					(void) sema_post(
					    d_pkg->completion_sema);

					/*
					 * Drain the remaining events from the
					 * queue.
					 */
					drain_eventq(scp, EINVAL);
					(void) mutex_unlock(&scp->client_lock);
					return;
				}

				/* Event delivery retry requested */
				if (fini_pending || error != EAGAIN) {
					break;
				} else {
					(void) sleep(SE_RETRY_TIME);
				}
			}

			/* Re-take the lock to publish completion status */
			(void) mutex_lock(&scp->client_lock);
			d_pkg->completion_status = error;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);
			syseventd_print(3, "Completed delivery with "
			    "error %d\n", error);
			eventq = scp->eventq;
		}

		syseventd_print(3, "No more events to process for client %d\n",
		    scp->client_num);

		/* Return if this was a synchronous delivery */
		if (!SE_CLIENT_IS_THR_RUNNING(scp)) {
			(void) mutex_unlock(&scp->client_lock);
			return;
		}

	}
}

/*
 * client_deliver_event - Client specific event delivery
 *			This routine will allocate and initialize the
 *			neccessary per-client dispatch data.
 *
 *			If the eventq is not empty, it may be assumed that
 *			a delivery thread exists for this client and the
 *			dispatch data is appended to the eventq.
 *
 *			The dispatch package is freed by the event completion
 *			thread (event_completion_thr) and the eventq entry
 *			is freed by the event delivery thread.
816 */ 817 static struct event_dispatch_pkg * 818 client_deliver_event(struct sysevent_client *scp, sysevent_t *ev, 819 sema_t *completion_sema) 820 { 821 size_t ev_sz = sysevent_get_size(ev); 822 struct event_dispatchq *newq, *tmp; 823 struct event_dispatch_pkg *d_pkg; 824 825 syseventd_print(3, "client_deliver_event: id 0x%llx size %d\n", 826 (longlong_t)sysevent_get_seq(ev), ev_sz); 827 if (debug_level == 9) { 828 se_print(stdout, ev); 829 } 830 831 /* 832 * Check for suspended client 833 */ 834 (void) mutex_lock(&scp->client_lock); 835 if (SE_CLIENT_IS_SUSPENDED(scp) || !SE_CLIENT_IS_THR_RUNNING(scp)) { 836 (void) mutex_unlock(&scp->client_lock); 837 return (NULL); 838 } 839 840 /* 841 * Allocate a new dispatch package and eventq entry 842 */ 843 newq = (struct event_dispatchq *)malloc( 844 sizeof (struct event_dispatchq)); 845 if (newq == NULL) { 846 (void) mutex_unlock(&scp->client_lock); 847 return (NULL); 848 } 849 850 d_pkg = (struct event_dispatch_pkg *)malloc( 851 sizeof (struct event_dispatch_pkg)); 852 if (d_pkg == NULL) { 853 free(newq); 854 (void) mutex_unlock(&scp->client_lock); 855 return (NULL); 856 } 857 858 /* Initialize the dispatch package */ 859 d_pkg->scp = scp; 860 d_pkg->retry_count = 0; 861 d_pkg->completion_status = 0; 862 d_pkg->completion_state = SE_NOT_DISPATCHED; 863 d_pkg->completion_sema = completion_sema; 864 d_pkg->ev = ev; 865 newq->d_pkg = d_pkg; 866 newq->next = NULL; 867 868 if (scp->eventq != NULL) { 869 870 /* Add entry to the end of the eventq */ 871 tmp = scp->eventq; 872 while (tmp->next != NULL) 873 tmp = tmp->next; 874 tmp->next = newq; 875 } else { 876 /* event queue empty, wakeup delivery thread */ 877 scp->eventq = newq; 878 (void) cond_signal(&scp->client_cv); 879 } 880 (void) mutex_unlock(&scp->client_lock); 881 882 return (d_pkg); 883 } 884 885 /* 886 * event_completion_thr - Event completion thread. This thread routine 887 * waits for all client delivery thread to complete 888 * delivery of a particular event. 
 */
static void
event_completion_thr()
{
	int ret, i, client_count, ok_to_free;
	sysevent_id_t eid;
	struct sysevent_client *scp;
	struct ev_completion *ev_comp;
	struct event_dispatchq *dispatchq;
	struct event_dispatch_pkg *d_pkg;

	(void) mutex_lock(&ev_comp_lock);
	for (;;) {
		while (event_compq == NULL) {
			(void) cond_wait(&event_comp_cv, &ev_comp_lock);
		}

		/*
		 * Process event completions from the head of the
		 * completion queue
		 */
		ev_comp = event_compq;
		while (ev_comp) {
			/* Drop the queue lock while waiting on clients */
			(void) mutex_unlock(&ev_comp_lock);
			eid.eid_seq = sysevent_get_seq(ev_comp->ev);
			sysevent_get_time(ev_comp->ev, &eid.eid_ts);
			client_count = ev_comp->client_count;
			ok_to_free = 1;

			syseventd_print(3, "Wait for event completion of "
			    "event 0X%llx on %d clients\n",
			    eid.eid_seq, client_count);

			while (client_count) {
				syseventd_print(9, "Waiting for %d clients on "
				    "event id 0X%llx\n", client_count,
				    eid.eid_seq);

				(void) sema_wait(&ev_comp->client_sema);
				--client_count;
			}

			syseventd_print(3, "Cleaning up clients for event "
			    "0X%llx\n", eid.eid_seq);
			dispatchq = ev_comp->dispatch_list;
			while (dispatchq != NULL) {
				d_pkg = dispatchq->d_pkg;
				scp = d_pkg->scp;

				/*
				 * Any EAGAIN completion means the event must
				 * stay in the kernel queue for replay.
				 */
				if (d_pkg->completion_status == EAGAIN)
					ok_to_free = 0;

				syseventd_print(4, "Delivery of 0X%llx "
				    "complete for client %d retry count %d "
				    "status %d\n", eid.eid_seq,
				    scp->client_num,
				    d_pkg->retry_count,
				    d_pkg->completion_status);

				free(d_pkg);
				ev_comp->dispatch_list = dispatchq->next;
				free(dispatchq);
				dispatchq = ev_comp->dispatch_list;
			}

			if (ok_to_free) {
				/* Tell the kernel to release its copy */
				for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
					if ((ret = modctl(MODEVENTS,
					    (uintptr_t)MODEVENTS_FREEDATA,
					    (uintptr_t)&eid, NULL,
					    NULL, 0)) != 0) {
						syseventd_print(1, "attempting "
						    "to free event 0X%llx\n",
						    eid.eid_seq);

						/*
						 * Kernel may need time to
						 * move this event buffer to
						 * the sysevent sent queue
						 */
						(void) sleep(1);
					} else {
						break;
					}
				}
				if (ret) {
					syseventd_print(1, "Unable to free "
					    "event 0X%llx from the "
					    "kernel\n", eid.eid_seq);
				}
			} else {
				syseventd_print(1, "Not freeing event 0X%llx\n",
				    eid.eid_seq);
			}

			syseventd_print(2, "Event delivery complete for id "
			    "0X%llx\n", eid.eid_seq);

			/* Re-take the lock to advance the completion queue */
			(void) mutex_lock(&ev_comp_lock);
			event_compq = ev_comp->next;
			free(ev_comp->ev);
			free(ev_comp);
			ev_comp = event_compq;
			/* One more dispatch slot is available */
			(void) sema_post(&sema_resource);
		}

		/*
		 * Event completion queue is empty, signal possible unload
		 * operation
		 */
		(void) cond_signal(&event_comp_cv);

		syseventd_print(3, "No more events\n");
	}
}

/*
 * dispatch - Dispatch the current event buffer to all valid SLM clients.
 */
static int
dispatch(void)
{
	int ev_sz, i, client_count = 0;
	sysevent_t *new_ev;
	sysevent_id_t eid;
	struct ev_completion *ev_comp, *tmp;
	struct event_dispatchq *dispatchq, *client_list;
	struct event_dispatch_pkg *d_pkg;

	/* Check for module unload operation */
	if (rw_tryrdlock(&mod_unload_lock) != 0) {
		syseventd_print(2, "unload in progress abort delivery\n");
		(void) sema_post(&sema_eventbuf);
		(void) sema_post(&sema_resource);
		return (0);
	}

	syseventd_print(3, "deliver dispatch buffer %d", dispatch_buf);
	eid.eid_seq = sysevent_get_seq(eventbuf[dispatch_buf]);
	sysevent_get_time(eventbuf[dispatch_buf], &eid.eid_ts);
	syseventd_print(3, "deliver msg id: 0x%llx\n", eid.eid_seq);

	/*
	 * ev_comp is used to hold event completion data.  It is freed
	 * by the event completion thread (event_completion_thr).
	 */
	ev_comp = (struct ev_completion *)
	    malloc(sizeof (struct ev_completion));
	if (ev_comp == NULL) {
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate event completion buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}
	ev_comp->dispatch_list = NULL;
	ev_comp->next = NULL;
	(void) sema_init(&ev_comp->client_sema, 0, USYNC_THREAD, NULL);

	ev_sz = sysevent_get_size(eventbuf[dispatch_buf]);
	new_ev = calloc(1, ev_sz);
	if (new_ev == NULL) {
		free(ev_comp);
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate new event buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}


	/*
	 * For long messages, copy additional data from kernel
	 */
	if (ev_sz > LOGEVENT_BUFSIZE) {
		int ret = 0;

		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);

		for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
			if ((ret = modctl(MODEVENTS,
			    (uintptr_t)MODEVENTS_GETDATA,
			    (uintptr_t)&eid,
			    (uintptr_t)ev_sz,
			    (uintptr_t)new_ev, 0))
			    == 0)
				break;
			else
				(void) sleep(1);
		}
		if (ret) {
			syseventd_print(1, "GET_DATA failed for 0X%llx:%llx\n",
			    eid.eid_ts, eid.eid_seq);
			free(new_ev);
			free(ev_comp);
			(void) rw_unlock(&mod_unload_lock);
			return (EAGAIN);
		}
	} else {
		(void) bcopy(eventbuf[dispatch_buf], new_ev, ev_sz);
		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);
	}


	/*
	 * Deliver a copy of eventbuf to clients so
	 * eventbuf can be used for the next message
	 */
	for (i = 0; i < MAX_SLM; ++i) {

		/* Don't bother for suspended or unloaded clients */
		if (!SE_CLIENT_IS_LOADED(sysevent_client_tbl[i]) ||
		    SE_CLIENT_IS_SUSPENDED(sysevent_client_tbl[i]))
			continue;

		/*
		 * Allocate event dispatch queue entry.  All queue entries
		 * are freed by the event completion thread as client
		 * delivery completes.
		 */
		dispatchq = (struct event_dispatchq *)malloc(
		    sizeof (struct event_dispatchq));
		if (dispatchq == NULL) {
			syseventd_print(1, "Can not allocate dispatch q "
			    "for event id 0X%llx client %d\n", eid.eid_seq, i);
			continue;
		}
		dispatchq->next = NULL;

		/* Initiate client delivery */
		d_pkg = client_deliver_event(sysevent_client_tbl[i],
		    new_ev, &ev_comp->client_sema);
		if (d_pkg == NULL) {
			syseventd_print(1, "Can not allocate dispatch "
			    "package for event id 0X%llx client %d\n",
			    eid.eid_seq, i);
			free(dispatchq);
			continue;
		}
		dispatchq->d_pkg = d_pkg;
		++client_count;

		/* client_list tracks the tail of the dispatch list */
		if (ev_comp->dispatch_list == NULL) {
			ev_comp->dispatch_list = dispatchq;
			client_list = dispatchq;
		} else {
			client_list->next = dispatchq;
			client_list = client_list->next;
		}
	}

	ev_comp->client_count = client_count;
	ev_comp->ev = new_ev;

	(void) mutex_lock(&ev_comp_lock);

	if (event_compq == NULL) {
		syseventd_print(3, "Wakeup event completion thread for "
		    "id 0X%llx\n", eid.eid_seq);
		event_compq = ev_comp;
		(void) cond_signal(&event_comp_cv);
	} else {

		/* Add entry to the end of the event completion queue */
		tmp = event_compq;
		while (tmp->next != NULL)
			tmp = tmp->next;
		tmp->next = ev_comp;
		syseventd_print(3, "event added to completion queue for "
		    "id 0X%llx\n", eid.eid_seq);
	}
	(void) mutex_unlock(&ev_comp_lock);
	(void) rw_unlock(&mod_unload_lock);

	return (0);
}

#define	MODULE_DIR_HW "/usr/platform/%s/lib/sysevent/modules/"
#define	MODULE_DIR_GEN "/usr/lib/sysevent/modules/"
#define	MOD_DIR_NUM	3
static char dirname[MOD_DIR_NUM][MAXPATHLEN];

1171 static char * 1172 dir_num2name(int dirnum) 1173 { 1174 char infobuf[MAXPATHLEN]; 1175 1176 if (dirnum >= MOD_DIR_NUM) 1177 return (NULL); 1178 1179 if (dirname[0][0] == '\0') { 1180 if (sysinfo(SI_PLATFORM, infobuf, MAXPATHLEN) == -1) { 1181 syseventd_print(1, "dir_num2name: " 1182 "sysinfo error %s\n", strerror(errno)); 1183 return (NULL); 1184 } else if (snprintf(dirname[0], sizeof (dirname[0]), 1185 MODULE_DIR_HW, infobuf) >= sizeof (dirname[0])) { 1186 syseventd_print(1, "dir_num2name: " 1187 "platform name too long: %s\n", 1188 infobuf); 1189 return (NULL); 1190 } 1191 if (sysinfo(SI_MACHINE, infobuf, MAXPATHLEN) == -1) { 1192 syseventd_print(1, "dir_num2name: " 1193 "sysinfo error %s\n", strerror(errno)); 1194 return (NULL); 1195 } else if (snprintf(dirname[1], sizeof (dirname[1]), 1196 MODULE_DIR_HW, infobuf) >= sizeof (dirname[1])) { 1197 syseventd_print(1, "dir_num2name: " 1198 "machine name too long: %s\n", 1199 infobuf); 1200 return (NULL); 1201 } 1202 (void) strcpy(dirname[2], MODULE_DIR_GEN); 1203 } 1204 1205 return (dirname[dirnum]); 1206 } 1207 1208 1209 /* 1210 * load_modules - Load modules found in the common syseventd module directories 1211 * Modules that do not provide valid interfaces are rejected. 
1212 */ 1213 static void 1214 load_modules(char *dirname) 1215 { 1216 int client_id; 1217 DIR *mod_dir; 1218 module_t *mod; 1219 struct dirent *entp; 1220 struct slm_mod_ops *mod_ops; 1221 struct sysevent_client *scp; 1222 1223 if (dirname == NULL) 1224 return; 1225 1226 /* Return silently if module directory does not exist */ 1227 if ((mod_dir = opendir(dirname)) == NULL) { 1228 syseventd_print(1, "Unable to open module directory %s: %s\n", 1229 dirname, strerror(errno)); 1230 return; 1231 } 1232 1233 syseventd_print(3, "loading modules from %s\n", dirname); 1234 1235 /* 1236 * Go through directory, looking for files ending with .so 1237 */ 1238 while ((entp = readdir(mod_dir)) != NULL) { 1239 void *dlh, *f; 1240 char *tmp, modpath[MAXPATHLEN]; 1241 1242 if (((tmp = strstr(entp->d_name, MODULE_SUFFIX)) == NULL) || 1243 (tmp[strlen(MODULE_SUFFIX)] != '\0')) { 1244 continue; 1245 } 1246 1247 if (snprintf(modpath, sizeof (modpath), "%s%s", 1248 dirname, entp->d_name) >= sizeof (modpath)) { 1249 syseventd_err_print(INIT_PATH_ERR, modpath); 1250 continue; 1251 } 1252 if ((dlh = dlopen(modpath, RTLD_LAZY)) == NULL) { 1253 syseventd_err_print(LOAD_MOD_DLOPEN_ERR, 1254 modpath, dlerror()); 1255 continue; 1256 } else if ((f = dlsym(dlh, EVENT_INIT)) == NULL) { 1257 syseventd_err_print(LOAD_MOD_NO_INIT, 1258 modpath, dlerror()); 1259 (void) dlclose(dlh); 1260 continue; 1261 } 1262 1263 mod = malloc(sizeof (*mod)); 1264 if (mod == NULL) { 1265 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod", 1266 strerror(errno)); 1267 (void) dlclose(dlh); 1268 continue; 1269 } 1270 1271 mod->name = strdup(entp->d_name); 1272 if (mod->name == NULL) { 1273 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod->name", 1274 strerror(errno)); 1275 (void) dlclose(dlh); 1276 free(mod); 1277 continue; 1278 } 1279 1280 mod->dlhandle = dlh; 1281 mod->event_mod_init = (struct slm_mod_ops *(*)())f; 1282 1283 /* load in other module functions */ 1284 mod->event_mod_fini = (void (*)())dlsym(dlh, EVENT_FINI); 1285 
if (mod->event_mod_fini == NULL) { 1286 syseventd_err_print(LOAD_MOD_DLSYM_ERR, mod->name, 1287 dlerror()); 1288 free(mod->name); 1289 free(mod); 1290 (void) dlclose(dlh); 1291 continue; 1292 } 1293 1294 /* Call module init routine */ 1295 if ((mod_ops = mod->event_mod_init()) == NULL) { 1296 syseventd_err_print(LOAD_MOD_EINVAL, mod->name); 1297 free(mod->name); 1298 free(mod); 1299 (void) dlclose(dlh); 1300 continue; 1301 } 1302 if (mod_ops->major_version != SE_MAJOR_VERSION) { 1303 syseventd_err_print(LOAD_MOD_VERSION_MISMATCH, 1304 mod->name, SE_MAJOR_VERSION, 1305 mod_ops->major_version); 1306 mod->event_mod_fini(); 1307 free(mod->name); 1308 free(mod); 1309 (void) dlclose(dlh); 1310 continue; 1311 } 1312 1313 mod->deliver_event = mod_ops->deliver_event; 1314 /* Add module entry to client list */ 1315 if ((client_id = insert_client((void *)mod, SLM_CLIENT, 1316 (mod_ops->retry_limit <= SE_MAX_RETRY_LIMIT ? 1317 mod_ops->retry_limit : SE_MAX_RETRY_LIMIT))) < 0) { 1318 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client", 1319 strerror(errno)); 1320 mod->event_mod_fini(); 1321 free(mod->name); 1322 free(mod); 1323 (void) dlclose(dlh); 1324 continue; 1325 } 1326 1327 scp = sysevent_client_tbl[client_id]; 1328 ++concurrency_level; 1329 (void) thr_setconcurrency(concurrency_level); 1330 if (thr_create(NULL, 0, 1331 (void *(*)(void *))client_deliver_event_thr, 1332 (void *)scp, THR_BOUND, &scp->tid) != 0) { 1333 1334 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client", 1335 strerror(errno)); 1336 mod->event_mod_fini(); 1337 free(mod->name); 1338 free(mod); 1339 (void) dlclose(dlh); 1340 continue; 1341 } 1342 scp->client_flags |= SE_CLIENT_THR_RUNNING; 1343 1344 syseventd_print(3, "loaded module %s\n", entp->d_name); 1345 } 1346 1347 (void) closedir(mod_dir); 1348 syseventd_print(3, "modules loaded\n"); 1349 } 1350 1351 /* 1352 * unload_modules - modules are unloaded prior to graceful shutdown or 1353 * before restarting the daemon upon receipt of 1354 * 
SIGHUP. 1355 */ 1356 static void 1357 unload_modules(int sig) 1358 { 1359 int i, count, done; 1360 module_t *mod; 1361 struct sysevent_client *scp; 1362 1363 /* 1364 * unload modules that are ready, skip those that have not 1365 * drained their event queues. 1366 */ 1367 count = done = 0; 1368 while (done < MAX_SLM) { 1369 /* Don't wait indefinitely for unresponsive clients */ 1370 if (sig != SIGHUP && count > SE_TIMEOUT) { 1371 break; 1372 } 1373 1374 done = 0; 1375 1376 /* Shutdown clients */ 1377 for (i = 0; i < MAX_SLM; ++i) { 1378 scp = sysevent_client_tbl[i]; 1379 if (mutex_trylock(&scp->client_lock) == 0) { 1380 if (scp->client_type != SLM_CLIENT || 1381 scp->client_data == NULL) { 1382 (void) mutex_unlock(&scp->client_lock); 1383 done++; 1384 continue; 1385 } 1386 } else { 1387 syseventd_print(3, "Skipping unload of " 1388 "client %d: client locked\n", 1389 scp->client_num); 1390 continue; 1391 } 1392 1393 /* 1394 * Drain the eventq and wait for delivery thread to 1395 * cleanly exit 1396 */ 1397 drain_eventq(scp, EAGAIN); 1398 (void) cond_signal(&scp->client_cv); 1399 (void) mutex_unlock(&scp->client_lock); 1400 (void) thr_join(scp->tid, NULL, NULL); 1401 1402 /* 1403 * It is now safe to unload the module 1404 */ 1405 mod = (module_t *)scp->client_data; 1406 syseventd_print(2, "Unload %s\n", mod->name); 1407 mod->event_mod_fini(); 1408 (void) dlclose(mod->dlhandle); 1409 free(mod->name); 1410 (void) mutex_lock(&client_tbl_lock); 1411 delete_client(i); 1412 (void) mutex_unlock(&client_tbl_lock); 1413 ++done; 1414 1415 } 1416 ++count; 1417 (void) sleep(1); 1418 } 1419 1420 /* 1421 * Wait for event completions 1422 */ 1423 syseventd_print(2, "waiting for event completions\n"); 1424 (void) mutex_lock(&ev_comp_lock); 1425 while (event_compq != NULL) { 1426 (void) cond_wait(&event_comp_cv, &ev_comp_lock); 1427 } 1428 (void) mutex_unlock(&ev_comp_lock); 1429 } 1430 1431 /* 1432 * syseventd_init - Called at daemon (re)start-up time to load modules 1433 * and 
kickstart the kernel delivery engine.
 */
static void
syseventd_init()
{
	int i, fd;
	char local_door_file[PATH_MAX + 1];

	/* Re-enable event intake after a possible prior fini */
	fini_pending = 0;

	concurrency_level = MIN_CONCURRENCY_LEVEL;
	(void) thr_setconcurrency(concurrency_level);

	/*
	 * Load client modules for event delivering
	 */
	for (i = 0; i < MOD_DIR_NUM; ++i) {
		load_modules(dir_num2name(i));
	}

	/*
	 * Create kernel delivery door service
	 */
	syseventd_print(8, "Create a door for kernel upcalls\n");
	if (snprintf(local_door_file, sizeof (local_door_file), "%s%s",
	    root_dir, LOGEVENT_DOOR_UPCALL) >= sizeof (local_door_file)) {
		syseventd_err_print(INIT_PATH_ERR, local_door_file);
		syseventd_exit(5);
	}

	/*
	 * Remove door file for robustness.
	 */
	if (unlink(local_door_file) != 0)
		syseventd_print(8, "Unlink of %s failed.\n", local_door_file);

	/* Pre-create the attach point; a racing EEXIST is tolerated */
	fd = open(local_door_file, O_CREAT|O_RDWR, S_IREAD|S_IWRITE);
	if ((fd == -1) && (errno != EEXIST)) {
		syseventd_err_print(INIT_OPEN_DOOR_ERR, strerror(errno));
		syseventd_exit(5);
	}
	(void) close(fd);

	upcall_door = door_create(door_upcall, NULL,
	    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	if (upcall_door == -1) {
		syseventd_err_print(INIT_CREATE_DOOR_ERR, strerror(errno));
		syseventd_exit(5);
	}

	/* Detach any stale door before attaching ours */
	(void) fdetach(local_door_file);
retry:
	if (fattach(upcall_door, local_door_file) != 0) {
		/*
		 * NOTE(review): EBUSY retries immediately with no delay,
		 * busy-spinning until the attach point frees up — confirm
		 * whether a short sleep/backoff is wanted here.
		 */
		if (errno == EBUSY)
			goto retry;
		syseventd_err_print(INIT_FATTACH_ERR, strerror(errno));
		(void) door_revoke(upcall_door);
		syseventd_exit(5);
	}

	/*
	 * Tell kernel the door name and start delivery
	 */
	syseventd_print(2,
	    "local_door_file = %s\n", local_door_file);
	if (modctl(MODEVENTS,
	    (uintptr_t)MODEVENTS_SET_DOOR_UPCALL_FILENAME,
	    (uintptr_t)local_door_file, NULL, NULL, 0) < 0) {
		syseventd_err_print(INIT_DOOR_NAME_ERR, strerror(errno));
		syseventd_exit(6);
	}

	door_upcall_retval = 0;

	/* Request replay of events the kernel buffered while we were down */
	if (modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0)
	    < 0) {
		syseventd_err_print(KERNEL_REPLAY_ERR, strerror(errno));
		syseventd_exit(7);
	}
}

/*
 * syseventd_fini - shut down daemon, but do not exit
 */
static void
syseventd_fini(int sig)
{
	/*
	 * Indicate that event queues should be drained and no
	 * additional events be accepted
	 */
	fini_pending = 1;

	/* Close the kernel event door to halt delivery */
	(void) door_revoke(upcall_door);

	/* Unload with the writer lock held so no dispatch races module teardown */
	syseventd_print(1, "Unloading modules\n");
	(void) rw_wrlock(&mod_unload_lock);
	unload_modules(sig);
	(void) rw_unlock(&mod_unload_lock);

}

/*
 * enter_daemon_lock - lock the daemon file lock
 *
 * Use an advisory lock to ensure that only one daemon process is active
 * in the system at any point in time. If the lock is held by another
 * process, do not block but return the pid owner of the lock to the
 * caller immediately. The lock is cleared if the holding daemon process
 * exits for any reason even if the lock file remains, so the daemon can
 * be restarted if necessary. The lock file is DAEMON_LOCK_FILE.
1545 */ 1546 static pid_t 1547 enter_daemon_lock(void) 1548 { 1549 struct flock lock; 1550 1551 syseventd_print(8, "enter_daemon_lock: lock file = %s\n", 1552 DAEMON_LOCK_FILE); 1553 1554 if (snprintf(local_lock_file, sizeof (local_lock_file), "%s%s", 1555 root_dir, DAEMON_LOCK_FILE) >= sizeof (local_lock_file)) { 1556 syseventd_err_print(INIT_PATH_ERR, local_lock_file); 1557 syseventd_exit(8); 1558 } 1559 daemon_lock_fd = open(local_lock_file, O_CREAT|O_RDWR, 0644); 1560 if (daemon_lock_fd < 0) { 1561 syseventd_err_print(INIT_LOCK_OPEN_ERR, 1562 local_lock_file, strerror(errno)); 1563 syseventd_exit(8); 1564 } 1565 1566 lock.l_type = F_WRLCK; 1567 lock.l_whence = SEEK_SET; 1568 lock.l_start = 0; 1569 lock.l_len = 0; 1570 1571 if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) { 1572 if (fcntl(daemon_lock_fd, F_GETLK, &lock) == -1) { 1573 syseventd_err_print(INIT_LOCK_ERR, 1574 local_lock_file, strerror(errno)); 1575 exit(2); 1576 } 1577 return (lock.l_pid); 1578 } 1579 hold_daemon_lock = 1; 1580 1581 return (getpid()); 1582 } 1583 1584 /* 1585 * exit_daemon_lock - release the daemon file lock 1586 */ 1587 static void 1588 exit_daemon_lock(void) 1589 { 1590 struct flock lock; 1591 1592 lock.l_type = F_UNLCK; 1593 lock.l_whence = SEEK_SET; 1594 lock.l_start = 0; 1595 lock.l_len = 0; 1596 1597 if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) { 1598 syseventd_err_print(INIT_UNLOCK_ERR, 1599 local_lock_file, strerror(errno)); 1600 } 1601 1602 if (close(daemon_lock_fd) == -1) { 1603 syseventd_err_print(INIT_LOCK_CLOSE_ERR, 1604 local_lock_file, strerror(errno)); 1605 exit(-1); 1606 } 1607 } 1608 1609 /* 1610 * syseventd_err_print - print error messages to the terminal if not 1611 * yet daemonized or to syslog. 1612 */ 1613 /*PRINTFLIKE1*/ 1614 void 1615 syseventd_err_print(char *message, ...) 
1616 { 1617 va_list ap; 1618 1619 (void) mutex_lock(&err_mutex); 1620 va_start(ap, message); 1621 1622 if (logflag) { 1623 (void) vsyslog(LOG_ERR, message, ap); 1624 } else { 1625 (void) fprintf(stderr, "%s: ", prog); 1626 (void) vfprintf(stderr, message, ap); 1627 } 1628 va_end(ap); 1629 (void) mutex_unlock(&err_mutex); 1630 } 1631 1632 /* 1633 * syseventd_print - print messages to the terminal or to syslog 1634 * the following levels are implemented: 1635 * 1636 * 1 - transient errors that does not affect normal program flow 1637 * 2 - upcall/dispatch interaction 1638 * 3 - program flow trace as each message goes through the daemon 1639 * 8 - all the nit-gritty details of startup and shutdown 1640 * 9 - very verbose event flow tracing (no daemonization of syseventd) 1641 * 1642 */ 1643 /*PRINTFLIKE2*/ 1644 void 1645 syseventd_print(int level, char *message, ...) 1646 { 1647 va_list ap; 1648 static int newline = 1; 1649 1650 if (level > debug_level) { 1651 return; 1652 } 1653 1654 (void) mutex_lock(&err_mutex); 1655 va_start(ap, message); 1656 if (logflag) { 1657 (void) syslog(LOG_DEBUG, "%s[%ld]: ", 1658 prog, getpid()); 1659 (void) vsyslog(LOG_DEBUG, message, ap); 1660 } else { 1661 if (newline) { 1662 (void) fprintf(stdout, "%s[%ld]: ", 1663 prog, getpid()); 1664 (void) vfprintf(stdout, message, ap); 1665 } else { 1666 (void) vfprintf(stdout, message, ap); 1667 } 1668 } 1669 if (message[strlen(message)-1] == '\n') { 1670 newline = 1; 1671 } else { 1672 newline = 0; 1673 } 1674 va_end(ap); 1675 (void) mutex_unlock(&err_mutex); 1676 } 1677