1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * syseventd - The system event daemon 29 * 30 * This daemon dispatches event buffers received from the 31 * kernel to all interested SLM clients. SLMs in turn 32 * deliver the buffers to their particular application 33 * clients. 
34 */ 35 #include <stdio.h> 36 #include <sys/types.h> 37 #include <dirent.h> 38 #include <stdarg.h> 39 #include <stddef.h> 40 #include <stdlib.h> 41 #include <dlfcn.h> 42 #include <door.h> 43 #include <errno.h> 44 #include <fcntl.h> 45 #include <signal.h> 46 #include <strings.h> 47 #include <unistd.h> 48 #include <synch.h> 49 #include <syslog.h> 50 #include <thread.h> 51 #include <libsysevent.h> 52 #include <limits.h> 53 #include <locale.h> 54 #include <sys/sysevent.h> 55 #include <sys/sysevent_impl.h> 56 #include <sys/modctl.h> 57 #include <sys/stat.h> 58 #include <sys/systeminfo.h> 59 #include <sys/wait.h> 60 61 #include "sysevent_signal.h" 62 #include "syseventd.h" 63 #include "message.h" 64 65 extern int insert_client(void *client, int client_type, int retry_limit); 66 extern void delete_client(int id); 67 extern void initialize_client_tbl(void); 68 69 extern struct sysevent_client *sysevent_client_tbl[]; 70 extern mutex_t client_tbl_lock; 71 72 #define DEBUG_LEVEL_FORK 9 /* will run in background at all */ 73 /* levels less than DEBUG_LEVEL_FORK */ 74 75 int debug_level = 0; 76 char *root_dir = ""; /* Relative root for lock and door */ 77 78 /* Maximum number of outstanding events dispatched */ 79 #define SE_EVENT_DISPATCH_CNT 100 80 81 static int upcall_door; /* Kernel event door */ 82 static int door_upcall_retval; /* Kernel event posting return value */ 83 static int fini_pending = 0; /* fini pending flag */ 84 static int deliver_buf = 0; /* Current event buffer from kernel */ 85 static int dispatch_buf = 0; /* Current event buffer dispatched */ 86 static sysevent_t **eventbuf; /* Global array of event buffers */ 87 static struct ev_completion *event_compq; /* Event completion queue */ 88 static mutex_t ev_comp_lock; /* Event completion queue lock */ 89 static mutex_t err_mutex; /* error logging lock */ 90 static mutex_t door_lock; /* sync door return access */ 91 static rwlock_t mod_unload_lock; /* sync module unloading */ 92 93 /* declarations and 
definitions for avoiding multiple daemons running */
#define	DAEMON_LOCK_FILE "/var/run/syseventd.lock"
char local_lock_file[PATH_MAX + 1];
static int hold_daemon_lock;
static int daemon_lock_fd;

/*
 * sema_eventbuf - guards against the global buffer eventbuf
 *	being written to before it has been dispatched to clients
 *
 * sema_dispatch - synchronizes between the kernel uploading thread
 *	(producer) and the userland dispatch_message thread (consumer).
 *
 * sema_resource - throttles outstanding event consumption.
 *
 * event_comp_cv - synchronizes threads waiting for the event completion queue
 *	to empty or become active.
 */
static sema_t sema_eventbuf, sema_dispatch, sema_resource;
static cond_t event_comp_cv;

/* Self-tuning concurrency level */
#define	MIN_CONCURRENCY_LEVEL	4
static int concurrency_level = MIN_CONCURRENCY_LEVEL;


/* SLM defines */
#define	MODULE_SUFFIX	".so"		/* only files ending in this are loaded */
#define	EVENT_FINI	"slm_fini"	/* required SLM entry point */
#define	EVENT_INIT	"slm_init"	/* required SLM entry point */

#define	SE_TIMEOUT	60	/* Client dispatch timeout (seconds) */

/* syslog message related */
static int logflag = 0;
static char *prog;

/* function prototypes */
static void door_upcall(void *cookie, char *args, size_t alen, door_desc_t *ddp,
	uint_t ndid);
static void dispatch_message(void);
static int dispatch(void);
static void event_completion_thr(void);
static void usage(void);

static void syseventd_init(void);
static void syseventd_fini(int sig);

static pid_t enter_daemon_lock(void);
static void exit_daemon_lock(void);

/*
 * usage - print a brief usage summary to stderr and exit with status 2.
 */
static void
usage() {
	(void) fprintf(stderr, "usage: syseventd [-d <debug_level>] "
	    "[-r <root_dir>]\n");
	(void) fprintf(stderr, "higher debug levels get progressively ");
	(void) fprintf(stderr, "more detailed debug information.\n");
	(void) fprintf(stderr, "syseventd will run in background if ");
	(void) fprintf(stderr, "run with a debug_level less than %d.\n",
	    DEBUG_LEVEL_FORK);
	exit(2);
}


/* common exit function which ensures releasing locks */
void
syseventd_exit(int status)
{
	syseventd_print(1, "exit status = %d\n", status);

	/* Drop the single-instance daemon lock if this process holds it */
	if (hold_daemon_lock) {
		exit_daemon_lock();
	}

	exit(status);
}


/*
 * hup_handler - SIGHUP handler.  SIGHUP is used to force a reload of
 *		 all SLMs.  During fini, events are drained from all
 *		 client event queues.  The events that have been consumed
 *		 by all clients are freed from the kernel event queue.
 *
 *		 Events that have not yet been delivered to all clients
 *		 are not freed and will be replayed after all SLMs have
 *		 been (re)loaded.
 *
 *		 After all client event queues have been drained, each
 *		 SLM client is unloaded.  The init phase will (re)load
 *		 each SLM and initiate event replay and delivery from
 *		 the kernel.
 *
 */
/*ARGSUSED*/
static void
hup_handler(int sig)
{
	syseventd_err_print(SIGHUP_CAUGHT);
	(void) fflush(0);
	syseventd_fini(sig);
	syseventd_init();
	syseventd_err_print(DAEMON_RESTARTED);
	(void) fflush(0);
}

/*
 * Fault handler for other signals caught
 */
/*ARGSUSED*/
static void
flt_handler(int sig)
{
	char signame[SIG2STR_MAX];

	if (sig2str(sig, signame) == -1) {
		syseventd_err_print(UNKNOWN_SIGNAL_CAUGHT, sig);
	}

	/* Restore default disposition so a repeat of the signal is fatal */
	(void) se_signal_sethandler(sig, SIG_DFL, NULL);

	switch (sig) {
	case SIGINT:
	case SIGSTOP:	/* NOTE(review): SIGSTOP cannot be caught; harmless */
	case SIGTERM:
		/* Close kernel door */
		(void) door_revoke(upcall_door);

		/* Gracefully exit current event delivery threads */
		syseventd_fini(sig);

		(void) fflush(0);
		(void) se_signal_unblockall();
		syseventd_exit(1);
		/*NOTREACHED*/
	case SIGCLD:
		/* No need to abort on a SIGCLD */
		break;
	default:
		syseventd_err_print(FATAL_ERROR);
		abort();

	}
}

/*
 * Daemon parent process only.
 * Child process signal to indicate successful daemon initialization.
 * This is the normal and expected exit path of the daemon parent.
 */
/*ARGSUSED*/
static void
sigusr1(int sig)
{
	syseventd_exit(0);
}

/*
 * sigwait_thr - dedicated signal-handling thread.  All signals are blocked
 *		 in every other thread (see main); this thread synchronously
 *		 waits for any signal and routes SIGHUP to the reload path,
 *		 everything else to the fault handler.
 */
static void
sigwait_thr()
{
	int	sig;
	int	err;
	sigset_t signal_set;

	for (;;) {
		syseventd_print(3, "sigwait thread waiting for signal\n");
		(void) sigfillset(&signal_set);
		err = sigwait(&signal_set, &sig);
		if (err) {
			syseventd_exit(2);
		}

		/*
		 * Block all signals until the signal handler completes
		 */
		if (sig == SIGHUP) {
			hup_handler(sig);
		} else {
			flt_handler(sig);
		}
	}
	/* NOTREACHED */
}

/*
 * set_root_dir - duplicate dir into the global root_dir (relative root for
 *		  the daemon lock and door paths).  Exits the daemon on
 *		  allocation failure.
 */
static void
set_root_dir(char *dir)
{
	root_dir = malloc(strlen(dir) + 1);
	if (root_dir == NULL) {
		syseventd_err_print(INIT_ROOT_DIR_ERR, strerror(errno));
		syseventd_exit(2);
	}
	(void) strcpy(root_dir, dir);
}

/*
 * main - daemon startup.
 *
 * Sequence: parse options; optionally daemonize (fork with a SIGUSR1
 * handshake so the parent does not exit until the child is functional);
 * acquire the single-instance daemon lock; initialize semaphores, the
 * event buffer ring and locks; spawn the dispatch, completion and
 * signal-wait threads; load SLM clients via syseventd_init(); then signal
 * the parent and pause forever (all work happens in the other threads).
 */
int
main(int argc, char **argv)
{
	int i, c;
	int fd;
	pid_t pid;
	int has_forked = 0;
	extern char *optarg;

	(void) setlocale(LC_ALL, "");
	(void) textdomain(TEXT_DOMAIN);

	if (getuid() != 0) {
		(void) fprintf(stderr, "Must be root to run syseventd\n");
		syseventd_exit(1);
	}

	if (argc > 5) {
		usage();
	}

	if ((prog = strrchr(argv[0], '/')) == NULL) {
		prog = argv[0];
	} else {
		prog++;
	}

	while ((c = getopt(argc, argv, "d:r:")) != EOF) {
		switch (c) {
		case 'd':
			debug_level = atoi(optarg);
			break;
		case 'r':
			/*
			 * Private flag for suninstall to run
			 * daemon during install.
			 */
			set_root_dir(optarg);
			break;
		case '?':
		default:
			usage();
		}
	}

	/* demonize ourselves */
	if (debug_level < DEBUG_LEVEL_FORK) {

		sigset_t mask;

		(void) sigset(SIGUSR1, sigusr1);

		/* Hold SIGUSR1 until the parent is ready to wait for it */
		(void) sigemptyset(&mask);
		(void) sigaddset(&mask, SIGUSR1);
		(void) sigprocmask(SIG_BLOCK, &mask, NULL);

		if ((pid = fork()) == (pid_t)-1) {
			(void) fprintf(stderr,
			    "syseventd: fork failed - %s\n", strerror(errno));
			syseventd_exit(1);
		}

		if (pid != 0) {
			/*
			 * parent
			 * handshake with the daemon so that dependents
			 * of the syseventd service don't start up until
			 * the service is actually functional
			 */
			int status;
			(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);

			if (waitpid(pid, &status, 0) != pid) {
				/*
				 * child process signal indicating
				 * successful daemon initialization
				 * (SIGUSR1 interrupts the waitpid and
				 * runs sigusr1(), which exits 0)
				 */
				syseventd_exit(0);
			}
			/* child exited implying unsuccessful startup */
			syseventd_exit(1);
		}

		/* child */

		has_forked = 1;
		(void) sigset(SIGUSR1, SIG_DFL);
		(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);

		(void) chdir("/");
		(void) setsid();
		if (debug_level <= 1) {
			/* Detach stdio; subsequent errors go to syslog */
			closefrom(0);
			fd = open("/dev/null", 0);
			(void) dup2(fd, 1);
			(void) dup2(fd, 2);
			logflag = 1;
		}
	}

	openlog("syseventd", LOG_PID, LOG_DAEMON);

	(void) mutex_init(&err_mutex, USYNC_THREAD, NULL);

	syseventd_print(8,
	    "syseventd started, debug level = %d\n", debug_level);

	/* only one instance of syseventd can run at a time */
	if ((pid = enter_daemon_lock()) != getpid()) {
		syseventd_print(1,
		    "event daemon pid %ld already running\n", pid);
		exit(3);
	}

	/* initialize semaphores and eventbuf */
	(void) sema_init(&sema_eventbuf, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) sema_init(&sema_dispatch, 0, USYNC_THREAD, NULL);
	(void) sema_init(&sema_resource, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) cond_init(&event_comp_cv, USYNC_THREAD, NULL);
	eventbuf = (sysevent_t **)calloc(SE_EVENT_DISPATCH_CNT,
	    sizeof (sysevent_t *));
	if (eventbuf == NULL) {
		syseventd_print(1, "Unable to allocate event buffer array\n");
		exit(2);
	}
	for (i = 0; i < SE_EVENT_DISPATCH_CNT; ++i) {
		eventbuf[i] = malloc(LOGEVENT_BUFSIZE);
		if (eventbuf[i] == NULL) {
			syseventd_print(1, "Unable to allocate event "
			    "buffers\n");
			exit(2);
		}
	}

	(void) mutex_init(&client_tbl_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&ev_comp_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&door_lock, USYNC_THREAD, NULL);
	(void) rwlock_init(&mod_unload_lock, USYNC_THREAD, NULL);

	event_compq = NULL;

	syseventd_print(8, "start the message thread running\n");

	/*
	 * Block all signals to all threads include the main thread.
	 * The sigwait_thr thread will process any signals and initiate
	 * a graceful recovery if possible.
	 */
	if (se_signal_blockall() < 0) {
		syseventd_err_print(INIT_SIG_BLOCK_ERR);
		syseventd_exit(2);
	}

	if (thr_create(NULL, NULL, (void *(*)(void *))dispatch_message,
	    (void *)0, 0, NULL) < 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}
	if (thr_create(NULL, NULL,
	    (void *(*)(void *))event_completion_thr, NULL,
	    THR_BOUND, NULL) != 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}
	/* Create signal catching thread */
	if (thr_create(NULL, NULL, (void *(*)(void *))sigwait_thr,
	    NULL, 0, NULL) < 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}

	setbuf(stdout, (char *)NULL);

	/* Initialize and load SLM clients */
	initialize_client_tbl();
	syseventd_init();

	/* signal parent to indicate successful daemon initialization */
	if (has_forked) {
		if (kill(getppid(), SIGUSR1) != 0) {
			syseventd_err_print(
			    "signal to the parent failed - %s\n",
			    strerror(errno));
			syseventd_exit(2);
		}
	}

	syseventd_print(8, "Pausing\n");

	for (;;) {
		(void) pause();
	}
	/* NOTREACHED */
	return (0);
}

/*
 * door_upcall - called from the kernel via kernel sysevent door
 *		to upload event(s).
 *
 *		This routine should never block.  If resources are
 *		not available to immediately accept the event buffer
 *		EAGAIN is returned to the kernel.
 *
 *		Once resources are available, the kernel is notified
 *		via a modctl interface to resume event delivery to
 *		syseventd.
 *
 */
/*ARGSUSED*/
static void
door_upcall(void *cookie, char *args, size_t alen,
	door_desc_t *ddp, uint_t ndid)
{
	sysevent_t *ev;
	int rval;


	(void) mutex_lock(&door_lock);
	if (args == NULL) {
		rval = EINVAL;
	} else if (sema_trywait(&sema_eventbuf)) {
		/*
		 * No free slot in the eventbuf ring: tell the kernel to
		 * back off.  dispatch_message notices door_upcall_retval
		 * == EAGAIN later and kicks the kernel via modctl.
		 */
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;
		syseventd_print(2, "door_upcall: busy event %llx "
		    "retry\n", sysevent_get_seq(ev));
		rval = door_upcall_retval = EAGAIN;
	} else {
		/*
		 * Copy received message to local buffer.  Events larger
		 * than LOGEVENT_BUFSIZE are truncated here; the remainder
		 * is fetched later with MODEVENTS_GETDATA (see dispatch).
		 */
		size_t size;
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;

		syseventd_print(2, "door_upcall: event %llx in eventbuf %d\n",
		    sysevent_get_seq(ev), deliver_buf);
		size = sysevent_get_size(ev) > LOGEVENT_BUFSIZE ?
		    LOGEVENT_BUFSIZE : sysevent_get_size(ev);
		(void) bcopy(ev, eventbuf[deliver_buf], size);
		deliver_buf = (deliver_buf + 1) % SE_EVENT_DISPATCH_CNT;
		rval = 0;
		(void) sema_post(&sema_dispatch);
	}

	(void) mutex_unlock(&door_lock);

	/*
	 * Filling in return values for door_return
	 */
	(void) door_return((void *)&rval, sizeof (rval), NULL, 0);
	(void) door_return(NULL, 0, NULL, 0);
}

/*
 * dispatch_message - dispatch message thread
 *		This thread spins until an event buffer is delivered
 *		delivered from the kernel.
 *
 *		It will wait to dispatch an event to any clients
 *		until adequate resources are available to process
 *		the event buffer.
 */
static void
dispatch_message(void)
{
	int error;

	for (;;) {
		syseventd_print(3, "dispatch_message: thread started\n");
		/*
		 * Spin till a message comes
		 */
		while (sema_wait(&sema_dispatch) != 0) {
			syseventd_print(1,
			    "dispatch_message: sema_wait failed\n");
			(void) sleep(1);
		}

		syseventd_print(3, "dispatch_message: sema_dispatch\n");

		/*
		 * Wait for available resources
		 * (sema_resource is posted back by event_completion_thr
		 * when an event is fully consumed)
		 */
		while (sema_wait(&sema_resource) != 0) {
			syseventd_print(1, "dispatch_message: sema_wait "
			    "failed\n");
			(void) sleep(1);
		}

		syseventd_print(2, "dispatch_message: eventbuf %d\n",
		    dispatch_buf);

		/*
		 * Client dispatch - retry until the allocation failures
		 * inside dispatch() (which return EAGAIN) clear up
		 */
		do {
			error = dispatch();
		} while (error == EAGAIN);

		syseventd_print(2, "eventbuf %d dispatched\n", dispatch_buf);
		dispatch_buf = (dispatch_buf + 1) % SE_EVENT_DISPATCH_CNT;

		/*
		 * kernel received a busy signal -
		 * kickstart the kernel delivery thread
		 * door_lock blocks the kernel so we hold it for the
		 * shortest time possible.
		 */
		(void) mutex_lock(&door_lock);
		if (door_upcall_retval == EAGAIN && !fini_pending) {
			syseventd_print(3, "dispatch_message: retrigger "
			    "door_upcall_retval = %d\n",
			    door_upcall_retval);
			(void) modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH,
			    NULL, NULL, NULL, 0);
			door_upcall_retval = 0;
		}
		(void) mutex_unlock(&door_lock);
	}
	/* NOTREACHED */
}

/*
 * drain_eventq - Called to drain all pending events from the client's
 *		event queue.
 */
static void
drain_eventq(struct sysevent_client *scp, int status)
{
	struct event_dispatch_pkg *d_pkg;
	struct event_dispatchq *eventq, *eventq_next;

	syseventd_print(3, "Draining eventq for client %d\n",
	    scp->client_num);

	/* Caller holds scp->client_lock (see call sites) */
	eventq = scp->eventq;
	while (eventq) {
		/*
		 * Mark all dispatched events as completed, but indicate the
		 * error status
		 */
		d_pkg = eventq->d_pkg;

		syseventd_print(4, "drain event 0X%llx for client %d\n",
		    sysevent_get_seq(d_pkg->ev), scp->client_num);

		/*
		 * Only events never handed to the client need a completion
		 * post here; in-flight ones are completed by their thread.
		 */
		if (d_pkg->completion_state == SE_NOT_DISPATCHED) {
			d_pkg->completion_status = status;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);
		}

		eventq_next = eventq->next;
		free(eventq);
		eventq = eventq_next;
		scp->eventq = eventq;
	}
}

/*
 * client_deliver_event_thr - Client delivery thread
 *				This thread will process any events on this
 *				client's eventq.
 */
static void
client_deliver_event_thr(void *arg)
{
	int flag, error, i;
	sysevent_t *ev;
	hrtime_t now;
	module_t *mod;
	struct event_dispatchq *eventq;
	struct sysevent_client *scp;
	struct event_dispatch_pkg *d_pkg;

	scp = (struct sysevent_client *)arg;
	mod = (module_t *)scp->client_data;

	(void) mutex_lock(&scp->client_lock);
	for (;;) {
		while (scp->eventq == NULL) {

			/*
			 * Client has been suspended or unloaded, go no further.
			 */
			if (fini_pending) {
				scp->client_flags &= ~SE_CLIENT_THR_RUNNING;
				syseventd_print(3, "Client %d delivery thread "
				    "exiting flags: 0X%x\n",
				    scp->client_num, scp->client_flags);
				(void) mutex_unlock(&scp->client_lock);
				return;
			}

			(void) cond_wait(&scp->client_cv, &scp->client_lock);

		}

		/*
		 * Process events from the head of the eventq, eventq is locked
		 * going into the processing.
		 */
		eventq = scp->eventq;
		while (eventq != NULL) {
			/*
			 * Detach the head entry under the lock, then drop
			 * the lock for the (potentially slow) SLM call.
			 */
			d_pkg = eventq->d_pkg;
			d_pkg->completion_state = SE_OUTSTANDING;
			scp->eventq = eventq->next;
			free(eventq);
			eventq = scp->eventq;
			(void) mutex_unlock(&scp->client_lock);


			flag = error = 0;
			ev = d_pkg->ev;

			syseventd_print(3, "Start delivery for client %d "
			    "with retry count %d\n",
			    scp->client_num, d_pkg->retry_count);

			/*
			 * Retry limit has been reached by this client, indicate
			 * that no further retries are allowed
			 */
			for (i = 0; i <= scp->retry_limit; ++i) {
				if (i == scp->retry_limit)
					flag = SE_NO_RETRY;

				/* Start the clock for the event delivery */
				d_pkg->start_time = gethrtime();

				syseventd_print(9, "Deliver to module client "
				    "%s\n", mod->name);

				error = mod->deliver_event(ev, flag);

				/* Can not allow another retry */
				if (i == scp->retry_limit)
					error = 0;

				/* Stop the clock */
				now = gethrtime();

				/*
				 * Suspend event processing and drain the
				 * event q for latent clients
				 */
				if (now - d_pkg->start_time >
				    ((hrtime_t)SE_TIMEOUT * NANOSEC)) {
					syseventd_print(1, "Unresponsive "
					    "client %d: Draining eventq and "
					    "suspending event delivery\n",
					    scp->client_num);
					(void) mutex_lock(&scp->client_lock);
					scp->client_flags &=
					    ~SE_CLIENT_THR_RUNNING;
					scp->client_flags |=
					    SE_CLIENT_SUSPENDED;

					/* Cleanup current event */
					d_pkg->completion_status = EFAULT;
					d_pkg->completion_state = SE_COMPLETE;
					(void) sema_post(
					    d_pkg->completion_sema);

					/*
					 * Drain the remaining events from the
					 * queue.
					 */
					drain_eventq(scp, EINVAL);
					(void) mutex_unlock(&scp->client_lock);
					return;
				}

				/* Event delivery retry requested */
				if (fini_pending || error != EAGAIN) {
					break;
				} else {
					(void) sleep(SE_RETRY_TIME);
				}
			}

			/* Re-acquire the lock to complete and loop */
			(void) mutex_lock(&scp->client_lock);
			d_pkg->completion_status = error;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);
			syseventd_print(3, "Completed delivery with "
			    "error %d\n", error);
		}

		syseventd_print(3, "No more events to process for client %d\n",
		    scp->client_num);

		/* Return if this was a synchronous delivery */
		if (!SE_CLIENT_IS_THR_RUNNING(scp)) {
			(void) mutex_unlock(&scp->client_lock);
			return;
		}

	}
}

/*
 * client_deliver_event - Client specific event delivery
 *			This routine will allocate and initialize the
 *			necessary per-client dispatch data.
 *
 *			If the eventq is not empty, it may be assumed that
 *			a delivery thread exists for this client and the
 *			dispatch data is appended to the eventq.
 *
 *			The dispatch package is freed by the event completion
 *			thread (event_completion_thr) and the eventq entry
 *			is freed by the event delivery thread.
804 */ 805 static struct event_dispatch_pkg * 806 client_deliver_event(struct sysevent_client *scp, sysevent_t *ev, 807 sema_t *completion_sema) 808 { 809 size_t ev_sz = sysevent_get_size(ev); 810 struct event_dispatchq *newq, *tmp; 811 struct event_dispatch_pkg *d_pkg; 812 813 syseventd_print(3, "client_deliver_event: id 0x%llx size %d\n", 814 (longlong_t)sysevent_get_seq(ev), ev_sz); 815 if (debug_level == 9) { 816 se_print(stdout, ev); 817 } 818 819 /* 820 * Check for suspended client 821 */ 822 (void) mutex_lock(&scp->client_lock); 823 if (SE_CLIENT_IS_SUSPENDED(scp) || !SE_CLIENT_IS_THR_RUNNING(scp)) { 824 (void) mutex_unlock(&scp->client_lock); 825 return (NULL); 826 } 827 828 /* 829 * Allocate a new dispatch package and eventq entry 830 */ 831 newq = (struct event_dispatchq *)malloc( 832 sizeof (struct event_dispatchq)); 833 if (newq == NULL) { 834 (void) mutex_unlock(&scp->client_lock); 835 return (NULL); 836 } 837 838 d_pkg = (struct event_dispatch_pkg *)malloc( 839 sizeof (struct event_dispatch_pkg)); 840 if (d_pkg == NULL) { 841 free(newq); 842 (void) mutex_unlock(&scp->client_lock); 843 return (NULL); 844 } 845 846 /* Initialize the dispatch package */ 847 d_pkg->scp = scp; 848 d_pkg->retry_count = 0; 849 d_pkg->completion_status = 0; 850 d_pkg->completion_state = SE_NOT_DISPATCHED; 851 d_pkg->completion_sema = completion_sema; 852 d_pkg->ev = ev; 853 newq->d_pkg = d_pkg; 854 newq->next = NULL; 855 856 if (scp->eventq != NULL) { 857 858 /* Add entry to the end of the eventq */ 859 tmp = scp->eventq; 860 while (tmp->next != NULL) 861 tmp = tmp->next; 862 tmp->next = newq; 863 } else { 864 /* event queue empty, wakeup delivery thread */ 865 scp->eventq = newq; 866 (void) cond_signal(&scp->client_cv); 867 } 868 (void) mutex_unlock(&scp->client_lock); 869 870 return (d_pkg); 871 } 872 873 /* 874 * event_completion_thr - Event completion thread. This thread routine 875 * waits for all client delivery thread to complete 876 * delivery of a particular event. 
 */
static void
event_completion_thr()
{
	int ret, i, client_count, ok_to_free;
	sysevent_id_t eid;
	struct sysevent_client *scp;
	struct ev_completion *ev_comp;
	struct event_dispatchq *dispatchq;
	struct event_dispatch_pkg *d_pkg;

	(void) mutex_lock(&ev_comp_lock);
	for (;;) {
		while (event_compq == NULL) {
			(void) cond_wait(&event_comp_cv, &ev_comp_lock);
		}

		/*
		 * Process event completions from the head of the
		 * completion queue
		 */
		ev_comp = event_compq;
		while (ev_comp) {
			(void) mutex_unlock(&ev_comp_lock);
			eid.eid_seq = sysevent_get_seq(ev_comp->ev);
			sysevent_get_time(ev_comp->ev, &eid.eid_ts);
			client_count = ev_comp->client_count;
			ok_to_free = 1;

			syseventd_print(3, "Wait for event completion of "
			    "event 0X%llx on %d clients\n",
			    eid.eid_seq, client_count);

			/* One sema post per client delivery (or drain) */
			while (client_count) {
				syseventd_print(9, "Waiting for %d clients on "
				    "event id 0X%llx\n", client_count,
				    eid.eid_seq);

				(void) sema_wait(&ev_comp->client_sema);
				--client_count;
			}

			syseventd_print(3, "Cleaning up clients for event "
			    "0X%llx\n", eid.eid_seq);
			dispatchq = ev_comp->dispatch_list;
			while (dispatchq != NULL) {
				d_pkg = dispatchq->d_pkg;
				scp = d_pkg->scp;

				/*
				 * EAGAIN from any client means the event
				 * must be replayed later; keep it in the
				 * kernel queue.
				 */
				if (d_pkg->completion_status == EAGAIN)
					ok_to_free = 0;

				syseventd_print(4, "Delivery of 0X%llx "
				    "complete for client %d retry count %d "
				    "status %d\n", eid.eid_seq,
				    scp->client_num,
				    d_pkg->retry_count,
				    d_pkg->completion_status);

				free(d_pkg);
				ev_comp->dispatch_list = dispatchq->next;
				free(dispatchq);
				dispatchq = ev_comp->dispatch_list;
			}

			if (ok_to_free) {
				/*
				 * Tell the kernel it may free its copy of
				 * this event; retry since the kernel may
				 * still be moving the buffer.
				 */
				for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
					if ((ret = modctl(MODEVENTS,
					    (uintptr_t)MODEVENTS_FREEDATA,
					    (uintptr_t)&eid, NULL,
					    NULL, 0)) != 0) {
						syseventd_print(1, "attempting "
						    "to free event 0X%llx\n",
						    eid.eid_seq);

						/*
						 * Kernel may need time to
						 * move this event buffer to
						 * the sysevent sent queue
						 */
						(void) sleep(1);
					} else {
						break;
					}
				}
				if (ret) {
					syseventd_print(1, "Unable to free "
					    "event 0X%llx from the "
					    "kernel\n", eid.eid_seq);
				}
			} else {
				syseventd_print(1, "Not freeing event 0X%llx\n",
				    eid.eid_seq);
			}

			syseventd_print(2, "Event delivery complete for id "
			    "0X%llx\n", eid.eid_seq);

			(void) mutex_lock(&ev_comp_lock);
			event_compq = ev_comp->next;
			free(ev_comp->ev);
			free(ev_comp);
			ev_comp = event_compq;
			/* Release one throttle slot (see dispatch_message) */
			(void) sema_post(&sema_resource);
		}

		/*
		 * Event completion queue is empty, signal possible unload
		 * operation
		 */
		(void) cond_signal(&event_comp_cv);

		syseventd_print(3, "No more events\n");
	}
}

/*
 * dispatch - Dispatch the current event buffer to all valid SLM clients.
 */
static int
dispatch(void)
{
	int ev_sz, i, client_count = 0;
	sysevent_t *new_ev;
	sysevent_id_t eid;
	struct ev_completion *ev_comp, *tmp;
	struct event_dispatchq *dispatchq, *client_list;
	struct event_dispatch_pkg *d_pkg;

	/* Check for module unload operation */
	if (rw_tryrdlock(&mod_unload_lock) != 0) {
		syseventd_print(2, "unload in progress abort delivery\n");
		(void) sema_post(&sema_eventbuf);
		(void) sema_post(&sema_resource);
		return (0);
	}

	/* NOTE(review): this debug message is missing a trailing \n */
	syseventd_print(3, "deliver dispatch buffer %d", dispatch_buf);
	eid.eid_seq = sysevent_get_seq(eventbuf[dispatch_buf]);
	sysevent_get_time(eventbuf[dispatch_buf], &eid.eid_ts);
	syseventd_print(3, "deliver msg id: 0x%llx\n", eid.eid_seq);

	/*
	 * ev_comp is used to hold event completion data.  It is freed
	 * by the event completion thread (event_completion_thr).
	 */
	ev_comp = (struct ev_completion *)
	    malloc(sizeof (struct ev_completion));
	if (ev_comp == NULL) {
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate event completion buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}
	ev_comp->dispatch_list = NULL;
	ev_comp->next = NULL;
	(void) sema_init(&ev_comp->client_sema, 0, USYNC_THREAD, NULL);

	ev_sz = sysevent_get_size(eventbuf[dispatch_buf]);
	new_ev = calloc(1, ev_sz);
	if (new_ev == NULL) {
		free(ev_comp);
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate new event buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}


	/*
	 * For long messages, copy additional data from kernel
	 * (the door upcall truncated the copy to LOGEVENT_BUFSIZE)
	 */
	if (ev_sz > LOGEVENT_BUFSIZE) {
		int ret = 0;

		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);

		for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
			if ((ret = modctl(MODEVENTS,
			    (uintptr_t)MODEVENTS_GETDATA,
			    (uintptr_t)&eid,
			    (uintptr_t)ev_sz,
			    (uintptr_t)new_ev, 0))
			    == 0)
				break;
			else
				(void) sleep(1);
		}
		if (ret) {
			syseventd_print(1, "GET_DATA failed for 0X%llx:%llx\n",
			    eid.eid_ts, eid.eid_seq);
			free(new_ev);
			free(ev_comp);
			(void) rw_unlock(&mod_unload_lock);
			return (EAGAIN);
		}
	} else {
		(void) bcopy(eventbuf[dispatch_buf], new_ev, ev_sz);
		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);
	}


	/*
	 * Deliver a copy of eventbuf to clients so
	 * eventbuf can be used for the next message
	 */
	for (i = 0; i < MAX_SLM; ++i) {

		/* Don't bother for suspended or unloaded clients */
		if (!SE_CLIENT_IS_LOADED(sysevent_client_tbl[i]) ||
		    SE_CLIENT_IS_SUSPENDED(sysevent_client_tbl[i]))
			continue;

		/*
		 * Allocate event dispatch queue entry.  All queue entries
		 * are freed by the event completion thread as client
		 * delivery completes.
		 */
		dispatchq = (struct event_dispatchq *)malloc(
		    sizeof (struct event_dispatchq));
		if (dispatchq == NULL) {
			syseventd_print(1, "Can not allocate dispatch q "
			    "for event id 0X%llx client %d\n", eid.eid_seq, i);
			continue;
		}
		dispatchq->next = NULL;

		/* Initiate client delivery */
		d_pkg = client_deliver_event(sysevent_client_tbl[i],
		    new_ev, &ev_comp->client_sema);
		if (d_pkg == NULL) {
			syseventd_print(1, "Can not allocate dispatch "
			    "package for event id 0X%llx client %d\n",
			    eid.eid_seq, i);
			free(dispatchq);
			continue;
		}
		dispatchq->d_pkg = d_pkg;
		++client_count;

		/* client_list tracks the tail for O(1) append */
		if (ev_comp->dispatch_list == NULL) {
			ev_comp->dispatch_list = dispatchq;
			client_list = dispatchq;
		} else {
			client_list->next = dispatchq;
			client_list = client_list->next;
		}
	}

	ev_comp->client_count = client_count;
	ev_comp->ev = new_ev;

	(void) mutex_lock(&ev_comp_lock);

	if (event_compq == NULL) {
		syseventd_print(3, "Wakeup event completion thread for "
		    "id 0X%llx\n", eid.eid_seq);
		event_compq = ev_comp;
		(void) cond_signal(&event_comp_cv);
	} else {

		/* Add entry to the end of the event completion queue */
		tmp = event_compq;
		while (tmp->next != NULL)
			tmp = tmp->next;
		tmp->next = ev_comp;
		syseventd_print(3, "event added to completion queue for "
		    "id 0X%llx\n", eid.eid_seq);
	}
	(void) mutex_unlock(&ev_comp_lock);
	(void) rw_unlock(&mod_unload_lock);

	return (0);
}

/* SLM module search path: platform dir, machine dir, then generic dir */
#define	MODULE_DIR_HW	"/usr/platform/%s/lib/sysevent/modules/"
#define	MODULE_DIR_GEN	"/usr/lib/sysevent/modules/"
#define	MOD_DIR_NUM	3
static char dirname[MOD_DIR_NUM][MAXPATHLEN];

1159 static char * 1160 dir_num2name(int dirnum) 1161 { 1162 char infobuf[MAXPATHLEN]; 1163 1164 if (dirnum >= MOD_DIR_NUM) 1165 return (NULL); 1166 1167 if (dirname[0][0] == '\0') { 1168 if (sysinfo(SI_PLATFORM, infobuf, MAXPATHLEN) == -1) { 1169 syseventd_print(1, "dir_num2name: " 1170 "sysinfo error %s\n", strerror(errno)); 1171 return (NULL); 1172 } else if (snprintf(dirname[0], sizeof (dirname[0]), 1173 MODULE_DIR_HW, infobuf) >= sizeof (dirname[0])) { 1174 syseventd_print(1, "dir_num2name: " 1175 "platform name too long: %s\n", 1176 infobuf); 1177 return (NULL); 1178 } 1179 if (sysinfo(SI_MACHINE, infobuf, MAXPATHLEN) == -1) { 1180 syseventd_print(1, "dir_num2name: " 1181 "sysinfo error %s\n", strerror(errno)); 1182 return (NULL); 1183 } else if (snprintf(dirname[1], sizeof (dirname[1]), 1184 MODULE_DIR_HW, infobuf) >= sizeof (dirname[1])) { 1185 syseventd_print(1, "dir_num2name: " 1186 "machine name too long: %s\n", 1187 infobuf); 1188 return (NULL); 1189 } 1190 (void) strcpy(dirname[2], MODULE_DIR_GEN); 1191 } 1192 1193 return (dirname[dirnum]); 1194 } 1195 1196 1197 /* 1198 * load_modules - Load modules found in the common syseventd module directories 1199 * Modules that do not provide valid interfaces are rejected. 
1200 */ 1201 static void 1202 load_modules(char *dirname) 1203 { 1204 int client_id; 1205 DIR *mod_dir; 1206 module_t *mod; 1207 struct dirent *entp; 1208 struct slm_mod_ops *mod_ops; 1209 struct sysevent_client *scp; 1210 1211 if (dirname == NULL) 1212 return; 1213 1214 /* Return silently if module directory does not exist */ 1215 if ((mod_dir = opendir(dirname)) == NULL) { 1216 syseventd_print(1, "Unable to open module directory %s: %s\n", 1217 dirname, strerror(errno)); 1218 return; 1219 } 1220 1221 syseventd_print(3, "loading modules from %s\n", dirname); 1222 1223 /* 1224 * Go through directory, looking for files ending with .so 1225 */ 1226 while ((entp = readdir(mod_dir)) != NULL) { 1227 void *dlh, *f; 1228 char *tmp, modpath[MAXPATHLEN]; 1229 1230 if (((tmp = strstr(entp->d_name, MODULE_SUFFIX)) == NULL) || 1231 (tmp[strlen(MODULE_SUFFIX)] != '\0')) { 1232 continue; 1233 } 1234 1235 if (snprintf(modpath, sizeof (modpath), "%s%s", 1236 dirname, entp->d_name) >= sizeof (modpath)) { 1237 syseventd_err_print(INIT_PATH_ERR, modpath); 1238 continue; 1239 } 1240 if ((dlh = dlopen(modpath, RTLD_LAZY)) == NULL) { 1241 syseventd_err_print(LOAD_MOD_DLOPEN_ERR, 1242 modpath, dlerror()); 1243 continue; 1244 } else if ((f = dlsym(dlh, EVENT_INIT)) == NULL) { 1245 syseventd_err_print(LOAD_MOD_NO_INIT, 1246 modpath, dlerror()); 1247 (void) dlclose(dlh); 1248 continue; 1249 } 1250 1251 mod = malloc(sizeof (*mod)); 1252 if (mod == NULL) { 1253 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod", 1254 strerror(errno)); 1255 (void) dlclose(dlh); 1256 continue; 1257 } 1258 1259 mod->name = strdup(entp->d_name); 1260 if (mod->name == NULL) { 1261 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod->name", 1262 strerror(errno)); 1263 (void) dlclose(dlh); 1264 free(mod); 1265 continue; 1266 } 1267 1268 mod->dlhandle = dlh; 1269 mod->event_mod_init = (struct slm_mod_ops *(*)())f; 1270 1271 /* load in other module functions */ 1272 mod->event_mod_fini = (void (*)())dlsym(dlh, EVENT_FINI); 1273 
if (mod->event_mod_fini == NULL) { 1274 syseventd_err_print(LOAD_MOD_DLSYM_ERR, mod->name, 1275 dlerror()); 1276 free(mod->name); 1277 free(mod); 1278 (void) dlclose(dlh); 1279 continue; 1280 } 1281 1282 /* Call module init routine */ 1283 if ((mod_ops = mod->event_mod_init()) == NULL) { 1284 syseventd_err_print(LOAD_MOD_EINVAL, mod->name); 1285 free(mod->name); 1286 free(mod); 1287 (void) dlclose(dlh); 1288 continue; 1289 } 1290 if (mod_ops->major_version != SE_MAJOR_VERSION) { 1291 syseventd_err_print(LOAD_MOD_VERSION_MISMATCH, 1292 mod->name, SE_MAJOR_VERSION, 1293 mod_ops->major_version); 1294 mod->event_mod_fini(); 1295 free(mod->name); 1296 free(mod); 1297 (void) dlclose(dlh); 1298 continue; 1299 } 1300 1301 mod->deliver_event = mod_ops->deliver_event; 1302 /* Add module entry to client list */ 1303 if ((client_id = insert_client((void *)mod, SLM_CLIENT, 1304 (mod_ops->retry_limit <= SE_MAX_RETRY_LIMIT ? 1305 mod_ops->retry_limit : SE_MAX_RETRY_LIMIT))) < 0) { 1306 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client", 1307 strerror(errno)); 1308 mod->event_mod_fini(); 1309 free(mod->name); 1310 free(mod); 1311 (void) dlclose(dlh); 1312 continue; 1313 } 1314 1315 scp = sysevent_client_tbl[client_id]; 1316 ++concurrency_level; 1317 (void) thr_setconcurrency(concurrency_level); 1318 if (thr_create(NULL, 0, 1319 (void *(*)(void *))client_deliver_event_thr, 1320 (void *)scp, THR_BOUND, &scp->tid) != 0) { 1321 1322 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client", 1323 strerror(errno)); 1324 mod->event_mod_fini(); 1325 free(mod->name); 1326 free(mod); 1327 (void) dlclose(dlh); 1328 continue; 1329 } 1330 scp->client_flags |= SE_CLIENT_THR_RUNNING; 1331 1332 syseventd_print(3, "loaded module %s\n", entp->d_name); 1333 } 1334 1335 (void) closedir(mod_dir); 1336 syseventd_print(3, "modules loaded\n"); 1337 } 1338 1339 /* 1340 * unload_modules - modules are unloaded prior to graceful shutdown or 1341 * before restarting the daemon upon receipt of 1342 * 
 *			SIGHUP.
 */
static void
unload_modules(int sig)
{
	int i, count, done;
	module_t *mod;
	struct sysevent_client *scp;

	/*
	 * unload modules that are ready, skip those that have not
	 * drained their event queues.  On SIGHUP (restart) wait as long
	 * as it takes; otherwise give up after SE_TIMEOUT passes.
	 */
	count = done = 0;
	while (done < MAX_SLM) {
		/* Don't wait indefinitely for unresponsive clients */
		if (sig != SIGHUP && count > SE_TIMEOUT) {
			break;
		}

		/* done is recounted from scratch on every pass */
		done = 0;

		/* Shutdown clients */
		for (i = 0; i < MAX_SLM; ++i) {
			scp = sysevent_client_tbl[i];
			/*
			 * Try-lock only: a client busy delivering an event
			 * is skipped this pass and retried on the next.
			 * Empty slots and non-SLM clients count as done.
			 */
			if (mutex_trylock(&scp->client_lock) == 0) {
				if (scp->client_type != SLM_CLIENT ||
				    scp->client_data == NULL) {
					(void) mutex_unlock(&scp->client_lock);
					done++;
					continue;
				}
			} else {
				syseventd_print(3, "Skipping unload of "
				    "client %d: client locked\n",
				    scp->client_num);
				continue;
			}

			/*
			 * Drain the eventq and wait for delivery thread to
			 * cleanly exit (it is woken via client_cv and joined
			 * after the lock is dropped)
			 */
			drain_eventq(scp, EAGAIN);
			(void) cond_signal(&scp->client_cv);
			(void) mutex_unlock(&scp->client_lock);
			(void) thr_join(scp->tid, NULL, NULL);

			/*
			 * It is now safe to unload the module
			 */
			mod = (module_t *)scp->client_data;
			syseventd_print(2, "Unload %s\n", mod->name);
			mod->event_mod_fini();
			(void) dlclose(mod->dlhandle);
			free(mod->name);
			(void) mutex_lock(&client_tbl_lock);
			delete_client(i);
			(void) mutex_unlock(&client_tbl_lock);
			++done;

		}
		++count;
		/* Give busy clients a chance to drain before the next pass */
		(void) sleep(1);
	}

	/*
	 * Wait for event completions (completion thread signals
	 * event_comp_cv as the queue drains)
	 */
	syseventd_print(2, "waiting for event completions\n");
	(void) mutex_lock(&ev_comp_lock);
	while (event_compq != NULL) {
		(void) cond_wait(&event_comp_cv, &ev_comp_lock);
	}
	(void) mutex_unlock(&ev_comp_lock);
}

/*
 * syseventd_init - Called at daemon (re)start-up time to load modules
 *			and
 *			kickstart the kernel delivery engine.
 */
static void
syseventd_init()
{
	int i, fd;
	char local_door_file[PATH_MAX + 1];

	/* Clear shutdown state from any previous fini/restart cycle */
	fini_pending = 0;

	concurrency_level = MIN_CONCURRENCY_LEVEL;
	(void) thr_setconcurrency(concurrency_level);

	/*
	 * Load client modules for event delivering
	 */
	for (i = 0; i < MOD_DIR_NUM; ++i) {
		load_modules(dir_num2name(i));
	}

	/*
	 * Create kernel delivery door service
	 */
	syseventd_print(8, "Create a door for kernel upcalls\n");
	if (snprintf(local_door_file, sizeof (local_door_file), "%s%s",
	    root_dir, LOGEVENT_DOOR_UPCALL) >= sizeof (local_door_file)) {
		syseventd_err_print(INIT_PATH_ERR, local_door_file);
		syseventd_exit(5);
	}

	/*
	 * Remove door file for robustness.
	 */
	if (unlink(local_door_file) != 0)
		syseventd_print(8, "Unlink of %s failed.\n", local_door_file);

	/* (Re)create the file the door will be attached to */
	fd = open(local_door_file, O_CREAT|O_RDWR, S_IREAD|S_IWRITE);
	if ((fd == -1) && (errno != EEXIST)) {
		syseventd_err_print(INIT_OPEN_DOOR_ERR, strerror(errno));
		syseventd_exit(5);
	}
	(void) close(fd);

	upcall_door = door_create(door_upcall, NULL,
	    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	if (upcall_door == -1) {
		syseventd_err_print(INIT_CREATE_DOOR_ERR, strerror(errno));
		syseventd_exit(5);
	}

	/* Detach any stale door before attaching the new one */
	(void) fdetach(local_door_file);
retry:
	if (fattach(upcall_door, local_door_file) != 0) {
		/* NOTE(review): busy-waits with no delay while EBUSY */
		if (errno == EBUSY)
			goto retry;
		syseventd_err_print(INIT_FATTACH_ERR, strerror(errno));
		(void) door_revoke(upcall_door);
		syseventd_exit(5);
	}

	/*
	 * Tell kernel the door name and start delivery
	 */
	syseventd_print(2,
	    "local_door_file = %s\n", local_door_file);
	if (modctl(MODEVENTS,
	    (uintptr_t)MODEVENTS_SET_DOOR_UPCALL_FILENAME,
	    (uintptr_t)local_door_file, NULL, NULL, 0) < 0) {
		syseventd_err_print(INIT_DOOR_NAME_ERR, strerror(errno));
		syseventd_exit(6);
	}

	door_upcall_retval = 0;

	/* Flush the kernel queue so pending events are (re)delivered */
	if (modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0)
	    < 0) {
		syseventd_err_print(KERNEL_REPLAY_ERR, strerror(errno));
		syseventd_exit(7);
	}
}

/*
 * syseventd_fini - shut down daemon, but do not exit
 */
static void
syseventd_fini(int sig)
{
	/*
	 * Indicate that event queues should be drained and no
	 * additional events be accepted
	 */
	fini_pending = 1;

	/* Close the kernel event door to halt delivery */
	(void) door_revoke(upcall_door);

	/*
	 * Hold mod_unload_lock as writer so no event dispatch runs
	 * while modules are torn down
	 */
	syseventd_print(1, "Unloading modules\n");
	(void) rw_wrlock(&mod_unload_lock);
	unload_modules(sig);
	(void) rw_unlock(&mod_unload_lock);

}

/*
 * enter_daemon_lock - lock the daemon file lock
 *
 * Use an advisory lock to ensure that only one daemon process is active
 * in the system at any point in time. If the lock is held by another
 * process, do not block but return the pid owner of the lock to the
 * caller immediately. The lock is cleared if the holding daemon process
 * exits for any reason even if the lock file remains, so the daemon can
 * be restarted if necessary. The lock file is DAEMON_LOCK_FILE.
1533 */ 1534 static pid_t 1535 enter_daemon_lock(void) 1536 { 1537 struct flock lock; 1538 1539 syseventd_print(8, "enter_daemon_lock: lock file = %s\n", 1540 DAEMON_LOCK_FILE); 1541 1542 if (snprintf(local_lock_file, sizeof (local_lock_file), "%s%s", 1543 root_dir, DAEMON_LOCK_FILE) >= sizeof (local_lock_file)) { 1544 syseventd_err_print(INIT_PATH_ERR, local_lock_file); 1545 syseventd_exit(8); 1546 } 1547 daemon_lock_fd = open(local_lock_file, O_CREAT|O_RDWR, 0644); 1548 if (daemon_lock_fd < 0) { 1549 syseventd_err_print(INIT_LOCK_OPEN_ERR, 1550 local_lock_file, strerror(errno)); 1551 syseventd_exit(8); 1552 } 1553 1554 lock.l_type = F_WRLCK; 1555 lock.l_whence = SEEK_SET; 1556 lock.l_start = 0; 1557 lock.l_len = 0; 1558 1559 if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) { 1560 if (fcntl(daemon_lock_fd, F_GETLK, &lock) == -1) { 1561 syseventd_err_print(INIT_LOCK_ERR, 1562 local_lock_file, strerror(errno)); 1563 exit(2); 1564 } 1565 return (lock.l_pid); 1566 } 1567 hold_daemon_lock = 1; 1568 1569 return (getpid()); 1570 } 1571 1572 /* 1573 * exit_daemon_lock - release the daemon file lock 1574 */ 1575 static void 1576 exit_daemon_lock(void) 1577 { 1578 struct flock lock; 1579 1580 lock.l_type = F_UNLCK; 1581 lock.l_whence = SEEK_SET; 1582 lock.l_start = 0; 1583 lock.l_len = 0; 1584 1585 if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) { 1586 syseventd_err_print(INIT_UNLOCK_ERR, 1587 local_lock_file, strerror(errno)); 1588 } 1589 1590 if (close(daemon_lock_fd) == -1) { 1591 syseventd_err_print(INIT_LOCK_CLOSE_ERR, 1592 local_lock_file, strerror(errno)); 1593 exit(-1); 1594 } 1595 } 1596 1597 /* 1598 * syseventd_err_print - print error messages to the terminal if not 1599 * yet daemonized or to syslog. 1600 */ 1601 /*PRINTFLIKE1*/ 1602 void 1603 syseventd_err_print(char *message, ...) 
1604 { 1605 va_list ap; 1606 1607 (void) mutex_lock(&err_mutex); 1608 va_start(ap, message); 1609 1610 if (logflag) { 1611 (void) vsyslog(LOG_ERR, message, ap); 1612 } else { 1613 (void) fprintf(stderr, "%s: ", prog); 1614 (void) vfprintf(stderr, message, ap); 1615 } 1616 va_end(ap); 1617 (void) mutex_unlock(&err_mutex); 1618 } 1619 1620 /* 1621 * syseventd_print - print messages to the terminal or to syslog 1622 * the following levels are implemented: 1623 * 1624 * 1 - transient errors that does not affect normal program flow 1625 * 2 - upcall/dispatch interaction 1626 * 3 - program flow trace as each message goes through the daemon 1627 * 8 - all the nit-gritty details of startup and shutdown 1628 * 9 - very verbose event flow tracing (no daemonization of syseventd) 1629 * 1630 */ 1631 /*PRINTFLIKE2*/ 1632 void 1633 syseventd_print(int level, char *message, ...) 1634 { 1635 va_list ap; 1636 static int newline = 1; 1637 1638 if (level > debug_level) { 1639 return; 1640 } 1641 1642 (void) mutex_lock(&err_mutex); 1643 va_start(ap, message); 1644 if (logflag) { 1645 (void) syslog(LOG_DEBUG, "%s[%ld]: ", 1646 prog, getpid()); 1647 (void) vsyslog(LOG_DEBUG, message, ap); 1648 } else { 1649 if (newline) { 1650 (void) fprintf(stdout, "%s[%ld]: ", 1651 prog, getpid()); 1652 (void) vfprintf(stdout, message, ap); 1653 } else { 1654 (void) vfprintf(stdout, message, ap); 1655 } 1656 } 1657 if (message[strlen(message)-1] == '\n') { 1658 newline = 1; 1659 } else { 1660 newline = 0; 1661 } 1662 va_end(ap); 1663 (void) mutex_unlock(&err_mutex); 1664 } 1665