/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * syseventd - The system event daemon
 *
 * This daemon dispatches event buffers received from the
 * kernel to all interested SLM clients.  SLMs in turn
 * deliver the buffers to their particular application
 * clients.
 */
#include <stdio.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <door.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <strings.h>
#include <unistd.h>
#include <synch.h>
#include <syslog.h>
#include <thread.h>
#include <libsysevent.h>
#include <limits.h>
#include <locale.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>
#include <sys/modctl.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/wait.h>

#include "sysevent_signal.h"
#include "syseventd.h"
#include "message.h"

extern int insert_client(void *client, int client_type, int retry_limit);
extern void delete_client(int id);
extern void initialize_client_tbl(void);

extern struct sysevent_client *sysevent_client_tbl[];
extern mutex_t client_tbl_lock;

#define	DEBUG_LEVEL_FORK	9	/* will run in background at all */
					/* levels less than DEBUG_LEVEL_FORK */

int debug_level = 0;
char *root_dir = "";			/* Relative root for lock and door */

/* Maximum number of outstanding events dispatched */
#define	SE_EVENT_DISPATCH_CNT	100

static int upcall_door;			/* Kernel event door */
static int door_upcall_retval;		/* Kernel event posting return value */
static int fini_pending = 0;		/* fini pending flag */
static int deliver_buf = 0;		/* Current event buffer from kernel */
static int dispatch_buf = 0;		/* Current event buffer dispatched */
static sysevent_t **eventbuf;		/* Global array of event buffers */
static struct ev_completion *event_compq;	/* Event completion queue */
static mutex_t ev_comp_lock;		/* Event completion queue lock */
static mutex_t err_mutex;		/* error logging lock */
static mutex_t door_lock;		/* sync door return access */
static rwlock_t mod_unload_lock;	/* sync module unloading */

/* declarations and definitions for avoiding multiple daemons running */
#define	DAEMON_LOCK_FILE "/etc/sysevent/syseventd_lock"
char local_lock_file[PATH_MAX + 1];
static int hold_daemon_lock;
static int daemon_lock_fd;

/*
 * sema_eventbuf - guards against the global buffer eventbuf
 *	being written to before it has been dispatched to clients
 *
 * sema_dispatch - synchronizes between the kernel uploading thread
 *	(producer) and the userland dispatch_message thread (consumer).
 *
 * sema_resource - throttles outstanding event consumption.
 *
 * event_comp_cv - synchronizes threads waiting for the event completion queue
 *	to empty or become active.
 */
static sema_t sema_eventbuf, sema_dispatch, sema_resource;
static cond_t event_comp_cv;
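
/*
 * Buffer flow summary (derived from door_upcall and dispatch_message
 * below): eventbuf is a circular buffer of SE_EVENT_DISPATCH_CNT slots.
 * The kernel door upcall (producer) reserves a slot with sema_eventbuf,
 * copies the event into eventbuf[deliver_buf] and posts sema_dispatch.
 * dispatch_message (consumer) waits on sema_dispatch and sema_resource,
 * dispatches eventbuf[dispatch_buf] and posts sema_eventbuf once the
 * event has been copied out.  sema_resource is only posted again by the
 * event completion thread when an event is fully complete, which is
 * what bounds the number of outstanding events.
 */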

/* Self-tuning concurrency level */
#define	MIN_CONCURRENCY_LEVEL	4
static int concurrency_level = MIN_CONCURRENCY_LEVEL;


/* SLM defines */
#define	MODULE_SUFFIX	".so"
#define	EVENT_FINI	"slm_fini"
#define	EVENT_INIT	"slm_init"

#define	SE_TIMEOUT	60	/* Client dispatch timeout (seconds) */

/* syslog message related */
static int logflag = 0;
static char *prog;

/* function prototypes */
static void door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid);
static void dispatch_message(void);
static int dispatch(void);
static void event_completion_thr(void);
static void usage(void);

static void syseventd_init(void);
static void syseventd_fini(int sig);

static pid_t enter_daemon_lock(void);
static void exit_daemon_lock(void);

static void
usage()
{
	(void) fprintf(stderr, "usage: syseventd [-d <debug_level>] "
	    "[-r <root_dir>]\n");
	(void) fprintf(stderr, "higher debug levels get progressively ");
	(void) fprintf(stderr, "more detailed debug information.\n");
	(void) fprintf(stderr, "syseventd will run in background if ");
	(void) fprintf(stderr, "run with a debug_level less than %d.\n",
	    DEBUG_LEVEL_FORK);
	exit(2);
}


/* common exit function which ensures releasing locks */
void
syseventd_exit(int status)
{
	syseventd_print(1, "exit status = %d\n", status);

	if (hold_daemon_lock) {
		exit_daemon_lock();
	}

	exit(status);
}


/*
 * hup_handler - SIGHUP handler.  SIGHUP is used to force a reload of
 *		all SLMs.  During fini, events are drained from all
 *		client event queues.  The events that have been consumed
 *		by all clients are freed from the kernel event queue.
 *
 *		Events that have not yet been delivered to all clients
 *		are not freed and will be replayed after all SLMs have
 *		been (re)loaded.
 *
 *		After all client event queues have been drained, each
 *		SLM client is unloaded.  The init phase will (re)load
 *		each SLM and initiate event replay and delivery from
 *		the kernel.
 */
/*ARGSUSED*/
static void
hup_handler(int sig)
{
	syseventd_err_print(SIGHUP_CAUGHT);
	(void) fflush(0);
	syseventd_fini(sig);
	syseventd_init();
	syseventd_err_print(DAEMON_RESTARTED);
	(void) fflush(0);
}

/*
 * Fault handler for other signals caught
 */
/*ARGSUSED*/
static void
flt_handler(int sig)
{
	char signame[SIG2STR_MAX];

	if (sig2str(sig, signame) == -1) {
		syseventd_err_print(UNKNOWN_SIGNAL_CAUGHT, sig);
	}

	(void) se_signal_sethandler(sig, SIG_DFL, NULL);

	switch (sig) {
	case SIGINT:
	case SIGSTOP:
	case SIGTERM:
		/* Close kernel door */
		(void) door_revoke(upcall_door);

		/* Gracefully exit current event delivery threads */
		syseventd_fini(sig);

		(void) fflush(0);
		(void) se_signal_unblockall();
		syseventd_exit(1);
		/*NOTREACHED*/
	default:
		syseventd_err_print(FATAL_ERROR);
		(void) fflush(0);

	}
}

static void
sigwait_thr()
{
	int	sig;
	int	err;
	sigset_t signal_set;

	for (;;) {
		syseventd_print(3, "sigwait thread waiting for signal\n");
		(void) sigfillset(&signal_set);
		err = sigwait(&signal_set, &sig);
		if (err) {
			syseventd_exit(2);
		}

		/*
		 * Block all signals until the signal handler completes
		 */
		if (sig == SIGHUP) {
			hup_handler(sig);
		} else {
			flt_handler(sig);
		}
	}
	/* NOTREACHED */
}

static void
set_root_dir(char *dir)
{
	root_dir = malloc(strlen(dir) + 1);
	if (root_dir == NULL) {
		syseventd_err_print(INIT_ROOT_DIR_ERR, strerror(errno));
		syseventd_exit(2);
	}
	(void) strcpy(root_dir, dir);
}
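
/*
 * Signal model (summary): main() blocks all signals before creating any
 * threads, so every thread inherits a fully blocked mask.  Signals are
 * instead consumed synchronously by sigwait_thr above, which routes
 * SIGHUP to hup_handler (daemon restart) and everything else to
 * flt_handler, keeping the handlers out of async-signal context.
 */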

void
main(int argc, char **argv)
{
	int i, c;
	int fd;
	pid_t pid;
	extern char *optarg;

	(void) setlocale(LC_ALL, "");
	(void) textdomain(TEXT_DOMAIN);

	if (getuid() != 0) {
		(void) fprintf(stderr, "Must be root to run syseventd\n");
		syseventd_exit(1);
	}

	if (argc > 5) {
		usage();
	}

	if ((prog = strrchr(argv[0], '/')) == NULL) {
		prog = argv[0];
	} else {
		prog++;
	}

	while ((c = getopt(argc, argv, "d:r:")) != EOF) {
		switch (c) {
		case 'd':
			debug_level = atoi(optarg);
			break;
		case 'r':
			/*
			 * Private flag for suninstall to run
			 * daemon during install.
			 */
			set_root_dir(optarg);
			break;
		case '?':
		default:
			usage();
		}
	}

	/* daemonize ourselves */
	if (debug_level < DEBUG_LEVEL_FORK) {

		if (fork()) {
			syseventd_exit(0);
		}

		/* child */

		(void) chdir("/");
		(void) setsid();
		if (debug_level <= 1) {
			closefrom(0);
			fd = open("/dev/null", 0);
			(void) dup2(fd, 1);
			(void) dup2(fd, 2);
			logflag = 1;
		}
	}

	openlog("syseventd", LOG_PID, LOG_DAEMON);

	(void) mutex_init(&err_mutex, USYNC_THREAD, NULL);

	syseventd_print(8,
	    "syseventd started, debug level = %d\n", debug_level);

	/* only one instance of syseventd can run at a time */
	if ((pid = enter_daemon_lock()) != getpid()) {
		syseventd_print(1,
		    "event daemon pid %ld already running\n", pid);
		exit(3);
	}

	/* initialize semaphores and eventbuf */
	(void) sema_init(&sema_eventbuf, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) sema_init(&sema_dispatch, 0, USYNC_THREAD, NULL);
	(void) sema_init(&sema_resource, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) cond_init(&event_comp_cv, USYNC_THREAD, NULL);
	eventbuf = (sysevent_t **)calloc(SE_EVENT_DISPATCH_CNT,
	    sizeof (sysevent_t *));
	if (eventbuf == NULL) {
		syseventd_print(1, "Unable to allocate event buffer array\n");
		exit(2);
	}
	for (i = 0; i < SE_EVENT_DISPATCH_CNT; ++i) {
		eventbuf[i] = malloc(LOGEVENT_BUFSIZE);
		if (eventbuf[i] == NULL) {
			syseventd_print(1, "Unable to allocate event "
			    "buffers\n");
			exit(2);
		}
	}

	(void) mutex_init(&client_tbl_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&ev_comp_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&door_lock, USYNC_THREAD, NULL);
	(void) rwlock_init(&mod_unload_lock, USYNC_THREAD, NULL);

	event_compq = NULL;

	syseventd_print(8, "start the message thread running\n");

	/*
	 * Block all signals to all threads, including the main thread.
	 * The sigwait_thr thread will process any signals and initiate
	 * a graceful recovery if possible.
	 */
	if (se_signal_blockall() < 0) {
		syseventd_err_print(INIT_SIG_BLOCK_ERR);
		syseventd_exit(2);
	}

	if (thr_create(NULL, NULL, (void *(*)(void *))dispatch_message,
	    (void *)0, 0, NULL) < 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}
	if (thr_create(NULL, NULL,
	    (void *(*)(void *))event_completion_thr, NULL,
	    THR_BOUND, NULL) != 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}
	/* Create signal catching thread */
	if (thr_create(NULL, NULL, (void *(*)(void *))sigwait_thr,
	    NULL, 0, NULL) < 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}

	setbuf(stdout, (char *)NULL);

	/* Initialize and load SLM clients */
	initialize_client_tbl();
	syseventd_init();

	syseventd_print(8, "Pausing\n");

	for (;;) {
		(void) pause();
	}
	/* NOTREACHED */
}

/*
 * door_upcall - called from the kernel via kernel sysevent door
 *		to upload event(s).
 *
 *		This routine should never block.  If resources are
 *		not available to immediately accept the event buffer,
 *		EAGAIN is returned to the kernel.
 *
 *		Once resources are available, the kernel is notified
 *		via a modctl interface to resume event delivery to
 *		syseventd.
 */
/*ARGSUSED*/
static void
door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid)
{
	sysevent_t *ev;
	int rval;


	(void) mutex_lock(&door_lock);
	if (args == NULL) {
		rval = EINVAL;
	} else if (sema_trywait(&sema_eventbuf)) {
		/* No free event buffer slot, ask the kernel to retry */
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;
		syseventd_print(2, "door_upcall: busy event %llx "
		    "retry\n", sysevent_get_seq(ev));
		rval = door_upcall_retval = EAGAIN;
	} else {
		/*
		 * Copy received message to local buffer.
		 */
		size_t size;
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;

		syseventd_print(2, "door_upcall: event %llx in eventbuf %d\n",
		    sysevent_get_seq(ev), deliver_buf);
		size = sysevent_get_size(ev) > LOGEVENT_BUFSIZE ?
		    LOGEVENT_BUFSIZE : sysevent_get_size(ev);
		(void) bcopy(ev, eventbuf[deliver_buf], size);
		deliver_buf = (deliver_buf + 1) % SE_EVENT_DISPATCH_CNT;
		rval = 0;
		(void) sema_post(&sema_dispatch);
	}

	(void) mutex_unlock(&door_lock);

	/*
	 * Filling in return values for door_return
	 */
	(void) door_return((void *)&rval, sizeof (rval), NULL, 0);
	(void) door_return(NULL, 0, NULL, 0);
}

/*
 * dispatch_message - dispatch message thread
 *		This thread spins until an event buffer is delivered
 *		from the kernel.
 *
 *		It will wait to dispatch an event to any clients
 *		until adequate resources are available to process
 *		the event buffer.
 */
static void
dispatch_message(void)
{
	int error;

	for (;;) {
		syseventd_print(3, "dispatch_message: thread started\n");
		/*
		 * Spin till a message comes
		 */
		while (sema_wait(&sema_dispatch) != 0) {
			syseventd_print(1,
			    "dispatch_message: sema_wait failed\n");
			(void) sleep(1);
		}

		syseventd_print(3, "dispatch_message: sema_dispatch\n");

		/*
		 * Wait for available resources
		 */
		while (sema_wait(&sema_resource) != 0) {
			syseventd_print(1, "dispatch_message: sema_wait "
			    "failed\n");
			(void) sleep(1);
		}

		syseventd_print(2, "dispatch_message: eventbuf %d\n",
		    dispatch_buf);

		/*
		 * Client dispatch
		 */
		do {
			error = dispatch();
		} while (error == EAGAIN);

		syseventd_print(2, "eventbuf %d dispatched\n", dispatch_buf);
		dispatch_buf = (dispatch_buf + 1) % SE_EVENT_DISPATCH_CNT;

		/*
		 * kernel received a busy signal -
		 * kickstart the kernel delivery thread
		 * door_lock blocks the kernel so we hold it for the
		 * shortest time possible.
		 */
		(void) mutex_lock(&door_lock);
		if (door_upcall_retval == EAGAIN && !fini_pending) {
			syseventd_print(3, "dispatch_message: retrigger "
			    "door_upcall_retval = %d\n",
			    door_upcall_retval);
			(void) modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH,
			    NULL, NULL, NULL, 0);
			door_upcall_retval = 0;
		}
		(void) mutex_unlock(&door_lock);
	}
	/* NOTREACHED */
}
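
/*
 * Kernel retrigger handshake (summary of the two routines above): when
 * door_upcall has no free buffer slot it returns EAGAIN to the kernel
 * and records it in door_upcall_retval, after which the kernel stops
 * pushing events.  Once dispatch_message frees a slot it notices the
 * recorded EAGAIN and issues modctl(MODEVENTS, MODEVENTS_FLUSH) to ask
 * the kernel to resume delivery from its event queue.
 */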

/*
 * drain_eventq - Called to drain all pending events from the client's
 *		event queue.
 */
static void
drain_eventq(struct sysevent_client *scp, int status)
{
	struct event_dispatch_pkg *d_pkg;
	struct event_dispatchq *eventq, *eventq_next;

	syseventd_print(3, "Draining eventq for client %d\n",
	    scp->client_num);

	eventq = scp->eventq;
	while (eventq) {
		/*
		 * Mark all dispatched events as completed, but indicate the
		 * error status
		 */
		d_pkg = eventq->d_pkg;

		syseventd_print(4, "drain event 0X%llx for client %d\n",
		    sysevent_get_seq(d_pkg->ev), scp->client_num);

		if (d_pkg->completion_state == SE_NOT_DISPATCHED) {
			d_pkg->completion_status = status;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);
		}

		eventq_next = eventq->next;
		free(eventq);
		eventq = eventq_next;
		scp->eventq = eventq;
	}
}
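
/*
 * Note: drain_eventq is called with scp->client_lock held, as both
 * callers (client_deliver_event_thr and unload_modules) do.  Packages
 * still in SE_NOT_DISPATCHED state are completed here with the
 * caller-supplied error status; a package already marked SE_OUTSTANDING
 * is completed by the delivery thread itself.
 */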

/*
 * client_deliver_event_thr - Client delivery thread
 *		This thread will process any events on this
 *		client's eventq.
 */
static void
client_deliver_event_thr(void *arg)
{
	int flag, error, i;
	sysevent_t *ev;
	hrtime_t now;
	module_t *mod;
	struct event_dispatchq *eventq;
	struct sysevent_client *scp;
	struct event_dispatch_pkg *d_pkg;

	scp = (struct sysevent_client *)arg;
	mod = (module_t *)scp->client_data;

	(void) mutex_lock(&scp->client_lock);
	for (;;) {
		while (scp->eventq == NULL) {

			/*
			 * Client has been suspended or unloaded, go no
			 * further.
			 */
			if (fini_pending) {
				scp->client_flags &= ~SE_CLIENT_THR_RUNNING;
				syseventd_print(3, "Client %d delivery thread "
				    "exiting flags: 0X%x\n",
				    scp->client_num, scp->client_flags);
				(void) mutex_unlock(&scp->client_lock);
				return;
			}

			(void) cond_wait(&scp->client_cv, &scp->client_lock);

		}

		/*
		 * Process events from the head of the eventq, eventq is
		 * locked going into the processing.
		 */
		eventq = scp->eventq;
		while (eventq != NULL) {
			d_pkg = eventq->d_pkg;
			d_pkg->completion_state = SE_OUTSTANDING;
			(void) mutex_unlock(&scp->client_lock);


			flag = error = 0;
			ev = d_pkg->ev;

			syseventd_print(3, "Start delivery for client %d "
			    "with retry count %d\n",
			    scp->client_num, d_pkg->retry_count);

			/*
			 * Retry until the client's retry limit has been
			 * reached, then indicate that no further retries
			 * are allowed.
			 */
			for (i = 0; i <= scp->retry_limit; ++i) {
				if (i == scp->retry_limit)
					flag = SE_NO_RETRY;

				/* Start the clock for the event delivery */
				d_pkg->start_time = gethrtime();

				syseventd_print(9, "Deliver to module client "
				    "%s\n", mod->name);

				error = mod->deliver_event(ev, flag);

				/* Can not allow another retry */
				if (i == scp->retry_limit)
					error = 0;

				/* Stop the clock */
				now = gethrtime();

				/*
				 * Suspend event processing and drain the
				 * event q for latent clients
				 */
				if (now - d_pkg->start_time >
				    ((hrtime_t)SE_TIMEOUT * NANOSEC)) {
					syseventd_print(1, "Unresponsive "
					    "client %d: Draining eventq and "
					    "suspending event delivery\n",
					    scp->client_num);
					(void) mutex_lock(&scp->client_lock);
					scp->client_flags &=
					    ~SE_CLIENT_THR_RUNNING;
					scp->client_flags |=
					    SE_CLIENT_SUSPENDED;

					/* Cleanup current event */
					d_pkg->completion_status = EFAULT;
					d_pkg->completion_state = SE_COMPLETE;
					(void) sema_post(
					    d_pkg->completion_sema);

					/*
					 * Drain the remaining events from the
					 * queue.
					 */
					drain_eventq(scp, EINVAL);
					(void) mutex_unlock(&scp->client_lock);
					return;
				}

				/* Event delivery retry requested */
				if (fini_pending || error != EAGAIN) {
					break;
				} else {
					(void) sleep(SE_RETRY_TIME);
				}
			}

			(void) mutex_lock(&scp->client_lock);
			d_pkg->completion_status = error;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);

			/* Update eventq pointer */
			if (scp->eventq != NULL) {
				scp->eventq = eventq->next;
				free(eventq);
				eventq = scp->eventq;
			} else {
				free(eventq);
				break;
			}

			syseventd_print(3, "Completed delivery with "
			    "error %d\n", error);
		}

		syseventd_print(3, "No more events to process for client %d\n",
		    scp->client_num);

		/* Return if this was a synchronous delivery */
		if (!SE_CLIENT_IS_THR_RUNNING(scp)) {
			(void) mutex_unlock(&scp->client_lock);
			return;
		}

	}
}
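
/*
 * Retry policy (summary of the loop above): an SLM returning EAGAIN is
 * retried after SE_RETRY_TIME seconds for up to scp->retry_limit
 * attempts; the final attempt passes the SE_NO_RETRY flag and its error
 * status is forced to 0 since no further retry is possible.  Any single
 * attempt exceeding SE_TIMEOUT seconds suspends the client
 * (SE_CLIENT_SUSPENDED) and drains its remaining queue.
 */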

/*
 * client_deliver_event - Client specific event delivery
 *		This routine will allocate and initialize the
 *		necessary per-client dispatch data.
 *
 *		If the eventq is not empty, it may be assumed that
 *		a delivery thread exists for this client and the
 *		dispatch data is appended to the eventq.
 *
 *		The dispatch package is freed by the event completion
 *		thread (event_completion_thr) and the eventq entry
 *		is freed by the event delivery thread.
 */
static struct event_dispatch_pkg *
client_deliver_event(struct sysevent_client *scp, sysevent_t *ev,
    sema_t *completion_sema)
{
	size_t ev_sz = sysevent_get_size(ev);
	struct event_dispatchq *newq, *tmp;
	struct event_dispatch_pkg *d_pkg;

	syseventd_print(3, "client_deliver_event: id 0x%llx size %d\n",
	    (longlong_t)sysevent_get_seq(ev), ev_sz);
	if (debug_level == 9) {
		se_print(stdout, ev);
	}

	/*
	 * Check for suspended client
	 */
	(void) mutex_lock(&scp->client_lock);
	if (SE_CLIENT_IS_SUSPENDED(scp) || !SE_CLIENT_IS_THR_RUNNING(scp)) {
		(void) mutex_unlock(&scp->client_lock);
		return (NULL);
	}

	/*
	 * Allocate a new dispatch package and eventq entry
	 */
	newq = (struct event_dispatchq *)malloc(
	    sizeof (struct event_dispatchq));
	if (newq == NULL) {
		(void) mutex_unlock(&scp->client_lock);
		return (NULL);
	}

	d_pkg = (struct event_dispatch_pkg *)malloc(
	    sizeof (struct event_dispatch_pkg));
	if (d_pkg == NULL) {
		free(newq);
		(void) mutex_unlock(&scp->client_lock);
		return (NULL);
	}

	/* Initialize the dispatch package */
	d_pkg->scp = scp;
	d_pkg->retry_count = 0;
	d_pkg->completion_status = 0;
	d_pkg->completion_state = SE_NOT_DISPATCHED;
	d_pkg->completion_sema = completion_sema;
	d_pkg->ev = ev;
	newq->d_pkg = d_pkg;
	newq->next = NULL;

	if (scp->eventq != NULL) {

		/* Add entry to the end of the eventq */
		tmp = scp->eventq;
		while (tmp->next != NULL)
			tmp = tmp->next;
		tmp->next = newq;
	} else {
		/* event queue empty, wakeup delivery thread */
		scp->eventq = newq;
		(void) cond_signal(&scp->client_cv);
	}
	(void) mutex_unlock(&scp->client_lock);

	return (d_pkg);
}
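
/*
 * Allocation ownership summary (collected from the block comments in
 * this file): dispatch() allocates the ev_completion record and the
 * event copy; client_deliver_event() allocates one eventq entry and one
 * dispatch package per client; the client delivery thread frees the
 * eventq entry; the event completion thread frees the dispatch package,
 * its dispatch-list entry, the event copy and the completion record.
 */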

/*
 * event_completion_thr - Event completion thread.  This thread routine
 *		waits for all client delivery threads to complete
 *		delivery of a particular event.
 */
static void
event_completion_thr()
{
	int ret, i, client_count, ok_to_free;
	sysevent_id_t eid;
	struct sysevent_client *scp;
	struct ev_completion *ev_comp;
	struct event_dispatchq *dispatchq;
	struct event_dispatch_pkg *d_pkg;

	(void) mutex_lock(&ev_comp_lock);
	for (;;) {
		while (event_compq == NULL) {
			(void) cond_wait(&event_comp_cv, &ev_comp_lock);
		}

		/*
		 * Process event completions from the head of the
		 * completion queue
		 */
		ev_comp = event_compq;
		while (ev_comp) {
			(void) mutex_unlock(&ev_comp_lock);
			eid.eid_seq = sysevent_get_seq(ev_comp->ev);
			sysevent_get_time(ev_comp->ev, &eid.eid_ts);
			client_count = ev_comp->client_count;
			ok_to_free = 1;

			syseventd_print(3, "Wait for event completion of "
			    "event 0X%llx on %d clients\n",
			    eid.eid_seq, client_count);

			while (client_count) {
				syseventd_print(9, "Waiting for %d clients on "
				    "event id 0X%llx\n", client_count,
				    eid.eid_seq);

				(void) sema_wait(&ev_comp->client_sema);
				--client_count;
			}

			syseventd_print(3, "Cleaning up clients for event "
			    "0X%llx\n", eid.eid_seq);
			dispatchq = ev_comp->dispatch_list;
			while (dispatchq != NULL) {
				d_pkg = dispatchq->d_pkg;
				scp = d_pkg->scp;

				if (d_pkg->completion_status == EAGAIN)
					ok_to_free = 0;

				syseventd_print(4, "Delivery of 0X%llx "
				    "complete for client %d retry count %d "
				    "status %d\n", eid.eid_seq,
				    scp->client_num,
				    d_pkg->retry_count,
				    d_pkg->completion_status);

				free(d_pkg);
				ev_comp->dispatch_list = dispatchq->next;
				free(dispatchq);
				dispatchq = ev_comp->dispatch_list;
			}

			if (ok_to_free) {
				for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
					if ((ret = modctl(MODEVENTS,
					    (uintptr_t)MODEVENTS_FREEDATA,
					    (uintptr_t)&eid, NULL,
					    NULL, 0)) != 0) {
						syseventd_print(1, "attempting "
						    "to free event 0X%llx\n",
						    eid.eid_seq);

						/*
						 * Kernel may need time to
						 * move this event buffer to
						 * the sysevent sent queue
						 */
						(void) sleep(1);
					} else {
						break;
					}
				}
				if (ret) {
					syseventd_print(1, "Unable to free "
					    "event 0X%llx from the "
					    "kernel\n", eid.eid_seq);
				}
			} else {
				syseventd_print(1, "Not freeing event 0X%llx\n",
				    eid.eid_seq);
			}

			syseventd_print(2, "Event delivery complete for id "
			    "0X%llx\n", eid.eid_seq);

			(void) mutex_lock(&ev_comp_lock);
			event_compq = ev_comp->next;
			free(ev_comp->ev);
			free(ev_comp);
			ev_comp = event_compq;
			(void) sema_post(&sema_resource);
		}

		/*
		 * Event completion queue is empty, signal possible unload
		 * operation
		 */
		(void) cond_signal(&event_comp_cv);

		syseventd_print(3, "No more events\n");
	}
}
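
/*
 * Note on ok_to_free above: the event is freed from the kernel's queue
 * (MODEVENTS_FREEDATA) only when every client completed without EAGAIN.
 * Otherwise the buffer is intentionally retained so the kernel can
 * replay it, e.g. after a SIGHUP reload (see hup_handler).
 */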

/*
 * dispatch - Dispatch the current event buffer to all valid SLM clients.
 */
static int
dispatch(void)
{
	int ev_sz, i, client_count = 0;
	sysevent_t *new_ev;
	sysevent_id_t eid;
	struct ev_completion *ev_comp, *tmp;
	struct event_dispatchq *dispatchq, *client_list;
	struct event_dispatch_pkg *d_pkg;

	/* Check for module unload operation */
	if (rw_tryrdlock(&mod_unload_lock) != 0) {
		syseventd_print(2, "unload in progress abort delivery\n");
		(void) sema_post(&sema_eventbuf);
		(void) sema_post(&sema_resource);
		return (0);
	}

	syseventd_print(3, "deliver dispatch buffer %d", dispatch_buf);
	eid.eid_seq = sysevent_get_seq(eventbuf[dispatch_buf]);
	sysevent_get_time(eventbuf[dispatch_buf], &eid.eid_ts);
	syseventd_print(3, "deliver msg id: 0x%llx\n", eid.eid_seq);

	/*
	 * ev_comp is used to hold event completion data.  It is freed
	 * by the event completion thread (event_completion_thr).
	 */
	ev_comp = (struct ev_completion *)
	    malloc(sizeof (struct ev_completion));
	if (ev_comp == NULL) {
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate event completion buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}
	ev_comp->dispatch_list = NULL;
	ev_comp->next = NULL;
	(void) sema_init(&ev_comp->client_sema, 0, USYNC_THREAD, NULL);

	ev_sz = sysevent_get_size(eventbuf[dispatch_buf]);
	new_ev = calloc(1, ev_sz);
	if (new_ev == NULL) {
		free(ev_comp);
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate new event buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}


	/*
	 * For long messages, copy additional data from kernel
	 */
	if (ev_sz > LOGEVENT_BUFSIZE) {
		int ret = 0;

		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);

		for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
			if ((ret = modctl(MODEVENTS,
			    (uintptr_t)MODEVENTS_GETDATA,
			    (uintptr_t)&eid,
			    (uintptr_t)ev_sz,
			    (uintptr_t)new_ev, 0))
			    == 0)
				break;
			else
				(void) sleep(1);
		}
		if (ret) {
			syseventd_print(1, "GET_DATA failed for 0X%llx:%llx\n",
			    eid.eid_ts, eid.eid_seq);
			free(new_ev);
			free(ev_comp);
			(void) rw_unlock(&mod_unload_lock);
			return (EAGAIN);
		}
	} else {
		(void) bcopy(eventbuf[dispatch_buf], new_ev, ev_sz);
		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);
	}


	/*
	 * Deliver a copy of eventbuf to clients so
	 * eventbuf can be used for the next message
	 */
	for (i = 0; i < MAX_SLM; ++i) {

		/* Don't bother for suspended or unloaded clients */
		if (!SE_CLIENT_IS_LOADED(sysevent_client_tbl[i]) ||
		    SE_CLIENT_IS_SUSPENDED(sysevent_client_tbl[i]))
			continue;

		/*
		 * Allocate event dispatch queue entry.  All queue entries
		 * are freed by the event completion thread as client
		 * delivery completes.
		 */
		dispatchq = (struct event_dispatchq *)malloc(
		    sizeof (struct event_dispatchq));
		if (dispatchq == NULL) {
			syseventd_print(1, "Can not allocate dispatch q "
			    "for event id 0X%llx client %d\n", eid.eid_seq, i);
			continue;
		}
		dispatchq->next = NULL;

		/* Initiate client delivery */
		d_pkg = client_deliver_event(sysevent_client_tbl[i],
		    new_ev, &ev_comp->client_sema);
		if (d_pkg == NULL) {
			syseventd_print(1, "Can not allocate dispatch "
			    "package for event id 0X%llx client %d\n",
			    eid.eid_seq, i);
			free(dispatchq);
			continue;
		}
		dispatchq->d_pkg = d_pkg;
		++client_count;

		if (ev_comp->dispatch_list == NULL) {
			ev_comp->dispatch_list = dispatchq;
			client_list = dispatchq;
		} else {
			client_list->next = dispatchq;
			client_list = client_list->next;
		}
	}

	ev_comp->client_count = client_count;
	ev_comp->ev = new_ev;

	(void) mutex_lock(&ev_comp_lock);

	if (event_compq == NULL) {
		syseventd_print(3, "Wakeup event completion thread for "
		    "id 0X%llx\n", eid.eid_seq);
		event_compq = ev_comp;
		(void) cond_signal(&event_comp_cv);
	} else {

		/* Add entry to the end of the event completion queue */
		tmp = event_compq;
		while (tmp->next != NULL)
			tmp = tmp->next;
		tmp->next = ev_comp;
		syseventd_print(3, "event added to completion queue for "
		    "id 0X%llx\n", eid.eid_seq);
	}
	(void) mutex_unlock(&ev_comp_lock);
	(void) rw_unlock(&mod_unload_lock);

	return (0);
}

#define	MODULE_DIR_HW	"/usr/platform/%s/lib/sysevent/modules/"
#define	MODULE_DIR_GEN	"/usr/lib/sysevent/modules/"
#define	MOD_DIR_NUM	3
static char dirname[MOD_DIR_NUM][MAXPATHLEN];

static char *
dir_num2name(int dirnum)
{
	char infobuf[MAXPATHLEN];

	if (dirnum >= MOD_DIR_NUM)
		return (NULL);

	if (dirname[0][0] == '\0') {
		if (sysinfo(SI_PLATFORM, infobuf, MAXPATHLEN) == -1) {
			syseventd_print(1, "dir_num2name: "
			    "sysinfo error %s\n", strerror(errno));
			return (NULL);
		} else if (snprintf(dirname[0], sizeof (dirname[0]),
		    MODULE_DIR_HW, infobuf) >= sizeof (dirname[0])) {
			syseventd_print(1, "dir_num2name: "
			    "platform name too long: %s\n",
			    infobuf);
			return (NULL);
		}
		if (sysinfo(SI_MACHINE, infobuf, MAXPATHLEN) == -1) {
			syseventd_print(1, "dir_num2name: "
			    "sysinfo error %s\n", strerror(errno));
			return (NULL);
		} else if (snprintf(dirname[1], sizeof (dirname[1]),
		    MODULE_DIR_HW, infobuf) >= sizeof (dirname[1])) {
			syseventd_print(1, "dir_num2name: "
			    "machine name too long: %s\n",
			    infobuf);
			return (NULL);
		}
		(void) strcpy(dirname[2], MODULE_DIR_GEN);
	}

	return (dirname[dirnum]);
}
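
/*
 * For example, on a system whose SI_PLATFORM is SUNW,Ultra-60 and whose
 * SI_MACHINE is sun4u (illustrative values), the module search order
 * built above would be:
 *
 *	/usr/platform/SUNW,Ultra-60/lib/sysevent/modules/
 *	/usr/platform/sun4u/lib/sysevent/modules/
 *	/usr/lib/sysevent/modules/
 */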

/*
 * load_modules - Load modules found in the common syseventd module directories
 *		Modules that do not provide valid interfaces are rejected.
 */
static void
load_modules(char *dirname)
{
	int client_id;
	DIR *mod_dir;
	module_t *mod;
	struct dirent *entp;
	struct slm_mod_ops *mod_ops;
	struct sysevent_client *scp;

	if (dirname == NULL)
		return;

	/* Return silently if module directory does not exist */
	if ((mod_dir = opendir(dirname)) == NULL) {
		syseventd_print(1, "Unable to open module directory %s: %s\n",
		    dirname, strerror(errno));
		return;
	}

	syseventd_print(3, "loading modules from %s\n", dirname);

	/*
	 * Go through directory, looking for files ending with .so
	 */
	while ((entp = readdir(mod_dir)) != NULL) {
		void *dlh, *f;
		char *tmp, modpath[MAXPATHLEN];

		if (((tmp = strstr(entp->d_name, MODULE_SUFFIX)) == NULL) ||
		    (tmp[strlen(MODULE_SUFFIX)] != '\0')) {
			continue;
		}

		if (snprintf(modpath, sizeof (modpath), "%s%s",
		    dirname, entp->d_name) >= sizeof (modpath)) {
			syseventd_err_print(INIT_PATH_ERR, modpath);
			continue;
		}
		if ((dlh = dlopen(modpath, RTLD_LAZY)) == NULL) {
			syseventd_err_print(LOAD_MOD_DLOPEN_ERR,
			    modpath, dlerror());
			continue;
		} else if ((f = dlsym(dlh, EVENT_INIT)) == NULL) {
			syseventd_err_print(LOAD_MOD_NO_INIT,
			    modpath, dlerror());
			(void) dlclose(dlh);
			continue;
		}

		mod = malloc(sizeof (*mod));
		if (mod == NULL) {
			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod",
			    strerror(errno));
			(void) dlclose(dlh);
			continue;
		}

		mod->name = strdup(entp->d_name);
		if (mod->name == NULL) {
			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod->name",
			    strerror(errno));
			(void) dlclose(dlh);
			free(mod);
			continue;
		}

		mod->dlhandle = dlh;
		mod->event_mod_init = (struct slm_mod_ops *(*)())f;

		/* load in other module functions */
		mod->event_mod_fini = (void (*)())dlsym(dlh, EVENT_FINI);
		if (mod->event_mod_fini == NULL) {
			syseventd_err_print(LOAD_MOD_DLSYM_ERR, mod->name,
			    dlerror());
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}

		/* Call module init routine */
		if ((mod_ops = mod->event_mod_init()) == NULL) {
			syseventd_err_print(LOAD_MOD_EINVAL, mod->name);
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}
		if (mod_ops->major_version != SE_MAJOR_VERSION) {
			syseventd_err_print(LOAD_MOD_VERSION_MISMATCH,
			    mod->name, SE_MAJOR_VERSION,
			    mod_ops->major_version);
			mod->event_mod_fini();
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}

		mod->deliver_event = mod_ops->deliver_event;
		/* Add module entry to client list */
		if ((client_id = insert_client((void *)mod, SLM_CLIENT,
		    (mod_ops->retry_limit <= SE_MAX_RETRY_LIMIT ?
		    mod_ops->retry_limit : SE_MAX_RETRY_LIMIT))) < 0) {
			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client",
			    strerror(errno));
			mod->event_mod_fini();
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}

		scp = sysevent_client_tbl[client_id];
		++concurrency_level;
		(void) thr_setconcurrency(concurrency_level);
		if (thr_create(NULL, 0,
		    (void *(*)(void *))client_deliver_event_thr,
		    (void *)scp, THR_BOUND, &scp->tid) != 0) {

			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client",
			    strerror(errno));
			mod->event_mod_fini();
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}
		scp->client_flags |= SE_CLIENT_THR_RUNNING;

		syseventd_print(3, "loaded module %s\n", entp->d_name);
	}

	(void) closedir(mod_dir);
	syseventd_print(3, "modules loaded\n");
}
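
/*
 * A minimal SLM sketch, inferred from the loader above.  Only the
 * slm_mod_ops fields this file actually touches (major_version,
 * retry_limit, deliver_event) are shown; any other fields or values
 * are assumptions.  A module exports slm_init() and slm_fini():
 *
 *	static int
 *	my_deliver_event(sysevent_t *ev, int flag)
 *	{
 *		// process ev; return 0 on success, or EAGAIN to
 *		// request a retry (unless called with SE_NO_RETRY)
 *		return (0);
 *	}
 *
 *	static struct slm_mod_ops my_mod_ops = {
 *		SE_MAJOR_VERSION,	// major_version, checked above
 *		SE_MINOR_VERSION,	// minor_version (assumed field)
 *		SE_MAX_RETRY_LIMIT,	// retry_limit, clamped above
 *		my_deliver_event	// deliver_event callback
 *	};
 *
 *	struct slm_mod_ops *
 *	slm_init()
 *	{
 *		return (&my_mod_ops);
 *	}
 *
 *	void
 *	slm_fini()
 *	{
 *	}
 */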

/*
 * unload_modules - modules are unloaded prior to graceful shutdown or
 *		before restarting the daemon upon receipt of SIGHUP.
 */
static void
unload_modules(int sig)
{
	int i, count, done;
	module_t *mod;
	struct sysevent_client *scp;

	/*
	 * unload modules that are ready, skip those that have not
	 * drained their event queues.
	 */
	count = done = 0;
	while (done < MAX_SLM) {
		/* Don't wait indefinitely for unresponsive clients */
		if (sig != SIGHUP && count > SE_TIMEOUT) {
			break;
		}

		done = 0;

		/* Shutdown clients */
		for (i = 0; i < MAX_SLM; ++i) {
			scp = sysevent_client_tbl[i];
			if (mutex_trylock(&scp->client_lock) == 0) {
				if (scp->client_type != SLM_CLIENT ||
				    scp->client_data == NULL) {
					(void) mutex_unlock(&scp->client_lock);
					done++;
					continue;
				}
			} else {
				syseventd_print(3, "Skipping unload of "
				    "client %d: client locked\n",
				    scp->client_num);
				continue;
			}

			/*
			 * Drain the eventq and wait for delivery thread to
			 * cleanly exit
			 */
			drain_eventq(scp, EAGAIN);
			(void) cond_signal(&scp->client_cv);
			(void) mutex_unlock(&scp->client_lock);
			(void) thr_join(scp->tid, NULL, NULL);

			/*
			 * It is now safe to unload the module
			 */
			mod = (module_t *)scp->client_data;
			syseventd_print(2, "Unload %s\n", mod->name);
			mod->event_mod_fini();
			(void) dlclose(mod->dlhandle);
			free(mod->name);
			(void) mutex_lock(&client_tbl_lock);
			delete_client(i);
			(void) mutex_unlock(&client_tbl_lock);
			++done;

		}
		++count;
		(void) sleep(1);
	}

	/*
	 * Wait for event completions
	 */
	syseventd_print(2, "waiting for event completions\n");
	(void) mutex_lock(&ev_comp_lock);
	while (event_compq != NULL) {
		(void) cond_wait(&event_comp_cv, &ev_comp_lock);
	}
	(void) mutex_unlock(&ev_comp_lock);
}
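
/*
 * Shutdown bound (from the loop above): each pass over the client table
 * sleeps one second, so a non-SIGHUP shutdown gives clients roughly
 * SE_TIMEOUT seconds to drain before giving up.  A SIGHUP restart waits
 * as long as needed, since undelivered events will be replayed by the
 * kernel after syseventd_init() reloads the SLMs.
 */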

/*
 * syseventd_init - Called at daemon (re)start-up time to load modules
 *		and kickstart the kernel delivery engine.
 */
static void
syseventd_init()
{
	int i, fd;
	char local_door_file[PATH_MAX + 1];

	fini_pending = 0;

	concurrency_level = MIN_CONCURRENCY_LEVEL;
	(void) thr_setconcurrency(concurrency_level);

	/*
	 * Load client modules for event delivering
	 */
	for (i = 0; i < MOD_DIR_NUM; ++i) {
		load_modules(dir_num2name(i));
	}

	/*
	 * Create kernel delivery door service
	 */
	syseventd_print(8, "Create a door for kernel upcalls\n");
	if (snprintf(local_door_file, sizeof (local_door_file), "%s%s",
	    root_dir, LOGEVENT_DOOR_UPCALL) >= sizeof (local_door_file)) {
		syseventd_err_print(INIT_PATH_ERR, local_door_file);
		syseventd_exit(5);
	}

	/*
	 * Remove door file for robustness.
	 */
	if (unlink(local_door_file) != 0)
		syseventd_print(8, "Unlink of %s failed.\n", local_door_file);

	fd = open(local_door_file, O_CREAT|O_RDWR, S_IREAD|S_IWRITE);
	if ((fd == -1) && (errno != EEXIST)) {
		syseventd_err_print(INIT_OPEN_DOOR_ERR, strerror(errno));
		syseventd_exit(5);
	}
	(void) close(fd);

	upcall_door = door_create(door_upcall, NULL,
	    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	if (upcall_door == -1) {
		syseventd_err_print(INIT_CREATE_DOOR_ERR, strerror(errno));
		syseventd_exit(5);
	}

	(void) fdetach(local_door_file);
retry:
	if (fattach(upcall_door, local_door_file) != 0) {
		if (errno == EBUSY)
			goto retry;
		syseventd_err_print(INIT_FATTACH_ERR, strerror(errno));
		(void) door_revoke(upcall_door);
		syseventd_exit(5);
	}

	/*
	 * Tell kernel the door name and start delivery
	 */
	syseventd_print(2,
	    "local_door_file = %s\n", local_door_file);
	if (modctl(MODEVENTS,
	    (uintptr_t)MODEVENTS_SET_DOOR_UPCALL_FILENAME,
	    (uintptr_t)local_door_file, NULL, NULL, 0) < 0) {
		syseventd_err_print(INIT_DOOR_NAME_ERR, strerror(errno));
		syseventd_exit(6);
	}

	door_upcall_retval = 0;

	if (modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0)
	    < 0) {
		syseventd_err_print(KERNEL_REPLAY_ERR, strerror(errno));
		syseventd_exit(7);
	}
}

/*
 * syseventd_fini - shut down daemon, but do not exit
 */
static void
syseventd_fini(int sig)
{
	/*
	 * Indicate that event queues should be drained and no
	 * additional events be accepted
	 */
	fini_pending = 1;

	/* Close the kernel event door to halt delivery */
	(void) door_revoke(upcall_door);

	syseventd_print(1, "Unloading modules\n");
	(void) rw_wrlock(&mod_unload_lock);
	unload_modules(sig);
	(void) rw_unlock(&mod_unload_lock);

}

/*
 * enter_daemon_lock - lock the daemon file lock
 *
 * Use an advisory lock to ensure that only one daemon process is active
 * in the system at any point in time.  If the lock is held by another
 * process, do not block but return the pid owner of the lock to the
 * caller immediately.  The lock is cleared if the holding daemon process
 * exits for any reason even if the lock file remains, so the daemon can
 * be restarted if necessary.  The lock file is DAEMON_LOCK_FILE.
 */
static pid_t
enter_daemon_lock(void)
{
	struct flock lock;

	syseventd_print(8, "enter_daemon_lock: lock file = %s\n",
	    DAEMON_LOCK_FILE);

	if (snprintf(local_lock_file, sizeof (local_lock_file), "%s%s",
	    root_dir, DAEMON_LOCK_FILE) >= sizeof (local_lock_file)) {
		syseventd_err_print(INIT_PATH_ERR, local_lock_file);
		syseventd_exit(8);
	}
	daemon_lock_fd = open(local_lock_file, O_CREAT|O_RDWR, 0644);
	if (daemon_lock_fd < 0) {
		syseventd_err_print(INIT_LOCK_OPEN_ERR,
		    local_lock_file, strerror(errno));
		syseventd_exit(8);
	}

	lock.l_type = F_WRLCK;
	lock.l_whence = SEEK_SET;
	lock.l_start = 0;
	lock.l_len = 0;

	if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
		if (fcntl(daemon_lock_fd, F_GETLK, &lock) == -1) {
			syseventd_err_print(INIT_LOCK_ERR,
			    local_lock_file, strerror(errno));
			exit(2);
		}
		return (lock.l_pid);
	}
	hold_daemon_lock = 1;

	return (getpid());
}

/*
 * exit_daemon_lock - release the daemon file lock
 */
static void
exit_daemon_lock(void)
{
	struct flock lock;

	lock.l_type = F_UNLCK;
	lock.l_whence = SEEK_SET;
	lock.l_start = 0;
	lock.l_len = 0;

	if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
		syseventd_err_print(INIT_UNLOCK_ERR,
		    local_lock_file, strerror(errno));
	}

	if (close(daemon_lock_fd) == -1) {
		syseventd_err_print(INIT_LOCK_CLOSE_ERR,
		    local_lock_file, strerror(errno));
		exit(-1);
	}
}

/*
 * syseventd_err_print - print error messages to the terminal, if not
 *		yet daemonized, or to syslog.
 */
/*PRINTFLIKE1*/
void
syseventd_err_print(char *message, ...)
{
	va_list ap;

	(void) mutex_lock(&err_mutex);
	va_start(ap, message);

	if (logflag) {
		(void) vsyslog(LOG_ERR, message, ap);
	} else {
		(void) fprintf(stderr, "%s: ", prog);
		(void) vfprintf(stderr, message, ap);
	}
	va_end(ap);
	(void) mutex_unlock(&err_mutex);
}

/*
 * syseventd_print - print messages to the terminal or to syslog.
 *		The following levels are implemented:
 *
 * 1 - transient errors that do not affect normal program flow
 * 2 - upcall/dispatch interaction
 * 3 - program flow trace as each message goes through the daemon
 * 8 - all the nitty-gritty details of startup and shutdown
 * 9 - very verbose event flow tracing (no daemonization of syseventd)
 */
/*PRINTFLIKE2*/
void
syseventd_print(int level, char *message, ...)
{
	va_list ap;
	static int newline = 1;

	if (level > debug_level) {
		return;
	}

	(void) mutex_lock(&err_mutex);
	va_start(ap, message);
	if (logflag) {
		(void) syslog(LOG_DEBUG, "%s[%ld]: ",
		    prog, getpid());
		(void) vsyslog(LOG_DEBUG, message, ap);
	} else {
		if (newline) {
			(void) fprintf(stdout, "%s[%ld]: ",
			    prog, getpid());
			(void) vfprintf(stdout, message, ap);
		} else {
			(void) vfprintf(stdout, message, ap);
		}
	}
	if (message[strlen(message)-1] == '\n') {
		newline = 1;
	} else {
		newline = 0;
	}
	va_end(ap);
	(void) mutex_unlock(&err_mutex);
}