1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * syseventd - The system event daemon 31 * 32 * This daemon dispatches event buffers received from the 33 * kernel to all interested SLM clients. SLMs in turn 34 * deliver the buffers to their particular application 35 * clients. 36 */ 37 #include <stdio.h> 38 #include <sys/types.h> 39 #include <dirent.h> 40 #include <stdarg.h> 41 #include <stddef.h> 42 #include <stdlib.h> 43 #include <dlfcn.h> 44 #include <door.h> 45 #include <errno.h> 46 #include <fcntl.h> 47 #include <signal.h> 48 #include <strings.h> 49 #include <unistd.h> 50 #include <synch.h> 51 #include <syslog.h> 52 #include <thread.h> 53 #include <libsysevent.h> 54 #include <limits.h> 55 #include <locale.h> 56 #include <sys/sysevent.h> 57 #include <sys/sysevent_impl.h> 58 #include <sys/modctl.h> 59 #include <sys/stat.h> 60 #include <sys/systeminfo.h> 61 #include <sys/wait.h> 62 63 #include "sysevent_signal.h" 64 #include "syseventd.h" 65 #include "message.h" 66 67 extern int insert_client(void *client, int client_type, int retry_limit); 68 extern void delete_client(int id); 69 extern void initialize_client_tbl(void); 70 71 extern struct sysevent_client *sysevent_client_tbl[]; 72 extern mutex_t client_tbl_lock; 73 74 #define DEBUG_LEVEL_FORK 9 /* will run in background at all */ 75 /* levels less than DEBUG_LEVEL_FORK */ 76 77 int debug_level = 0; 78 char *root_dir = ""; /* Relative root for lock and door */ 79 80 /* Maximum number of outstanding events dispatched */ 81 #define SE_EVENT_DISPATCH_CNT 100 82 83 static int upcall_door; /* Kernel event door */ 84 static int door_upcall_retval; /* Kernel event posting return value */ 85 static int fini_pending = 0; /* fini pending flag */ 86 static int deliver_buf = 0; /* Current event buffer from kernel */ 87 static int dispatch_buf = 0; /* Current event buffer dispatched */ 88 static sysevent_t **eventbuf; /* Global array of event buffers */ 89 static struct ev_completion *event_compq; /* Event completion queue */ 90 static mutex_t ev_comp_lock; /* Event completion queue lock */ 91 static mutex_t err_mutex; /* error logging lock */ 92 static mutex_t door_lock; /* sync door return access */ 93 static rwlock_t mod_unload_lock; /* sync module unloading */ 94 95 /* declarations and definitions for avoiding multiple daemons running */ 96 #define DAEMON_LOCK_FILE "/etc/sysevent/syseventd_lock" 97 char local_lock_file[PATH_MAX + 1]; 98 static int hold_daemon_lock; 99 static int daemon_lock_fd; 100 101 /* 102 * sema_eventbuf - guards against the global buffer eventbuf 103 * being written to before it has been dispatched to clients 104 * 105 * sema_dispatch - synchronizes between the kernel uploading thread 106 * (producer) and the userland dispatch_message thread (consumer). 107 * 108 * sema_resource - throttles outstanding event consumption. 109 * 110 * event_comp_cv - synchronizes threads waiting for the event completion queue 111 * to empty or become active. 112 */ 113 static sema_t sema_eventbuf, sema_dispatch, sema_resource; 114 static cond_t event_comp_cv; 115 116 /* Self-tuning concurrency level */ 117 #define MIN_CONCURRENCY_LEVEL 4 118 static int concurrency_level = MIN_CONCURRENCY_LEVEL; 119 120 121 /* SLM defines */ 122 #define MODULE_SUFFIX ".so" 123 #define EVENT_FINI "slm_fini" 124 #define EVENT_INIT "slm_init" 125 126 #define SE_TIMEOUT 60 /* Client dispatch timeout (seconds) */ 127 128 /* syslog message related */ 129 static int logflag = 0; 130 static char *prog; 131 132 /* function prototypes */ 133 static void door_upcall(void *cookie, char *args, size_t alen, door_desc_t *ddp, 134 uint_t ndid); 135 static void dispatch_message(void); 136 static int dispatch(void); 137 static void event_completion_thr(void); 138 static void usage(void); 139 140 static void syseventd_init(void); 141 static void syseventd_fini(int sig); 142 143 static pid_t enter_daemon_lock(void); 144 static void exit_daemon_lock(void); 145 146 static void 147 usage() { 148 (void) fprintf(stderr, "usage: syseventd [-d <debug_level>] " 149 "[-r <root_dir>]\n"); 150 (void) fprintf(stderr, "higher debug levels get progressively "); 151 (void) fprintf(stderr, "more detailed debug information.\n"); 152 (void) fprintf(stderr, "syseventd will run in background if "); 153 (void) fprintf(stderr, "run with a debug_level less than %d.\n", 154 DEBUG_LEVEL_FORK); 155 exit(2); 156 } 157 158 159 /* common exit function which ensures releasing locks */ 160 void 161 syseventd_exit(int status) 162 { 163 syseventd_print(1, "exit status = %d\n", status); 164 165 if (hold_daemon_lock) { 166 exit_daemon_lock(); 167 } 168 169 exit(status); 170 } 171 172 173 /* 174 * hup_handler - SIGHUP handler. SIGHUP is used to force a reload of 175 * all SLMs. During fini, events are drained from all 176 * client event queues. The events that have been consumed 177 * by all clients are freed from the kernel event queue. 178 * 179 * Events that have not yet been delivered to all clients 180 * are not freed and will be replayed after all SLMs have 181 * been (re)loaded. 182 * 183 * After all client event queues have been drained, each 184 * SLM client is unloaded. The init phase will (re)load 185 * each SLM and initiate event replay and delivery from 186 * the kernel. 187 * 188 */ 189 /*ARGSUSED*/ 190 static void 191 hup_handler(int sig) 192 { 193 syseventd_err_print(SIGHUP_CAUGHT); 194 (void) fflush(0); 195 syseventd_fini(sig); 196 syseventd_init(); 197 syseventd_err_print(DAEMON_RESTARTED); 198 (void) fflush(0); 199 } 200 201 /* 202 * Fault handler for other signals caught 203 */ 204 /*ARGSUSED*/ 205 static void 206 flt_handler(int sig) 207 { 208 char signame[SIG2STR_MAX]; 209 210 if (sig2str(sig, signame) == -1) { 211 syseventd_err_print(UNKNOWN_SIGNAL_CAUGHT, sig); 212 } 213 214 (void) se_signal_sethandler(sig, SIG_DFL, NULL); 215 216 switch (sig) { 217 case SIGINT: 218 case SIGSTOP: 219 case SIGTERM: 220 /* Close kernel door */ 221 (void) door_revoke(upcall_door); 222 223 /* Gracefully exit current event delivery threads */ 224 syseventd_fini(sig); 225 226 (void) fflush(0); 227 (void) se_signal_unblockall(); 228 syseventd_exit(1); 229 /*NOTREACHED*/ 230 default: 231 syseventd_err_print(FATAL_ERROR); 232 (void) fflush(0); 233 234 } 235 } 236 237 static void 238 sigwait_thr() 239 { 240 int sig; 241 int err; 242 sigset_t signal_set; 243 244 for (;;) { 245 syseventd_print(3, "sigwait thread waiting for signal\n"); 246 (void) sigfillset(&signal_set); 247 err = sigwait(&signal_set, &sig); 248 if (err) { 249 syseventd_exit(2); 250 } 251 252 /* 253 * Block all signals until the signal handler completes 254 */ 255 if (sig == SIGHUP) { 256 hup_handler(sig); 257 } else { 258 flt_handler(sig); 259 } 260 } 261 /* NOTREACHED */ 262 } 263 264 static void 265 set_root_dir(char *dir) 266 { 267 root_dir = malloc(strlen(dir) + 1); 268 if (root_dir == NULL) { 269 syseventd_err_print(INIT_ROOT_DIR_ERR, strerror(errno)); 270 syseventd_exit(2); 271 } 272 (void) strcpy(root_dir, dir); 273 } 274 275 void 276 main(int argc, char **argv) 277 { 278 int i, c; 279 int fd; 280 pid_t pid; 281 extern char *optarg; 282 283 (void) setlocale(LC_ALL, ""); 284 (void) textdomain(TEXT_DOMAIN); 285 286 if (getuid() != 0) { 287 (void) fprintf(stderr, "Must be root to run syseventd\n"); 288 syseventd_exit(1); 289 } 290 291 if (argc > 5) { 292 usage(); 293 } 294 295 if ((prog = strrchr(argv[0], '/')) == NULL) { 296 prog = argv[0]; 297 } else { 298 prog++; 299 } 300 301 if ((c = getopt(argc, argv, "d:r:")) != EOF) { 302 switch (c) { 303 case 'd': 304 debug_level = atoi(optarg); 305 break; 306 case 'r': 307 /* 308 * Private flag for suninstall to run 309 * daemon during install. 310 */ 311 set_root_dir(optarg); 312 break; 313 case '?': 314 default: 315 usage(); 316 } 317 } 318 319 /* demonize ourselves */ 320 if (debug_level < DEBUG_LEVEL_FORK) { 321 322 if (fork()) { 323 syseventd_exit(0); 324 } 325 326 /* child */ 327 328 (void) chdir("/"); 329 (void) setsid(); 330 if (debug_level <= 1) { 331 closefrom(0); 332 fd = open("/dev/null", 0); 333 (void) dup2(fd, 1); 334 (void) dup2(fd, 2); 335 logflag = 1; 336 } 337 } 338 339 openlog("syseventd", LOG_PID, LOG_DAEMON); 340 341 (void) mutex_init(&err_mutex, USYNC_THREAD, NULL); 342 343 syseventd_print(8, 344 "syseventd started, debug level = %d\n", debug_level); 345 346 /* only one instance of syseventd can run at a time */ 347 if ((pid = enter_daemon_lock()) != getpid()) { 348 syseventd_print(1, 349 "event daemon pid %ld already running\n", pid); 350 exit(3); 351 } 352 353 /* initialize semaphores and eventbuf */ 354 (void) sema_init(&sema_eventbuf, SE_EVENT_DISPATCH_CNT, 355 USYNC_THREAD, NULL); 356 (void) sema_init(&sema_dispatch, 0, USYNC_THREAD, NULL); 357 (void) sema_init(&sema_resource, SE_EVENT_DISPATCH_CNT, 358 USYNC_THREAD, NULL); 359 (void) cond_init(&event_comp_cv, USYNC_THREAD, NULL); 360 eventbuf = (sysevent_t **)calloc(SE_EVENT_DISPATCH_CNT, 361 sizeof (sysevent_t *)); 362 if (eventbuf == NULL) { 363 syseventd_print(1, "Unable to allocate event buffer array\n"); 364 exit(2); 365 } 366 for (i = 0; i < SE_EVENT_DISPATCH_CNT; ++i) { 367 eventbuf[i] = malloc(LOGEVENT_BUFSIZE); 368 if (eventbuf[i] == NULL) { 369 syseventd_print(1, "Unable to allocate event " 370 "buffers\n"); 371 exit(2); 372 } 373 } 374 375 (void) mutex_init(&client_tbl_lock, USYNC_THREAD, NULL); 376 (void) mutex_init(&ev_comp_lock, USYNC_THREAD, NULL); 377 (void) mutex_init(&door_lock, USYNC_THREAD, NULL); 378 (void) rwlock_init(&mod_unload_lock, USYNC_THREAD, NULL); 379 380 event_compq = NULL; 381 382 syseventd_print(8, "start the message thread running\n"); 383 384 /* 385 * Block all signals to all threads include the main thread. 386 * The sigwait_thr thread will process any signals and initiate 387 * a graceful recovery if possible. 388 */ 389 if (se_signal_blockall() < 0) { 390 syseventd_err_print(INIT_SIG_BLOCK_ERR); 391 syseventd_exit(2); 392 } 393 394 if (thr_create(NULL, NULL, (void *(*)(void *))dispatch_message, 395 (void *)0, 0, NULL) < 0) { 396 syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno)); 397 syseventd_exit(2); 398 } 399 if (thr_create(NULL, NULL, 400 (void *(*)(void *))event_completion_thr, NULL, 401 THR_BOUND, NULL) != 0) { 402 syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno)); 403 syseventd_exit(2); 404 } 405 /* Create signal catching thread */ 406 if (thr_create(NULL, NULL, (void *(*)(void *))sigwait_thr, 407 NULL, 0, NULL) < 0) { 408 syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno)); 409 syseventd_exit(2); 410 } 411 412 setbuf(stdout, (char *)NULL); 413 414 /* Initialize and load SLM clients */ 415 initialize_client_tbl(); 416 syseventd_init(); 417 418 syseventd_print(8, "Pausing\n"); 419 420 for (;;) { 421 (void) pause(); 422 } 423 /* NOTREACHED */ 424 } 425 426 /* 427 * door_upcall - called from the kernel via kernel sysevent door 428 * to upload event(s). 429 * 430 * This routine should never block. If resources are 431 * not available to immediately accept the event buffer 432 * EAGAIN is returned to the kernel. 433 * 434 * Once resources are available, the kernel is notified 435 * via a modctl interface to resume event delivery to 436 * syseventd. 437 * 438 */ 439 /*ARGSUSED*/ 440 static void 441 door_upcall(void *cookie, char *args, size_t alen, 442 door_desc_t *ddp, uint_t ndid) 443 { 444 sysevent_t *ev; 445 int rval; 446 447 448 (void) mutex_lock(&door_lock); 449 if (args == NULL) { 450 rval = EINVAL; 451 } else if (sema_trywait(&sema_eventbuf)) { 452 ev = (sysevent_t *) 453 &((log_event_upcall_arg_t *)(void *)args)->buf; 454 syseventd_print(2, "door_upcall: busy event %llx " 455 "retry\n", sysevent_get_seq(ev)); 456 rval = door_upcall_retval = EAGAIN; 457 } else { 458 /* 459 * Copy received message to local buffer. 460 */ 461 size_t size; 462 ev = (sysevent_t *) 463 &((log_event_upcall_arg_t *)(void *)args)->buf; 464 465 syseventd_print(2, "door_upcall: event %llx in eventbuf %d\n", 466 sysevent_get_seq(ev), deliver_buf); 467 size = sysevent_get_size(ev) > LOGEVENT_BUFSIZE ? 468 LOGEVENT_BUFSIZE : sysevent_get_size(ev); 469 (void) bcopy(ev, eventbuf[deliver_buf], size); 470 deliver_buf = (deliver_buf + 1) % SE_EVENT_DISPATCH_CNT; 471 rval = 0; 472 (void) sema_post(&sema_dispatch); 473 } 474 475 (void) mutex_unlock(&door_lock); 476 477 /* 478 * Filling in return values for door_return 479 */ 480 (void) door_return((void *)&rval, sizeof (rval), NULL, 0); 481 (void) door_return(NULL, 0, NULL, 0); 482 } 483 484 /* 485 * dispatch_message - dispatch message thread 486 * This thread spins until an event buffer is delivered 487 * delivered from the kernel. 488 * 489 * It will wait to dispatch an event to any clients 490 * until adequate resources are available to process 491 * the event buffer. 492 */ 493 static void 494 dispatch_message(void) 495 { 496 int error; 497 498 for (;;) { 499 syseventd_print(3, "dispatch_message: thread started\n"); 500 /* 501 * Spin till a message comes 502 */ 503 while (sema_wait(&sema_dispatch) != 0) { 504 syseventd_print(1, 505 "dispatch_message: sema_wait failed\n"); 506 (void) sleep(1); 507 } 508 509 syseventd_print(3, "dispatch_message: sema_dispatch\n"); 510 511 /* 512 * Wait for available resources 513 */ 514 while (sema_wait(&sema_resource) != 0) { 515 syseventd_print(1, "dispatch_message: sema_wait " 516 "failed\n"); 517 (void) sleep(1); 518 } 519 520 syseventd_print(2, "dispatch_message: eventbuf %d\n", 521 dispatch_buf); 522 523 /* 524 * Client dispatch 525 */ 526 do { 527 error = dispatch(); 528 } while (error == EAGAIN); 529 530 syseventd_print(2, "eventbuf %d dispatched\n", dispatch_buf); 531 dispatch_buf = (dispatch_buf + 1) % SE_EVENT_DISPATCH_CNT; 532 533 /* 534 * kernel received a busy signal - 535 * kickstart the kernel delivery thread 536 * door_lock blocks the kernel so we hold it for the 537 * shortest time possible. 538 */ 539 (void) mutex_lock(&door_lock); 540 if (door_upcall_retval == EAGAIN && !fini_pending) { 541 syseventd_print(3, "dispatch_message: retrigger " 542 "door_upcall_retval = %d\n", 543 door_upcall_retval); 544 (void) modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, 545 NULL, NULL, NULL, 0); 546 door_upcall_retval = 0; 547 } 548 (void) mutex_unlock(&door_lock); 549 } 550 /* NOTREACHED */ 551 } 552 553 /* 554 * drain_eventq - Called to drain all pending events from the client's 555 * event queue. 556 */ 557 static void 558 drain_eventq(struct sysevent_client *scp, int status) 559 { 560 struct event_dispatch_pkg *d_pkg; 561 struct event_dispatchq *eventq, *eventq_next; 562 563 syseventd_print(3, "Draining eventq for client %d\n", 564 scp->client_num); 565 566 eventq = scp->eventq; 567 while (eventq) { 568 /* 569 * Mark all dispatched events as completed, but indicate the 570 * error status 571 */ 572 d_pkg = eventq->d_pkg; 573 574 syseventd_print(4, "drain event 0X%llx for client %d\n", 575 sysevent_get_seq(d_pkg->ev), scp->client_num); 576 577 if (d_pkg->completion_state == SE_NOT_DISPATCHED) { 578 d_pkg->completion_status = status; 579 d_pkg->completion_state = SE_COMPLETE; 580 (void) sema_post(d_pkg->completion_sema); 581 } 582 583 eventq_next = eventq->next; 584 free(eventq); 585 eventq = eventq_next; 586 scp->eventq = eventq; 587 } 588 } 589 590 /* 591 * client_deliver_event_thr - Client delivery thread 592 * This thread will process any events on this 593 * client's eventq. 594 */ 595 static void 596 client_deliver_event_thr(void *arg) 597 { 598 int flag, error, i; 599 sysevent_t *ev; 600 hrtime_t now; 601 module_t *mod; 602 struct event_dispatchq *eventq; 603 struct sysevent_client *scp; 604 struct event_dispatch_pkg *d_pkg; 605 606 scp = (struct sysevent_client *)arg; 607 mod = (module_t *)scp->client_data; 608 609 (void) mutex_lock(&scp->client_lock); 610 for (;;) { 611 while (scp->eventq == NULL) { 612 613 /* 614 * Client has been suspended or unloaded, go no further. 615 */ 616 if (fini_pending) { 617 scp->client_flags &= ~SE_CLIENT_THR_RUNNING; 618 syseventd_print(3, "Client %d delivery thread " 619 "exiting flags: 0X%x\n", 620 scp->client_num, scp->client_flags); 621 (void) mutex_unlock(&scp->client_lock); 622 return; 623 } 624 625 (void) cond_wait(&scp->client_cv, &scp->client_lock); 626 627 } 628 629 /* 630 * Process events from the head of the eventq, eventq is locked 631 * going into the processing. 632 */ 633 eventq = scp->eventq; 634 while (eventq != NULL) { 635 d_pkg = eventq->d_pkg; 636 d_pkg->completion_state = SE_OUTSTANDING; 637 (void) mutex_unlock(&scp->client_lock); 638 639 640 flag = error = 0; 641 ev = d_pkg->ev; 642 643 syseventd_print(3, "Start delivery for client %d " 644 "with retry count %d\n", 645 scp->client_num, d_pkg->retry_count); 646 647 /* 648 * Retry limit has been reached by this client, indicate 649 * that no further retries are allowed 650 */ 651 for (i = 0; i <= scp->retry_limit; ++i) { 652 if (i == scp->retry_limit) 653 flag = SE_NO_RETRY; 654 655 /* Start the clock for the event delivery */ 656 d_pkg->start_time = gethrtime(); 657 658 syseventd_print(9, "Deliver to module client " 659 "%s\n", mod->name); 660 661 error = mod->deliver_event(ev, flag); 662 663 /* Can not allow another retry */ 664 if (i == scp->retry_limit) 665 error = 0; 666 667 /* Stop the clock */ 668 now = gethrtime(); 669 670 /* 671 * Suspend event processing and drain the 672 * event q for latent clients 673 */ 674 if (now - d_pkg->start_time > 675 ((hrtime_t)SE_TIMEOUT * NANOSEC)) { 676 syseventd_print(1, "Unresponsive " 677 "client %d: Draining eventq and " 678 "suspending event delivery\n", 679 scp->client_num); 680 (void) mutex_lock(&scp->client_lock); 681 scp->client_flags &= 682 ~SE_CLIENT_THR_RUNNING; 683 scp->client_flags |= 684 SE_CLIENT_SUSPENDED; 685 686 /* Cleanup current event */ 687 d_pkg->completion_status = EFAULT; 688 d_pkg->completion_state = SE_COMPLETE; 689 (void) sema_post( 690 d_pkg->completion_sema); 691 692 /* 693 * Drain the remaining events from the 694 * queue. 695 */ 696 drain_eventq(scp, EINVAL); 697 (void) mutex_unlock(&scp->client_lock); 698 return; 699 } 700 701 /* Event delivery retry requested */ 702 if (fini_pending || error != EAGAIN) { 703 break; 704 } else { 705 (void) sleep(SE_RETRY_TIME); 706 } 707 } 708 709 (void) mutex_lock(&scp->client_lock); 710 d_pkg->completion_status = error; 711 d_pkg->completion_state = SE_COMPLETE; 712 (void) sema_post(d_pkg->completion_sema); 713 714 /* Update eventq pointer */ 715 if (scp->eventq != NULL) { 716 scp->eventq = eventq->next; 717 free(eventq); 718 eventq = scp->eventq; 719 } else { 720 free(eventq); 721 break; 722 } 723 724 syseventd_print(3, "Completed delivery with " 725 "error %d\n", error); 726 } 727 728 syseventd_print(3, "No more events to process for client %d\n", 729 scp->client_num); 730 731 /* Return if this was a synchronous delivery */ 732 if (!SE_CLIENT_IS_THR_RUNNING(scp)) { 733 (void) mutex_unlock(&scp->client_lock); 734 return; 735 } 736 737 } 738 } 739 740 /* 741 * client_deliver_event - Client specific event delivery 742 * This routine will allocate and initialize the 743 * neccessary per-client dispatch data. 744 * 745 * If the eventq is not empty, it may be assumed that 746 * a delivery thread exists for this client and the 747 * dispatch data is appended to the eventq. 748 * 749 * The dispatch package is freed by the event completion 750 * thread (event_completion_thr) and the eventq entry 751 * is freed by the event delivery thread. 752 */ 753 static struct event_dispatch_pkg * 754 client_deliver_event(struct sysevent_client *scp, sysevent_t *ev, 755 sema_t *completion_sema) 756 { 757 size_t ev_sz = sysevent_get_size(ev); 758 struct event_dispatchq *newq, *tmp; 759 struct event_dispatch_pkg *d_pkg; 760 761 syseventd_print(3, "client_deliver_event: id 0x%llx size %d\n", 762 (longlong_t)sysevent_get_seq(ev), ev_sz); 763 if (debug_level == 9) { 764 se_print(stdout, ev); 765 } 766 767 /* 768 * Check for suspended client 769 */ 770 (void) mutex_lock(&scp->client_lock); 771 if (SE_CLIENT_IS_SUSPENDED(scp) || !SE_CLIENT_IS_THR_RUNNING(scp)) { 772 (void) mutex_unlock(&scp->client_lock); 773 return (NULL); 774 } 775 776 /* 777 * Allocate a new dispatch package and eventq entry 778 */ 779 newq = (struct event_dispatchq *)malloc( 780 sizeof (struct event_dispatchq)); 781 if (newq == NULL) { 782 (void) mutex_unlock(&scp->client_lock); 783 return (NULL); 784 } 785 786 d_pkg = (struct event_dispatch_pkg *)malloc( 787 sizeof (struct event_dispatch_pkg)); 788 if (d_pkg == NULL) { 789 free(newq); 790 (void) mutex_unlock(&scp->client_lock); 791 return (NULL); 792 } 793 794 /* Initialize the dispatch package */ 795 d_pkg->scp = scp; 796 d_pkg->retry_count = 0; 797 d_pkg->completion_status = 0; 798 d_pkg->completion_state = SE_NOT_DISPATCHED; 799 d_pkg->completion_sema = completion_sema; 800 d_pkg->ev = ev; 801 newq->d_pkg = d_pkg; 802 newq->next = NULL; 803 804 if (scp->eventq != NULL) { 805 806 /* Add entry to the end of the eventq */ 807 tmp = scp->eventq; 808 while (tmp->next != NULL) 809 tmp = tmp->next; 810 tmp->next = newq; 811 } else { 812 /* event queue empty, wakeup delivery thread */ 813 scp->eventq = newq; 814 (void) cond_signal(&scp->client_cv); 815 } 816 (void) mutex_unlock(&scp->client_lock); 817 818 return (d_pkg); 819 } 820 821 /* 822 * event_completion_thr - Event completion thread. This thread routine 823 * waits for all client delivery thread to complete 824 * delivery of a particular event. 825 */ 826 static void 827 event_completion_thr() 828 { 829 int ret, i, client_count, ok_to_free; 830 sysevent_id_t eid; 831 struct sysevent_client *scp; 832 struct ev_completion *ev_comp; 833 struct event_dispatchq *dispatchq; 834 struct event_dispatch_pkg *d_pkg; 835 836 (void) mutex_lock(&ev_comp_lock); 837 for (;;) { 838 while (event_compq == NULL) { 839 (void) cond_wait(&event_comp_cv, &ev_comp_lock); 840 } 841 842 /* 843 * Process event completions from the head of the 844 * completion queue 845 */ 846 ev_comp = event_compq; 847 while (ev_comp) { 848 (void) mutex_unlock(&ev_comp_lock); 849 eid.eid_seq = sysevent_get_seq(ev_comp->ev); 850 sysevent_get_time(ev_comp->ev, &eid.eid_ts); 851 client_count = ev_comp->client_count; 852 ok_to_free = 1; 853 854 syseventd_print(3, "Wait for event completion of " 855 "event 0X%llx on %d clients\n", 856 eid.eid_seq, client_count); 857 858 while (client_count) { 859 syseventd_print(9, "Waiting for %d clients on " 860 "event id 0X%llx\n", client_count, 861 eid.eid_seq); 862 863 (void) sema_wait(&ev_comp->client_sema); 864 --client_count; 865 } 866 867 syseventd_print(3, "Cleaning up clients for event " 868 "0X%llx\n", eid.eid_seq); 869 dispatchq = ev_comp->dispatch_list; 870 while (dispatchq != NULL) { 871 d_pkg = dispatchq->d_pkg; 872 scp = d_pkg->scp; 873 874 if (d_pkg->completion_status == EAGAIN) 875 ok_to_free = 0; 876 877 syseventd_print(4, "Delivery of 0X%llx " 878 "complete for client %d retry count %d " 879 "status %d\n", eid.eid_seq, 880 scp->client_num, 881 d_pkg->retry_count, 882 d_pkg->completion_status); 883 884 free(d_pkg); 885 ev_comp->dispatch_list = dispatchq->next; 886 free(dispatchq); 887 dispatchq = ev_comp->dispatch_list; 888 } 889 890 if (ok_to_free) { 891 for (i = 0; i < MAX_MODCTL_RETRY; ++i) { 892 if ((ret = modctl(MODEVENTS, 893 (uintptr_t)MODEVENTS_FREEDATA, 894 (uintptr_t)&eid, NULL, 895 NULL, 0)) != 0) { 896 syseventd_print(1, "attempting " 897 "to free event 0X%llx\n", 898 eid.eid_seq); 899 900 /* 901 * Kernel may need time to 902 * move this event buffer to 903 * the sysevent sent queue 904 */ 905 (void) sleep(1); 906 } else { 907 break; 908 } 909 } 910 if (ret) { 911 syseventd_print(1, "Unable to free " 912 "event 0X%llx from the " 913 "kernel\n", eid.eid_seq); 914 } 915 } else { 916 syseventd_print(1, "Not freeing event 0X%llx\n", 917 eid.eid_seq); 918 } 919 920 syseventd_print(2, "Event delivery complete for id " 921 "0X%llx\n", eid.eid_seq); 922 923 (void) mutex_lock(&ev_comp_lock); 924 event_compq = ev_comp->next; 925 free(ev_comp->ev); 926 free(ev_comp); 927 ev_comp = event_compq; 928 (void) sema_post(&sema_resource); 929 } 930 931 /* 932 * Event completion queue is empty, signal possible unload 933 * operation 934 */ 935 (void) cond_signal(&event_comp_cv); 936 937 syseventd_print(3, "No more events\n"); 938 } 939 } 940 941 /* 942 * dispatch - Dispatch the current event buffer to all valid SLM clients. 943 */ 944 static int 945 dispatch(void) 946 { 947 int ev_sz, i, client_count = 0; 948 sysevent_t *new_ev; 949 sysevent_id_t eid; 950 struct ev_completion *ev_comp, *tmp; 951 struct event_dispatchq *dispatchq, *client_list; 952 struct event_dispatch_pkg *d_pkg; 953 954 /* Check for module unload operation */ 955 if (rw_tryrdlock(&mod_unload_lock) != 0) { 956 syseventd_print(2, "unload in progress abort delivery\n"); 957 (void) sema_post(&sema_eventbuf); 958 (void) sema_post(&sema_resource); 959 return (0); 960 } 961 962 syseventd_print(3, "deliver dispatch buffer %d", dispatch_buf); 963 eid.eid_seq = sysevent_get_seq(eventbuf[dispatch_buf]); 964 sysevent_get_time(eventbuf[dispatch_buf], &eid.eid_ts); 965 syseventd_print(3, "deliver msg id: 0x%llx\n", eid.eid_seq); 966 967 /* 968 * ev_comp is used to hold event completion data. It is freed 969 * by the event completion thread (event_completion_thr). 970 */ 971 ev_comp = (struct ev_completion *) 972 malloc(sizeof (struct ev_completion)); 973 if (ev_comp == NULL) { 974 (void) rw_unlock(&mod_unload_lock); 975 syseventd_print(1, "Can not allocate event completion buffer " 976 "for event id 0X%llx\n", eid.eid_seq); 977 return (EAGAIN); 978 } 979 ev_comp->dispatch_list = NULL; 980 ev_comp->next = NULL; 981 (void) sema_init(&ev_comp->client_sema, 0, USYNC_THREAD, NULL); 982 983 ev_sz = sysevent_get_size(eventbuf[dispatch_buf]); 984 new_ev = calloc(1, ev_sz); 985 if (new_ev == NULL) { 986 free(ev_comp); 987 (void) rw_unlock(&mod_unload_lock); 988 syseventd_print(1, "Can not allocate new event buffer " 989 "for event id 0X%llx\n", eid.eid_seq); 990 return (EAGAIN); 991 } 992 993 994 /* 995 * For long messages, copy additional data from kernel 996 */ 997 if (ev_sz > LOGEVENT_BUFSIZE) { 998 int ret = 0; 999 1000 /* Ok to release eventbuf for next event buffer from kernel */ 1001 (void) sema_post(&sema_eventbuf); 1002 1003 for (i = 0; i < MAX_MODCTL_RETRY; ++i) { 1004 if ((ret = modctl(MODEVENTS, 1005 (uintptr_t)MODEVENTS_GETDATA, 1006 (uintptr_t)&eid, 1007 (uintptr_t)ev_sz, 1008 (uintptr_t)new_ev, 0)) 1009 == 0) 1010 break; 1011 else 1012 (void) sleep(1); 1013 } 1014 if (ret) { 1015 syseventd_print(1, "GET_DATA failed for 0X%llx:%llx\n", 1016 eid.eid_ts, eid.eid_seq); 1017 free(new_ev); 1018 free(ev_comp); 1019 (void) rw_unlock(&mod_unload_lock); 1020 return (EAGAIN); 1021 } 1022 } else { 1023 (void) bcopy(eventbuf[dispatch_buf], new_ev, ev_sz); 1024 /* Ok to release eventbuf for next event buffer from kernel */ 1025 (void) sema_post(&sema_eventbuf); 1026 } 1027 1028 1029 /* 1030 * Deliver a copy of eventbuf to clients so 1031 * eventbuf can be used for the next message 1032 */ 1033 for (i = 0; i < MAX_SLM; ++i) { 1034 1035 /* Don't bother for suspended or unloaded clients */ 1036 if (!SE_CLIENT_IS_LOADED(sysevent_client_tbl[i]) || 1037 SE_CLIENT_IS_SUSPENDED(sysevent_client_tbl[i])) 1038 continue; 1039 1040 /* 1041 * Allocate event dispatch queue entry. All queue entries 1042 * are freed by the event completion thread as client 1043 * delivery completes. 1044 */ 1045 dispatchq = (struct event_dispatchq *)malloc( 1046 sizeof (struct event_dispatchq)); 1047 if (dispatchq == NULL) { 1048 syseventd_print(1, "Can not allocate dispatch q " 1049 "for event id 0X%llx client %d\n", eid.eid_seq, i); 1050 continue; 1051 } 1052 dispatchq->next = NULL; 1053 1054 /* Initiate client delivery */ 1055 d_pkg = client_deliver_event(sysevent_client_tbl[i], 1056 new_ev, &ev_comp->client_sema); 1057 if (d_pkg == NULL) { 1058 syseventd_print(1, "Can not allocate dispatch " 1059 "package for event id 0X%llx client %d\n", 1060 eid.eid_seq, i); 1061 free(dispatchq); 1062 continue; 1063 } 1064 dispatchq->d_pkg = d_pkg; 1065 ++client_count; 1066 1067 if (ev_comp->dispatch_list == NULL) { 1068 ev_comp->dispatch_list = dispatchq; 1069 client_list = dispatchq; 1070 } else { 1071 client_list->next = dispatchq; 1072 client_list = client_list->next; 1073 } 1074 } 1075 1076 ev_comp->client_count = client_count; 1077 ev_comp->ev = new_ev; 1078 1079 (void) mutex_lock(&ev_comp_lock); 1080 1081 if (event_compq == NULL) { 1082 syseventd_print(3, "Wakeup event completion thread for " 1083 "id 0X%llx\n", eid.eid_seq); 1084 event_compq = ev_comp; 1085 (void) cond_signal(&event_comp_cv); 1086 } else { 1087 1088 /* Add entry to the end of the event completion queue */ 1089 tmp = event_compq; 1090 while (tmp->next != NULL) 1091 tmp = tmp->next; 1092 tmp->next = ev_comp; 1093 syseventd_print(3, "event added to completion queue for " 1094 "id 0X%llx\n", eid.eid_seq); 1095 } 1096 (void) mutex_unlock(&ev_comp_lock); 1097 (void) rw_unlock(&mod_unload_lock); 1098 1099 return (0); 1100 } 1101 1102 #define MODULE_DIR_HW "/usr/platform/%s/lib/sysevent/modules/" 1103 #define MODULE_DIR_GEN "/usr/lib/sysevent/modules/" 1104 #define MOD_DIR_NUM 3 1105 static char dirname[MOD_DIR_NUM][MAXPATHLEN]; 1106 1107 static char * 1108 dir_num2name(int dirnum) 1109 { 1110 char infobuf[MAXPATHLEN]; 1111 1112 if (dirnum >= MOD_DIR_NUM) 1113 return (NULL); 1114 1115 if (dirname[0][0] == '\0') { 1116 if (sysinfo(SI_PLATFORM, infobuf, MAXPATHLEN) == -1) { 1117 syseventd_print(1, "dir_num2name: " 1118 "sysinfo error %s\n", strerror(errno)); 1119 return (NULL); 1120 } else if (snprintf(dirname[0], sizeof (dirname[0]), 1121 MODULE_DIR_HW, infobuf) >= sizeof (dirname[0])) { 1122 syseventd_print(1, "dir_num2name: " 1123 "platform name too long: %s\n", 1124 infobuf); 1125 return (NULL); 1126 } 1127 if (sysinfo(SI_MACHINE, infobuf, MAXPATHLEN) == -1) { 1128 syseventd_print(1, "dir_num2name: " 1129 "sysinfo error %s\n", strerror(errno)); 1130 return (NULL); 1131 } else if (snprintf(dirname[1], sizeof (dirname[1]), 1132 MODULE_DIR_HW, infobuf) >= sizeof (dirname[1])) { 1133 syseventd_print(1, "dir_num2name: " 1134 "machine name too long: %s\n", 1135 infobuf); 1136 return (NULL); 1137 } 1138 (void) strcpy(dirname[2], MODULE_DIR_GEN); 1139 } 1140 1141 return (dirname[dirnum]); 1142 } 1143 1144 1145 /* 1146 * load_modules - Load modules found in the common syseventd module directories 1147 * Modules that do not provide valid interfaces are rejected. 1148 */ 1149 static void 1150 load_modules(char *dirname) 1151 { 1152 int client_id; 1153 DIR *mod_dir; 1154 module_t *mod; 1155 struct dirent *retp, *entp; 1156 struct slm_mod_ops *mod_ops; 1157 struct sysevent_client *scp; 1158 1159 if (dirname == NULL) 1160 return; 1161 1162 /* Return silently if module directory does not exist */ 1163 if ((mod_dir = opendir(dirname)) == NULL) { 1164 syseventd_print(1, "Unable to open module directory %s: %s\n", 1165 dirname, strerror(errno)); 1166 return; 1167 } 1168 1169 syseventd_print(3, "loading modules from %s\n", dirname); 1170 1171 entp = malloc(PATH_MAX + 1 + sizeof (struct dirent)); 1172 if (entp == NULL) { 1173 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "entp", 1174 strerror(errno)); 1175 (void) closedir(mod_dir); 1176 return; 1177 } 1178 1179 /* 1180 * Go through directory, looking for files ending with .so 1181 */ 1182 while (readdir_r(mod_dir, entp, &retp) == 0) { 1183 void *dlh, *f; 1184 char *tmp, modpath[MAXPATHLEN]; 1185 1186 if (retp == NULL) { 1187 break; 1188 } 1189 1190 if (((tmp = strstr(entp->d_name, MODULE_SUFFIX)) == NULL) || 1191 (tmp[strlen(MODULE_SUFFIX)] != '\0')) { 1192 continue; 1193 } 1194 1195 if (snprintf(modpath, sizeof (modpath), "%s%s", 1196 dirname, entp->d_name) >= sizeof (modpath)) { 1197 syseventd_err_print(INIT_PATH_ERR, modpath); 1198 continue; 1199 } 1200 if ((dlh = dlopen(modpath, RTLD_LAZY)) == NULL) { 1201 syseventd_err_print(LOAD_MOD_DLOPEN_ERR, 1202 modpath, dlerror()); 1203 continue; 1204 } else if ((f = dlsym(dlh, EVENT_INIT)) == NULL) { 1205 syseventd_err_print(LOAD_MOD_NO_INIT, 1206 modpath, dlerror()); 1207 (void) dlclose(dlh); 1208 continue; 1209 } 1210 1211 mod = malloc(sizeof (*mod)); 1212 if (mod == NULL) { 1213 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod", 1214 strerror(errno)); 1215 (void) dlclose(dlh); 1216 continue; 1217 } 1218 1219 mod->name = strdup(entp->d_name); 1220 if (mod->name == NULL) { 1221 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod->name", 1222 strerror(errno)); 1223 (void) dlclose(dlh); 1224 free(mod); 1225 continue; 1226 } 1227 1228 mod->dlhandle = dlh; 1229 mod->event_mod_init = (struct slm_mod_ops *(*)())f; 1230 1231 /* load in other module functions */ 1232 mod->event_mod_fini = (void (*)())dlsym(dlh, EVENT_FINI); 1233 if (mod->event_mod_fini == NULL) { 1234 syseventd_err_print(LOAD_MOD_DLSYM_ERR, mod->name, 1235 dlerror()); 1236 free(mod->name); 1237 free(mod); 1238 (void) dlclose(dlh); 1239 continue; 1240 } 1241 1242 /* Call module init routine */ 1243 if ((mod_ops = mod->event_mod_init()) == NULL) { 1244 syseventd_err_print(LOAD_MOD_EINVAL, mod->name); 1245 free(mod->name); 1246 free(mod); 1247 (void) dlclose(dlh); 1248 continue; 1249 } 1250 if (mod_ops->major_version != SE_MAJOR_VERSION) { 1251 syseventd_err_print(LOAD_MOD_VERSION_MISMATCH, 1252 mod->name, SE_MAJOR_VERSION, 1253 mod_ops->major_version); 1254 mod->event_mod_fini(); 1255 free(mod->name); 1256 free(mod); 1257 (void) dlclose(dlh); 1258 continue; 1259 } 1260 1261 mod->deliver_event = mod_ops->deliver_event; 1262 /* Add module entry to client list */ 1263 if ((client_id = insert_client((void *)mod, SLM_CLIENT, 1264 (mod_ops->retry_limit <= SE_MAX_RETRY_LIMIT ? 1265 mod_ops->retry_limit : SE_MAX_RETRY_LIMIT))) 1266 < 0) {; 1267 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client", 1268 strerror(errno)); 1269 mod->event_mod_fini(); 1270 free(mod->name); 1271 free(mod); 1272 (void) dlclose(dlh); 1273 continue; 1274 } 1275 1276 scp = sysevent_client_tbl[client_id]; 1277 ++concurrency_level; 1278 (void) thr_setconcurrency(concurrency_level); 1279 if (thr_create(NULL, 0, 1280 (void *(*)(void *))client_deliver_event_thr, 1281 (void *)scp, THR_BOUND, &scp->tid) != 0) { 1282 1283 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client", 1284 strerror(errno)); 1285 mod->event_mod_fini(); 1286 free(mod->name); 1287 free(mod); 1288 (void) dlclose(dlh); 1289 continue; 1290 } 1291 scp->client_flags |= SE_CLIENT_THR_RUNNING; 1292 1293 syseventd_print(3, "loaded module %s\n", entp->d_name); 1294 } 1295 1296 free(entp); 1297 (void) closedir(mod_dir); 1298 syseventd_print(3, "modules loaded\n"); 1299 } 1300 1301 /* 1302 * unload_modules - modules are unloaded prior to graceful shutdown or 1303 * before restarting the daemon upon receipt of 1304 * SIGHUP. 1305 */ 1306 static void 1307 unload_modules(int sig) 1308 { 1309 int i, count, done; 1310 module_t *mod; 1311 struct sysevent_client *scp; 1312 1313 /* 1314 * unload modules that are ready, skip those that have not 1315 * drained their event queues. 1316 */ 1317 count = done = 0; 1318 while (done < MAX_SLM) { 1319 /* Don't wait indefinitely for unresponsive clients */ 1320 if (sig != SIGHUP && count > SE_TIMEOUT) { 1321 break; 1322 } 1323 1324 done = 0; 1325 1326 /* Shutdown clients */ 1327 for (i = 0; i < MAX_SLM; ++i) { 1328 scp = sysevent_client_tbl[i]; 1329 if (mutex_trylock(&scp->client_lock) == 0) { 1330 if (scp->client_type != SLM_CLIENT || 1331 scp->client_data == NULL) { 1332 (void) mutex_unlock(&scp->client_lock); 1333 done++; 1334 continue; 1335 } 1336 } else { 1337 syseventd_print(3, "Skipping unload of " 1338 "client %d: client locked\n", 1339 scp->client_num); 1340 continue; 1341 } 1342 1343 /* 1344 * Drain the eventq and wait for delivery thread to 1345 * cleanly exit 1346 */ 1347 drain_eventq(scp, EAGAIN); 1348 (void) cond_signal(&scp->client_cv); 1349 (void) mutex_unlock(&scp->client_lock); 1350 (void) thr_join(scp->tid, NULL, NULL); 1351 1352 /* 1353 * It is now safe to unload the module 1354 */ 1355 mod = (module_t *)scp->client_data; 1356 syseventd_print(2, "Unload %s\n", mod->name); 1357 mod->event_mod_fini(); 1358 (void) dlclose(mod->dlhandle); 1359 free(mod->name); 1360 (void) mutex_lock(&client_tbl_lock); 1361 delete_client(i); 1362 (void) mutex_unlock(&client_tbl_lock); 1363 ++done; 1364 1365 } 1366 ++count; 1367 (void) sleep(1); 1368 } 1369 1370 /* 1371 * Wait for event completions 1372 */ 1373 syseventd_print(2, "waiting for event completions\n"); 1374 (void) mutex_lock(&ev_comp_lock); 1375 while (event_compq != NULL) { 1376 (void) cond_wait(&event_comp_cv, &ev_comp_lock); 1377 } 1378 (void) mutex_unlock(&ev_comp_lock); 1379 } 1380 1381 /* 1382 * syseventd_init - Called at daemon (re)start-up time to load modules 1383 * and kickstart the kernel delivery engine. 1384 */ 1385 static void 1386 syseventd_init() 1387 { 1388 int i, fd; 1389 char local_door_file[PATH_MAX + 1]; 1390 1391 fini_pending = 0; 1392 1393 concurrency_level = MIN_CONCURRENCY_LEVEL; 1394 (void) thr_setconcurrency(concurrency_level); 1395 1396 /* 1397 * Load client modules for event delivering 1398 */ 1399 for (i = 0; i < MOD_DIR_NUM; ++i) { 1400 load_modules(dir_num2name(i)); 1401 } 1402 1403 /* 1404 * Create kernel delivery door service 1405 */ 1406 syseventd_print(8, "Create a door for kernel upcalls\n"); 1407 if (snprintf(local_door_file, sizeof (local_door_file), "%s%s", 1408 root_dir, LOGEVENT_DOOR_UPCALL) >= sizeof (local_door_file)) { 1409 syseventd_err_print(INIT_PATH_ERR, local_door_file); 1410 syseventd_exit(5); 1411 } 1412 1413 /* 1414 * Remove door file for robustness. 1415 */ 1416 if (unlink(local_door_file) != 0) 1417 syseventd_print(8, "Unlink of %s failed.\n", local_door_file); 1418 1419 fd = open(local_door_file, O_CREAT|O_RDWR, S_IREAD|S_IWRITE); 1420 if ((fd == -1) && (errno != EEXIST)) { 1421 syseventd_err_print(INIT_OPEN_DOOR_ERR, strerror(errno)); 1422 syseventd_exit(5); 1423 } 1424 (void) close(fd); 1425 1426 upcall_door = door_create(door_upcall, NULL, 1427 DOOR_REFUSE_DESC | DOOR_NO_CANCEL); 1428 if (upcall_door == -1) { 1429 syseventd_err_print(INIT_CREATE_DOOR_ERR, strerror(errno)); 1430 syseventd_exit(5); 1431 } 1432 1433 (void) fdetach(local_door_file); 1434 retry: 1435 if (fattach(upcall_door, local_door_file) != 0) { 1436 if (errno == EBUSY) 1437 goto retry; 1438 syseventd_err_print(INIT_FATTACH_ERR, strerror(errno)); 1439 (void) door_revoke(upcall_door); 1440 syseventd_exit(5); 1441 } 1442 1443 /* 1444 * Tell kernel the door name and start delivery 1445 */ 1446 syseventd_print(2, 1447 "local_door_file = %s\n", local_door_file); 1448 if (modctl(MODEVENTS, 1449 (uintptr_t)MODEVENTS_SET_DOOR_UPCALL_FILENAME, 1450 (uintptr_t)local_door_file, NULL, NULL, 0) < 0) { 1451 syseventd_err_print(INIT_DOOR_NAME_ERR, strerror(errno)); 1452 syseventd_exit(6); 1453 } 1454 1455 door_upcall_retval = 0; 1456 1457 if (modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0) 1458 < 0) { 1459 syseventd_err_print(KERNEL_REPLAY_ERR, strerror(errno)); 1460 syseventd_exit(7); 1461 } 1462 } 1463 1464 /* 1465 * syseventd_fini - shut down daemon, but do not exit 1466 */ 1467 static void 1468 syseventd_fini(int sig) 1469 { 1470 /* 1471 * Indicate that event queues should be drained and no 1472 * additional events be accepted 1473 */ 1474 fini_pending = 1; 1475 1476 /* Close the kernel event door to halt delivery */ 1477 (void) door_revoke(upcall_door); 1478 1479 syseventd_print(1, "Unloading modules\n"); 1480 (void) rw_wrlock(&mod_unload_lock); 1481 unload_modules(sig); 1482 (void) rw_unlock(&mod_unload_lock); 1483 1484 } 1485 1486 /* 1487 * enter_daemon_lock - lock the daemon file lock 1488 * 1489 * Use an advisory lock to ensure that only one daemon process is active 1490 * in the system at any point in time. If the lock is held by another 1491 * process, do not block but return the pid owner of the lock to the 1492 * caller immediately. The lock is cleared if the holding daemon process 1493 * exits for any reason even if the lock file remains, so the daemon can 1494 * be restarted if necessary. The lock file is DAEMON_LOCK_FILE. 1495 */ 1496 static pid_t 1497 enter_daemon_lock(void) 1498 { 1499 struct flock lock; 1500 1501 syseventd_print(8, "enter_daemon_lock: lock file = %s\n", 1502 DAEMON_LOCK_FILE); 1503 1504 if (snprintf(local_lock_file, sizeof (local_lock_file), "%s%s", 1505 root_dir, DAEMON_LOCK_FILE) >= sizeof (local_lock_file)) { 1506 syseventd_err_print(INIT_PATH_ERR, local_lock_file); 1507 syseventd_exit(8); 1508 } 1509 daemon_lock_fd = open(local_lock_file, O_CREAT|O_RDWR, 0644); 1510 if (daemon_lock_fd < 0) { 1511 syseventd_err_print(INIT_LOCK_OPEN_ERR, 1512 local_lock_file, strerror(errno)); 1513 syseventd_exit(8); 1514 } 1515 1516 lock.l_type = F_WRLCK; 1517 lock.l_whence = SEEK_SET; 1518 lock.l_start = 0; 1519 lock.l_len = 0; 1520 1521 if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) { 1522 if (fcntl(daemon_lock_fd, F_GETLK, &lock) == -1) { 1523 syseventd_err_print(INIT_LOCK_ERR, 1524 local_lock_file, strerror(errno)); 1525 exit(2); 1526 } 1527 return (lock.l_pid); 1528 } 1529 hold_daemon_lock = 1; 1530 1531 return (getpid()); 1532 } 1533 1534 /* 1535 * exit_daemon_lock - release the daemon file lock 1536 */ 1537 static void 1538 exit_daemon_lock(void) 1539 { 1540 struct flock lock; 1541 1542 lock.l_type = F_UNLCK; 1543 lock.l_whence = SEEK_SET; 1544 lock.l_start = 0; 1545 lock.l_len = 0; 1546 1547 if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) { 1548 syseventd_err_print(INIT_UNLOCK_ERR, 1549 local_lock_file, strerror(errno)); 1550 } 1551 1552 if (close(daemon_lock_fd) == -1) { 1553 syseventd_err_print(INIT_LOCK_CLOSE_ERR, 1554 local_lock_file, strerror(errno)); 1555 exit(-1); 1556 } 1557 } 1558 1559 /* 1560 * syseventd_err_print - print error messages to the terminal if not 1561 * yet daemonized or to syslog. 1562 */ 1563 /*PRINTFLIKE1*/ 1564 void 1565 syseventd_err_print(char *message, ...) 1566 { 1567 va_list ap; 1568 1569 (void) mutex_lock(&err_mutex); 1570 va_start(ap, message); 1571 1572 if (logflag) { 1573 (void) vsyslog(LOG_ERR, message, ap); 1574 } else { 1575 (void) fprintf(stderr, "%s: ", prog); 1576 (void) vfprintf(stderr, message, ap); 1577 } 1578 va_end(ap); 1579 (void) mutex_unlock(&err_mutex); 1580 } 1581 1582 /* 1583 * syseventd_print - print messages to the terminal or to syslog 1584 * the following levels are implemented: 1585 * 1586 * 1 - transient errors that does not affect normal program flow 1587 * 2 - upcall/dispatch interaction 1588 * 3 - program flow trace as each message goes through the daemon 1589 * 8 - all the nit-gritty details of startup and shutdown 1590 * 9 - very verbose event flow tracing (no daemonization of syseventd) 1591 * 1592 */ 1593 /*PRINTFLIKE2*/ 1594 void 1595 syseventd_print(int level, char *message, ...) 1596 { 1597 va_list ap; 1598 static int newline = 1; 1599 1600 if (level > debug_level) { 1601 return; 1602 } 1603 1604 (void) mutex_lock(&err_mutex); 1605 va_start(ap, message); 1606 if (logflag) { 1607 (void) syslog(LOG_DEBUG, "%s[%ld]: ", 1608 prog, getpid()); 1609 (void) vsyslog(LOG_DEBUG, message, ap); 1610 } else { 1611 if (newline) { 1612 (void) fprintf(stdout, "%s[%ld]: ", 1613 prog, getpid()); 1614 (void) vfprintf(stdout, message, ap); 1615 } else { 1616 (void) vfprintf(stdout, message, ap); 1617 } 1618 } 1619 if (message[strlen(message)-1] == '\n') { 1620 newline = 1; 1621 } else { 1622 newline = 0; 1623 } 1624 va_end(ap); 1625 (void) mutex_unlock(&err_mutex); 1626 } 1627