1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * syseventd - The system event daemon
29 *
30 * This daemon dispatches event buffers received from the
31 * kernel to all interested SLM clients. SLMs in turn
32 * deliver the buffers to their particular application
33 * clients.
34 */
35 #include <stdio.h>
36 #include <sys/types.h>
37 #include <dirent.h>
38 #include <stdarg.h>
39 #include <stddef.h>
40 #include <stdlib.h>
41 #include <dlfcn.h>
42 #include <door.h>
43 #include <errno.h>
44 #include <fcntl.h>
45 #include <signal.h>
46 #include <strings.h>
47 #include <unistd.h>
48 #include <synch.h>
49 #include <syslog.h>
50 #include <thread.h>
51 #include <libsysevent.h>
52 #include <limits.h>
53 #include <locale.h>
54 #include <sys/sysevent.h>
55 #include <sys/sysevent_impl.h>
56 #include <sys/modctl.h>
57 #include <sys/stat.h>
58 #include <sys/systeminfo.h>
59 #include <sys/wait.h>
60
61 #include "sysevent_signal.h"
62 #include "syseventd.h"
63 #include "message.h"
64
65 extern int insert_client(void *client, int client_type, int retry_limit);
66 extern void delete_client(int id);
67 extern void initialize_client_tbl(void);
68
69 extern struct sysevent_client *sysevent_client_tbl[];
70 extern mutex_t client_tbl_lock;
71
72 #define DEBUG_LEVEL_FORK 9 /* will run in background at all */
73 /* levels less than DEBUG_LEVEL_FORK */
74
75 int debug_level = 0;
76 char *root_dir = ""; /* Relative root for lock and door */
77
78 /* Maximum number of outstanding events dispatched */
79 #define SE_EVENT_DISPATCH_CNT 100
80
81 static int upcall_door; /* Kernel event door */
82 static int door_upcall_retval; /* Kernel event posting return value */
83 static int fini_pending = 0; /* fini pending flag */
84 static int deliver_buf = 0; /* Current event buffer from kernel */
85 static int dispatch_buf = 0; /* Current event buffer dispatched */
86 static sysevent_t **eventbuf; /* Global array of event buffers */
87 static struct ev_completion *event_compq; /* Event completion queue */
88 static mutex_t ev_comp_lock; /* Event completion queue lock */
89 static mutex_t err_mutex; /* error logging lock */
90 static mutex_t door_lock; /* sync door return access */
91 static rwlock_t mod_unload_lock; /* sync module unloading */
92
93 /* declarations and definitions for avoiding multiple daemons running */
94 #define DAEMON_LOCK_FILE "/var/run/syseventd.lock"
95 char local_lock_file[PATH_MAX + 1];
96 static int hold_daemon_lock;
97 static int daemon_lock_fd;
98
99 /*
100 * sema_eventbuf - guards against the global buffer eventbuf
101 * being written to before it has been dispatched to clients
102 *
103 * sema_dispatch - synchronizes between the kernel uploading thread
104 * (producer) and the userland dispatch_message thread (consumer).
105 *
106 * sema_resource - throttles outstanding event consumption.
107 *
108 * event_comp_cv - synchronizes threads waiting for the event completion queue
109 * to empty or become active.
110 */
111 static sema_t sema_eventbuf, sema_dispatch, sema_resource;
112 static cond_t event_comp_cv;
113
114 /* Self-tuning concurrency level */
115 #define MIN_CONCURRENCY_LEVEL 4
116 static int concurrency_level = MIN_CONCURRENCY_LEVEL;
117
118
119 /* SLM defines */
120 #define MODULE_SUFFIX ".so"
121 #define EVENT_FINI "slm_fini"
122 #define EVENT_INIT "slm_init"
123
124 #define SE_TIMEOUT 60 /* Client dispatch timeout (seconds) */
125
126 /* syslog message related */
127 static int logflag = 0;
128 static char *prog;
129
130 /* function prototypes */
131 static void door_upcall(void *cookie, char *args, size_t alen, door_desc_t *ddp,
132 uint_t ndid);
133 static void dispatch_message(void);
134 static int dispatch(void);
135 static void event_completion_thr(void);
136 static void usage(void);
137
138 static void syseventd_init(void);
139 static void syseventd_fini(int sig);
140
141 static pid_t enter_daemon_lock(void);
142 static void exit_daemon_lock(void);
143
144 static void
usage()145 usage() {
146 (void) fprintf(stderr, "usage: syseventd [-d <debug_level>] "
147 "[-r <root_dir>]\n");
148 (void) fprintf(stderr, "higher debug levels get progressively ");
149 (void) fprintf(stderr, "more detailed debug information.\n");
150 (void) fprintf(stderr, "syseventd will run in background if ");
151 (void) fprintf(stderr, "run with a debug_level less than %d.\n",
152 DEBUG_LEVEL_FORK);
153 exit(2);
154 }
155
156
157 /* common exit function which ensures releasing locks */
158 void
syseventd_exit(int status)159 syseventd_exit(int status)
160 {
161 syseventd_print(1, "exit status = %d\n", status);
162
163 if (hold_daemon_lock) {
164 exit_daemon_lock();
165 }
166
167 exit(status);
168 }
169
170
171 /*
172 * hup_handler - SIGHUP handler. SIGHUP is used to force a reload of
173 * all SLMs. During fini, events are drained from all
174 * client event queues. The events that have been consumed
175 * by all clients are freed from the kernel event queue.
176 *
177 * Events that have not yet been delivered to all clients
178 * are not freed and will be replayed after all SLMs have
179 * been (re)loaded.
180 *
181 * After all client event queues have been drained, each
182 * SLM client is unloaded. The init phase will (re)load
183 * each SLM and initiate event replay and delivery from
184 * the kernel.
185 *
186 */
187 /*ARGSUSED*/
188 static void
hup_handler(int sig)189 hup_handler(int sig)
190 {
191 syseventd_err_print(SIGHUP_CAUGHT);
192 (void) fflush(0);
193 syseventd_fini(sig);
194 syseventd_init();
195 syseventd_err_print(DAEMON_RESTARTED);
196 (void) fflush(0);
197 }
198
199 /*
200 * Fault handler for other signals caught
201 */
202 /*ARGSUSED*/
static void
flt_handler(int sig)
{
	char signame[SIG2STR_MAX];

	/* Log the raw signal number if it has no symbolic name. */
	if (sig2str(sig, signame) == -1) {
		syseventd_err_print(UNKNOWN_SIGNAL_CAUGHT, sig);
	}

	/*
	 * Restore the default disposition so a repeat of this signal
	 * during shutdown is not caught again by this handler.
	 */
	(void) se_signal_sethandler(sig, SIG_DFL, NULL);

	switch (sig) {
	case SIGINT:
	case SIGSTOP:
	case SIGTERM:
		/* Close kernel door */
		(void) door_revoke(upcall_door);

		/* Gracefully exit current event delivery threads */
		syseventd_fini(sig);

		(void) fflush(0);
		(void) se_signal_unblockall();
		syseventd_exit(1);
		/*NOTREACHED*/
	case SIGCLD:
	case SIGPWR:
	case SIGWINCH:
	case SIGURG:
	case SIGCONT:
	case SIGWAITING:
	case SIGLWP:
	case SIGFREEZE:
	case SIGTHAW:
	case SIGCANCEL:
	case SIGXRES:
	case SIGJVM1:
	case SIGJVM2:
	case SIGINFO:
		/* No need to abort */
		break;
	default:
		/* Unexpected fault-type signal: dump core for analysis. */
		syseventd_err_print(FATAL_ERROR);
		abort();

	}
}
250
251 /*
252 * Daemon parent process only.
253 * Child process signal to indicate successful daemon initialization.
254 * This is the normal and expected exit path of the daemon parent.
255 */
256 /*ARGSUSED*/
static void
sigusr1(int sig)
{
	/* Child initialized successfully; parent exits with status 0. */
	syseventd_exit(0);
}
262
263 static void
sigwait_thr()264 sigwait_thr()
265 {
266 int sig;
267 int err;
268 sigset_t signal_set;
269
270 for (;;) {
271 syseventd_print(3, "sigwait thread waiting for signal\n");
272 (void) sigfillset(&signal_set);
273 err = sigwait(&signal_set, &sig);
274 if (err) {
275 syseventd_exit(2);
276 }
277
278 /*
279 * Block all signals until the signal handler completes
280 */
281 if (sig == SIGHUP) {
282 hup_handler(sig);
283 } else {
284 flt_handler(sig);
285 }
286 }
287 /* NOTREACHED */
288 }
289
290 static void
set_root_dir(char * dir)291 set_root_dir(char *dir)
292 {
293 root_dir = malloc(strlen(dir) + 1);
294 if (root_dir == NULL) {
295 syseventd_err_print(INIT_ROOT_DIR_ERR, strerror(errno));
296 syseventd_exit(2);
297 }
298 (void) strcpy(root_dir, dir);
299 }
300
/*
 * main - parse options, daemonize when running below DEBUG_LEVEL_FORK,
 * acquire the single-instance lock, initialize the event buffer ring and
 * synchronization objects, start the worker threads, and load the SLM
 * clients.  The main thread then parks in pause(); all event work is
 * done by the threads created here.
 */
int
main(int argc, char **argv)
{
	int i, c;
	int fd;
	pid_t pid;
	int has_forked = 0;
	extern char *optarg;

	(void) setlocale(LC_ALL, "");
	(void) textdomain(TEXT_DOMAIN);

	/* Kernel door and modctl access require root. */
	if (getuid() != 0) {
		(void) fprintf(stderr, "Must be root to run syseventd\n");
		syseventd_exit(1);
	}

	if (argc > 5) {
		usage();
	}

	/* Basename of argv[0], used by the syslog/error machinery. */
	if ((prog = strrchr(argv[0], '/')) == NULL) {
		prog = argv[0];
	} else {
		prog++;
	}

	while ((c = getopt(argc, argv, "d:r:")) != EOF) {
		switch (c) {
		case 'd':
			debug_level = atoi(optarg);
			break;
		case 'r':
			/*
			 * Private flag for suninstall to run
			 * daemon during install.
			 */
			set_root_dir(optarg);
			break;
		case '?':
		default:
			usage();
		}
	}

	/* daemonize ourselves */
	if (debug_level < DEBUG_LEVEL_FORK) {

		sigset_t mask;

		(void) sigset(SIGUSR1, sigusr1);

		/*
		 * Block SIGUSR1 until the parent is ready to wait, so the
		 * child's "init complete" signal cannot be lost.
		 */
		(void) sigemptyset(&mask);
		(void) sigaddset(&mask, SIGUSR1);
		(void) sigprocmask(SIG_BLOCK, &mask, NULL);

		if ((pid = fork()) == (pid_t)-1) {
			(void) fprintf(stderr,
			    "syseventd: fork failed - %s\n", strerror(errno));
			syseventd_exit(1);
		}

		if (pid != 0) {
			/*
			 * parent
			 * handshake with the daemon so that dependents
			 * of the syseventd service don't start up until
			 * the service is actually functional
			 */
			int status;
			(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);

			if (waitpid(pid, &status, 0) != pid) {
				/*
				 * child process signal indicating
				 * successful daemon initialization
				 * (SIGUSR1 interrupts the waitpid)
				 */
				syseventd_exit(0);
			}
			/* child exited implying unsuccessful startup */
			syseventd_exit(1);
		}

		/* child */

		has_forked = 1;
		(void) sigset(SIGUSR1, SIG_DFL);
		(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);

		(void) chdir("/");
		(void) setsid();
		if (debug_level <= 1) {
			/* Detach from the controlling terminal entirely. */
			closefrom(0);
			fd = open("/dev/null", 0);
			(void) dup2(fd, 1);
			(void) dup2(fd, 2);
			logflag = 1;
		}
	}

	openlog("syseventd", LOG_PID, LOG_DAEMON);

	(void) mutex_init(&err_mutex, USYNC_THREAD, NULL);

	syseventd_print(8,
	    "syseventd started, debug level = %d\n", debug_level);

	/* only one instance of syseventd can run at a time */
	if ((pid = enter_daemon_lock()) != getpid()) {
		syseventd_print(1,
		    "event daemon pid %ld already running\n", pid);
		exit(3);
	}

	/*
	 * Initialize semaphores and the ring of SE_EVENT_DISPATCH_CNT
	 * fixed-size event buffers shared with the kernel upcall.
	 */
	(void) sema_init(&sema_eventbuf, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) sema_init(&sema_dispatch, 0, USYNC_THREAD, NULL);
	(void) sema_init(&sema_resource, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) cond_init(&event_comp_cv, USYNC_THREAD, NULL);
	eventbuf = (sysevent_t **)calloc(SE_EVENT_DISPATCH_CNT,
	    sizeof (sysevent_t *));
	if (eventbuf == NULL) {
		syseventd_print(1, "Unable to allocate event buffer array\n");
		exit(2);
	}
	for (i = 0; i < SE_EVENT_DISPATCH_CNT; ++i) {
		eventbuf[i] = malloc(LOGEVENT_BUFSIZE);
		if (eventbuf[i] == NULL) {
			syseventd_print(1, "Unable to allocate event "
			    "buffers\n");
			exit(2);
		}
	}

	(void) mutex_init(&client_tbl_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&ev_comp_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&door_lock, USYNC_THREAD, NULL);
	(void) rwlock_init(&mod_unload_lock, USYNC_THREAD, NULL);

	event_compq = NULL;

	syseventd_print(8, "start the message thread running\n");

	/*
	 * Block all signals to all threads including the main thread.
	 * The sigwait_thr thread will process any signals and initiate
	 * a graceful recovery if possible.
	 */
	if (se_signal_blockall() < 0) {
		syseventd_err_print(INIT_SIG_BLOCK_ERR);
		syseventd_exit(2);
	}

	/* Consumer thread: pulls buffers and dispatches them to SLMs. */
	if (thr_create(NULL, NULL, (void *(*)(void *))dispatch_message,
	    (void *)0, 0, NULL) < 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}
	/* Completion thread: frees events once all clients have them. */
	if (thr_create(NULL, NULL,
	    (void *(*)(void *))event_completion_thr, NULL,
	    THR_BOUND, NULL) != 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}
	/* Create signal catching thread */
	if (thr_create(NULL, NULL, (void *(*)(void *))sigwait_thr,
	    NULL, 0, NULL) < 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}

	setbuf(stdout, (char *)NULL);

	/* Initialize and load SLM clients */
	initialize_client_tbl();
	syseventd_init();

	/* signal parent to indicate successful daemon initialization */
	if (has_forked) {
		if (kill(getppid(), SIGUSR1) != 0) {
			syseventd_err_print(
			    "signal to the parent failed - %s\n",
			    strerror(errno));
			syseventd_exit(2);
		}
	}

	syseventd_print(8, "Pausing\n");

	for (;;) {
		(void) pause();
	}
	/* NOTREACHED */
	return (0);
}
498
499 /*
500 * door_upcall - called from the kernel via kernel sysevent door
501 * to upload event(s).
502 *
503 * This routine should never block. If resources are
504 * not available to immediately accept the event buffer
505 * EAGAIN is returned to the kernel.
506 *
507 * Once resources are available, the kernel is notified
508 * via a modctl interface to resume event delivery to
509 * syseventd.
510 *
511 */
512 /*ARGSUSED*/
static void
door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid)
{
	sysevent_t *ev;
	int rval;


	(void) mutex_lock(&door_lock);
	if (args == NULL) {
		rval = EINVAL;
	} else if (sema_trywait(&sema_eventbuf)) {
		/*
		 * All SE_EVENT_DISPATCH_CNT buffers are in use; return
		 * EAGAIN so the kernel backs off.  dispatch_message()
		 * re-triggers delivery (MODEVENTS_FLUSH) when a buffer
		 * becomes free.
		 */
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;
		syseventd_print(2, "door_upcall: busy event %llx "
		    "retry\n", sysevent_get_seq(ev));
		rval = door_upcall_retval = EAGAIN;
	} else {
		/*
		 * Copy received message to local buffer.
		 */
		size_t size;
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;

		syseventd_print(2, "door_upcall: event %llx in eventbuf %d\n",
		    sysevent_get_seq(ev), deliver_buf);
		/*
		 * Clamp the copy to the fixed buffer size; oversized
		 * events are re-fetched in full by dispatch() via
		 * MODEVENTS_GETDATA.
		 */
		size = sysevent_get_size(ev) > LOGEVENT_BUFSIZE ?
		    LOGEVENT_BUFSIZE : sysevent_get_size(ev);
		(void) bcopy(ev, eventbuf[deliver_buf], size);
		deliver_buf = (deliver_buf + 1) % SE_EVENT_DISPATCH_CNT;
		rval = 0;
		/* Wake the dispatch_message consumer thread. */
		(void) sema_post(&sema_dispatch);
	}

	(void) mutex_unlock(&door_lock);

	/*
	 * Filling in return values for door_return
	 */
	(void) door_return((void *)&rval, sizeof (rval), NULL, 0);
	(void) door_return(NULL, 0, NULL, 0);
}
556
557 /*
558 * dispatch_message - dispatch message thread
559 * This thread spins until an event buffer is delivered
 * from the kernel.
561 *
562 * It will wait to dispatch an event to any clients
563 * until adequate resources are available to process
564 * the event buffer.
565 */
static void
dispatch_message(void)
{
	int error;

	for (;;) {
		syseventd_print(3, "dispatch_message: thread started\n");
		/*
		 * Spin till a message comes
		 * (sema_dispatch is posted by door_upcall for each
		 * buffer uploaded by the kernel)
		 */
		while (sema_wait(&sema_dispatch) != 0) {
			syseventd_print(1,
			    "dispatch_message: sema_wait failed\n");
			(void) sleep(1);
		}

		syseventd_print(3, "dispatch_message: sema_dispatch\n");

		/*
		 * Wait for available resources
		 * (sema_resource throttles outstanding events; it is
		 * posted back by event_completion_thr)
		 */
		while (sema_wait(&sema_resource) != 0) {
			syseventd_print(1, "dispatch_message: sema_wait "
			    "failed\n");
			(void) sleep(1);
		}

		syseventd_print(2, "dispatch_message: eventbuf %d\n",
		    dispatch_buf);

		/*
		 * Client dispatch
		 */
		do {
			error = dispatch();
		} while (error == EAGAIN);

		syseventd_print(2, "eventbuf %d dispatched\n", dispatch_buf);
		dispatch_buf = (dispatch_buf + 1) % SE_EVENT_DISPATCH_CNT;

		/*
		 * kernel received a busy signal -
		 * kickstart the kernel delivery thread
		 * door_lock blocks the kernel so we hold it for the
		 * shortest time possible.
		 */
		(void) mutex_lock(&door_lock);
		if (door_upcall_retval == EAGAIN && !fini_pending) {
			syseventd_print(3, "dispatch_message: retrigger "
			    "door_upcall_retval = %d\n",
			    door_upcall_retval);
			(void) modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH,
			    NULL, NULL, NULL, 0);
			door_upcall_retval = 0;
		}
		(void) mutex_unlock(&door_lock);
	}
	/* NOTREACHED */
}
625
626 /*
627 * drain_eventq - Called to drain all pending events from the client's
628 * event queue.
629 */
630 static void
drain_eventq(struct sysevent_client * scp,int status)631 drain_eventq(struct sysevent_client *scp, int status)
632 {
633 struct event_dispatch_pkg *d_pkg;
634 struct event_dispatchq *eventq, *eventq_next;
635
636 syseventd_print(3, "Draining eventq for client %d\n",
637 scp->client_num);
638
639 eventq = scp->eventq;
640 while (eventq) {
641 /*
642 * Mark all dispatched events as completed, but indicate the
643 * error status
644 */
645 d_pkg = eventq->d_pkg;
646
647 syseventd_print(4, "drain event 0X%llx for client %d\n",
648 sysevent_get_seq(d_pkg->ev), scp->client_num);
649
650 if (d_pkg->completion_state == SE_NOT_DISPATCHED) {
651 d_pkg->completion_status = status;
652 d_pkg->completion_state = SE_COMPLETE;
653 (void) sema_post(d_pkg->completion_sema);
654 }
655
656 eventq_next = eventq->next;
657 free(eventq);
658 eventq = eventq_next;
659 scp->eventq = eventq;
660 }
661 }
662
663 /*
664 * client_deliver_event_thr - Client delivery thread
665 * This thread will process any events on this
666 * client's eventq.
667 */
static void
client_deliver_event_thr(void *arg)
{
	int flag, error, i;
	sysevent_t *ev;
	hrtime_t now;
	module_t *mod;
	struct event_dispatchq *eventq;
	struct sysevent_client *scp;
	struct event_dispatch_pkg *d_pkg;

	scp = (struct sysevent_client *)arg;
	mod = (module_t *)scp->client_data;

	/* client_lock is held except across the actual delivery call. */
	(void) mutex_lock(&scp->client_lock);
	for (;;) {
		while (scp->eventq == NULL) {

			/*
			 * Client has been suspended or unloaded, go no further.
			 */
			if (fini_pending) {
				scp->client_flags &= ~SE_CLIENT_THR_RUNNING;
				syseventd_print(3, "Client %d delivery thread "
				    "exiting flags: 0X%x\n",
				    scp->client_num, scp->client_flags);
				(void) mutex_unlock(&scp->client_lock);
				return;
			}

			/* Sleep until client_deliver_event queues work. */
			(void) cond_wait(&scp->client_cv, &scp->client_lock);

		}

		/*
		 * Process events from the head of the eventq, eventq is locked
		 * going into the processing.
		 */
		eventq = scp->eventq;
		while (eventq != NULL) {
			d_pkg = eventq->d_pkg;
			d_pkg->completion_state = SE_OUTSTANDING;
			scp->eventq = eventq->next;
			free(eventq);
			/* Drop the lock while calling into the SLM. */
			(void) mutex_unlock(&scp->client_lock);


			flag = error = 0;
			ev = d_pkg->ev;

			syseventd_print(3, "Start delivery for client %d "
			    "with retry count %d\n",
			    scp->client_num, d_pkg->retry_count);

			/*
			 * Retry limit has been reached by this client, indicate
			 * that no further retries are allowed
			 */
			for (i = 0; i <= scp->retry_limit; ++i) {
				if (i == scp->retry_limit)
					flag = SE_NO_RETRY;

				/* Start the clock for the event delivery */
				d_pkg->start_time = gethrtime();

				syseventd_print(9, "Deliver to module client "
				    "%s\n", mod->name);

				error = mod->deliver_event(ev, flag);

				/* Can not allow another retry */
				if (i == scp->retry_limit)
					error = 0;

				/* Stop the clock */
				now = gethrtime();

				/*
				 * Suspend event processing and drain the
				 * event q for latent clients
				 * (delivery took longer than SE_TIMEOUT)
				 */
				if (now - d_pkg->start_time >
				    ((hrtime_t)SE_TIMEOUT * NANOSEC)) {
					syseventd_print(1, "Unresponsive "
					    "client %d: Draining eventq and "
					    "suspending event delivery\n",
					    scp->client_num);
					(void) mutex_lock(&scp->client_lock);
					scp->client_flags &=
					    ~SE_CLIENT_THR_RUNNING;
					scp->client_flags |=
					    SE_CLIENT_SUSPENDED;

					/* Cleanup current event */
					d_pkg->completion_status = EFAULT;
					d_pkg->completion_state = SE_COMPLETE;
					(void) sema_post(
					    d_pkg->completion_sema);

					/*
					 * Drain the remaining events from the
					 * queue.
					 */
					drain_eventq(scp, EINVAL);
					(void) mutex_unlock(&scp->client_lock);
					return;
				}

				/* Event delivery retry requested */
				if (fini_pending || error != EAGAIN) {
					break;
				} else {
					(void) sleep(SE_RETRY_TIME);
				}
			}

			/* Mark complete and wake the completion thread. */
			(void) mutex_lock(&scp->client_lock);
			d_pkg->completion_status = error;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);
			syseventd_print(3, "Completed delivery with "
			    "error %d\n", error);
			eventq = scp->eventq;
		}

		syseventd_print(3, "No more events to process for client %d\n",
		    scp->client_num);

		/* Return if this was a synchronous delivery */
		if (!SE_CLIENT_IS_THR_RUNNING(scp)) {
			(void) mutex_unlock(&scp->client_lock);
			return;
		}

	}
}
804
805 /*
806 * client_deliver_event - Client specific event delivery
807 * This routine will allocate and initialize the
 * necessary per-client dispatch data.
809 *
810 * If the eventq is not empty, it may be assumed that
811 * a delivery thread exists for this client and the
812 * dispatch data is appended to the eventq.
813 *
814 * The dispatch package is freed by the event completion
815 * thread (event_completion_thr) and the eventq entry
816 * is freed by the event delivery thread.
817 */
818 static struct event_dispatch_pkg *
client_deliver_event(struct sysevent_client * scp,sysevent_t * ev,sema_t * completion_sema)819 client_deliver_event(struct sysevent_client *scp, sysevent_t *ev,
820 sema_t *completion_sema)
821 {
822 size_t ev_sz = sysevent_get_size(ev);
823 struct event_dispatchq *newq, *tmp;
824 struct event_dispatch_pkg *d_pkg;
825
826 syseventd_print(3, "client_deliver_event: id 0x%llx size %d\n",
827 (longlong_t)sysevent_get_seq(ev), ev_sz);
828 if (debug_level == 9) {
829 se_print(stdout, ev);
830 }
831
832 /*
833 * Check for suspended client
834 */
835 (void) mutex_lock(&scp->client_lock);
836 if (SE_CLIENT_IS_SUSPENDED(scp) || !SE_CLIENT_IS_THR_RUNNING(scp)) {
837 (void) mutex_unlock(&scp->client_lock);
838 return (NULL);
839 }
840
841 /*
842 * Allocate a new dispatch package and eventq entry
843 */
844 newq = (struct event_dispatchq *)malloc(
845 sizeof (struct event_dispatchq));
846 if (newq == NULL) {
847 (void) mutex_unlock(&scp->client_lock);
848 return (NULL);
849 }
850
851 d_pkg = (struct event_dispatch_pkg *)malloc(
852 sizeof (struct event_dispatch_pkg));
853 if (d_pkg == NULL) {
854 free(newq);
855 (void) mutex_unlock(&scp->client_lock);
856 return (NULL);
857 }
858
859 /* Initialize the dispatch package */
860 d_pkg->scp = scp;
861 d_pkg->retry_count = 0;
862 d_pkg->completion_status = 0;
863 d_pkg->completion_state = SE_NOT_DISPATCHED;
864 d_pkg->completion_sema = completion_sema;
865 d_pkg->ev = ev;
866 newq->d_pkg = d_pkg;
867 newq->next = NULL;
868
869 if (scp->eventq != NULL) {
870
871 /* Add entry to the end of the eventq */
872 tmp = scp->eventq;
873 while (tmp->next != NULL)
874 tmp = tmp->next;
875 tmp->next = newq;
876 } else {
877 /* event queue empty, wakeup delivery thread */
878 scp->eventq = newq;
879 (void) cond_signal(&scp->client_cv);
880 }
881 (void) mutex_unlock(&scp->client_lock);
882
883 return (d_pkg);
884 }
885
886 /*
887 * event_completion_thr - Event completion thread. This thread routine
888 * waits for all client delivery thread to complete
889 * delivery of a particular event.
890 */
static void
event_completion_thr()
{
	int ret, i, client_count, ok_to_free;
	sysevent_id_t eid;
	struct sysevent_client *scp;
	struct ev_completion *ev_comp;
	struct event_dispatchq *dispatchq;
	struct event_dispatch_pkg *d_pkg;

	/* ev_comp_lock is held except while waiting on client delivery. */
	(void) mutex_lock(&ev_comp_lock);
	for (;;) {
		while (event_compq == NULL) {
			(void) cond_wait(&event_comp_cv, &ev_comp_lock);
		}

		/*
		 * Process event completions from the head of the
		 * completion queue
		 */
		ev_comp = event_compq;
		while (ev_comp) {
			(void) mutex_unlock(&ev_comp_lock);
			eid.eid_seq = sysevent_get_seq(ev_comp->ev);
			sysevent_get_time(ev_comp->ev, &eid.eid_ts);
			client_count = ev_comp->client_count;
			ok_to_free = 1;

			syseventd_print(3, "Wait for event completion of "
			    "event 0X%llx on %d clients\n",
			    eid.eid_seq, client_count);

			/* One sema_post per client delivery (or drain). */
			while (client_count) {
				syseventd_print(9, "Waiting for %d clients on "
				    "event id 0X%llx\n", client_count,
				    eid.eid_seq);

				(void) sema_wait(&ev_comp->client_sema);
				--client_count;
			}

			syseventd_print(3, "Cleaning up clients for event "
			    "0X%llx\n", eid.eid_seq);
			dispatchq = ev_comp->dispatch_list;
			while (dispatchq != NULL) {
				d_pkg = dispatchq->d_pkg;
				scp = d_pkg->scp;

				/*
				 * Any client still wanting a retry keeps
				 * the event alive in the kernel queue so
				 * it can be replayed after a reload.
				 */
				if (d_pkg->completion_status == EAGAIN)
					ok_to_free = 0;

				syseventd_print(4, "Delivery of 0X%llx "
				    "complete for client %d retry count %d "
				    "status %d\n", eid.eid_seq,
				    scp->client_num,
				    d_pkg->retry_count,
				    d_pkg->completion_status);

				free(d_pkg);
				ev_comp->dispatch_list = dispatchq->next;
				free(dispatchq);
				dispatchq = ev_comp->dispatch_list;
			}

			if (ok_to_free) {
				/* Tell the kernel it may free its copy. */
				for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
					if ((ret = modctl(MODEVENTS,
					    (uintptr_t)MODEVENTS_FREEDATA,
					    (uintptr_t)&eid, NULL,
					    NULL, 0)) != 0) {
						syseventd_print(1, "attempting "
						    "to free event 0X%llx\n",
						    eid.eid_seq);

						/*
						 * Kernel may need time to
						 * move this event buffer to
						 * the sysevent sent queue
						 */
						(void) sleep(1);
					} else {
						break;
					}
				}
				if (ret) {
					syseventd_print(1, "Unable to free "
					    "event 0X%llx from the "
					    "kernel\n", eid.eid_seq);
				}
			} else {
				syseventd_print(1, "Not freeing event 0X%llx\n",
				    eid.eid_seq);
			}

			syseventd_print(2, "Event delivery complete for id "
			    "0X%llx\n", eid.eid_seq);

			/*
			 * Unlink this completion entry and release one
			 * resource slot back to dispatch_message().
			 */
			(void) mutex_lock(&ev_comp_lock);
			event_compq = ev_comp->next;
			free(ev_comp->ev);
			free(ev_comp);
			ev_comp = event_compq;
			(void) sema_post(&sema_resource);
		}

		/*
		 * Event completion queue is empty, signal possible unload
		 * operation
		 */
		(void) cond_signal(&event_comp_cv);

		syseventd_print(3, "No more events\n");
	}
}
1005
1006 /*
1007 * dispatch - Dispatch the current event buffer to all valid SLM clients.
1008 */
static int
dispatch(void)
{
	int ev_sz, i, client_count = 0;
	sysevent_t *new_ev;
	sysevent_id_t eid;
	struct ev_completion *ev_comp, *tmp;
	struct event_dispatchq *dispatchq, *client_list;
	struct event_dispatch_pkg *d_pkg;

	/* Check for module unload operation */
	if (rw_tryrdlock(&mod_unload_lock) != 0) {
		syseventd_print(2, "unload in progress abort delivery\n");
		/* Give back both slots consumed for this buffer. */
		(void) sema_post(&sema_eventbuf);
		(void) sema_post(&sema_resource);
		return (0);
	}

	syseventd_print(3, "deliver dispatch buffer %d", dispatch_buf);
	eid.eid_seq = sysevent_get_seq(eventbuf[dispatch_buf]);
	sysevent_get_time(eventbuf[dispatch_buf], &eid.eid_ts);
	syseventd_print(3, "deliver msg id: 0x%llx\n", eid.eid_seq);

	/*
	 * ev_comp is used to hold event completion data. It is freed
	 * by the event completion thread (event_completion_thr).
	 */
	ev_comp = (struct ev_completion *)
	    malloc(sizeof (struct ev_completion));
	if (ev_comp == NULL) {
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate event completion buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		/* EAGAIN: dispatch_message() retries this buffer. */
		return (EAGAIN);
	}
	ev_comp->dispatch_list = NULL;
	ev_comp->next = NULL;
	(void) sema_init(&ev_comp->client_sema, 0, USYNC_THREAD, NULL);

	ev_sz = sysevent_get_size(eventbuf[dispatch_buf]);
	new_ev = calloc(1, ev_sz);
	if (new_ev == NULL) {
		free(ev_comp);
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate new event buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}


	/*
	 * For long messages, copy additional data from kernel
	 * (the door upcall truncated the event to LOGEVENT_BUFSIZE)
	 */
	if (ev_sz > LOGEVENT_BUFSIZE) {
		int ret = 0;

		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);

		for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
			if ((ret = modctl(MODEVENTS,
			    (uintptr_t)MODEVENTS_GETDATA,
			    (uintptr_t)&eid,
			    (uintptr_t)ev_sz,
			    (uintptr_t)new_ev, 0))
			    == 0)
				break;
			else
				(void) sleep(1);
		}
		if (ret) {
			syseventd_print(1, "GET_DATA failed for 0X%llx:%llx\n",
			    eid.eid_ts, eid.eid_seq);
			free(new_ev);
			free(ev_comp);
			(void) rw_unlock(&mod_unload_lock);
			return (EAGAIN);
		}
	} else {
		(void) bcopy(eventbuf[dispatch_buf], new_ev, ev_sz);
		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);
	}


	/*
	 * Deliver a copy of eventbuf to clients so
	 * eventbuf can be used for the next message
	 */
	for (i = 0; i < MAX_SLM; ++i) {

		/* Don't bother for suspended or unloaded clients */
		if (!SE_CLIENT_IS_LOADED(sysevent_client_tbl[i]) ||
		    SE_CLIENT_IS_SUSPENDED(sysevent_client_tbl[i]))
			continue;

		/*
		 * Allocate event dispatch queue entry. All queue entries
		 * are freed by the event completion thread as client
		 * delivery completes.
		 */
		dispatchq = (struct event_dispatchq *)malloc(
		    sizeof (struct event_dispatchq));
		if (dispatchq == NULL) {
			syseventd_print(1, "Can not allocate dispatch q "
			    "for event id 0X%llx client %d\n", eid.eid_seq, i);
			continue;
		}
		dispatchq->next = NULL;

		/* Initiate client delivery */
		d_pkg = client_deliver_event(sysevent_client_tbl[i],
		    new_ev, &ev_comp->client_sema);
		if (d_pkg == NULL) {
			syseventd_print(1, "Can not allocate dispatch "
			    "package for event id 0X%llx client %d\n",
			    eid.eid_seq, i);
			free(dispatchq);
			continue;
		}
		dispatchq->d_pkg = d_pkg;
		++client_count;

		/* client_list tracks the tail for O(1) append. */
		if (ev_comp->dispatch_list == NULL) {
			ev_comp->dispatch_list = dispatchq;
			client_list = dispatchq;
		} else {
			client_list->next = dispatchq;
			client_list = client_list->next;
		}
	}

	ev_comp->client_count = client_count;
	ev_comp->ev = new_ev;

	(void) mutex_lock(&ev_comp_lock);

	if (event_compq == NULL) {
		syseventd_print(3, "Wakeup event completion thread for "
		    "id 0X%llx\n", eid.eid_seq);
		event_compq = ev_comp;
		(void) cond_signal(&event_comp_cv);
	} else {

		/* Add entry to the end of the event completion queue */
		tmp = event_compq;
		while (tmp->next != NULL)
			tmp = tmp->next;
		tmp->next = ev_comp;
		syseventd_print(3, "event added to completion queue for "
		    "id 0X%llx\n", eid.eid_seq);
	}
	(void) mutex_unlock(&ev_comp_lock);
	(void) rw_unlock(&mod_unload_lock);

	return (0);
}
1166
1167 #define MODULE_DIR_HW "/usr/platform/%s/lib/sysevent/modules/"
1168 #define MODULE_DIR_GEN "/usr/lib/sysevent/modules/"
1169 #define MOD_DIR_NUM 3
1170 static char dirname[MOD_DIR_NUM][MAXPATHLEN];
1171
1172 static char *
dir_num2name(int dirnum)1173 dir_num2name(int dirnum)
1174 {
1175 char infobuf[MAXPATHLEN];
1176
1177 if (dirnum >= MOD_DIR_NUM)
1178 return (NULL);
1179
1180 if (dirname[0][0] == '\0') {
1181 if (sysinfo(SI_PLATFORM, infobuf, MAXPATHLEN) == -1) {
1182 syseventd_print(1, "dir_num2name: "
1183 "sysinfo error %s\n", strerror(errno));
1184 return (NULL);
1185 } else if (snprintf(dirname[0], sizeof (dirname[0]),
1186 MODULE_DIR_HW, infobuf) >= sizeof (dirname[0])) {
1187 syseventd_print(1, "dir_num2name: "
1188 "platform name too long: %s\n",
1189 infobuf);
1190 return (NULL);
1191 }
1192 if (sysinfo(SI_MACHINE, infobuf, MAXPATHLEN) == -1) {
1193 syseventd_print(1, "dir_num2name: "
1194 "sysinfo error %s\n", strerror(errno));
1195 return (NULL);
1196 } else if (snprintf(dirname[1], sizeof (dirname[1]),
1197 MODULE_DIR_HW, infobuf) >= sizeof (dirname[1])) {
1198 syseventd_print(1, "dir_num2name: "
1199 "machine name too long: %s\n",
1200 infobuf);
1201 return (NULL);
1202 }
1203 (void) strcpy(dirname[2], MODULE_DIR_GEN);
1204 }
1205
1206 return (dirname[dirnum]);
1207 }
1208
1209
1210 /*
1211 * load_modules - Load modules found in the common syseventd module directories
1212 * Modules that do not provide valid interfaces are rejected.
1213 */
1214 static void
load_modules(char * dirname)1215 load_modules(char *dirname)
1216 {
1217 int client_id;
1218 DIR *mod_dir;
1219 module_t *mod;
1220 struct dirent *entp;
1221 struct slm_mod_ops *mod_ops;
1222 struct sysevent_client *scp;
1223
1224 if (dirname == NULL)
1225 return;
1226
1227 /* Return silently if module directory does not exist */
1228 if ((mod_dir = opendir(dirname)) == NULL) {
1229 syseventd_print(1, "Unable to open module directory %s: %s\n",
1230 dirname, strerror(errno));
1231 return;
1232 }
1233
1234 syseventd_print(3, "loading modules from %s\n", dirname);
1235
1236 /*
1237 * Go through directory, looking for files ending with .so
1238 */
1239 while ((entp = readdir(mod_dir)) != NULL) {
1240 void *dlh, *f;
1241 char *tmp, modpath[MAXPATHLEN];
1242
1243 if (((tmp = strstr(entp->d_name, MODULE_SUFFIX)) == NULL) ||
1244 (tmp[strlen(MODULE_SUFFIX)] != '\0')) {
1245 continue;
1246 }
1247
1248 if (snprintf(modpath, sizeof (modpath), "%s%s",
1249 dirname, entp->d_name) >= sizeof (modpath)) {
1250 syseventd_err_print(INIT_PATH_ERR, modpath);
1251 continue;
1252 }
1253 if ((dlh = dlopen(modpath, RTLD_LAZY)) == NULL) {
1254 syseventd_err_print(LOAD_MOD_DLOPEN_ERR,
1255 modpath, dlerror());
1256 continue;
1257 } else if ((f = dlsym(dlh, EVENT_INIT)) == NULL) {
1258 syseventd_err_print(LOAD_MOD_NO_INIT,
1259 modpath, dlerror());
1260 (void) dlclose(dlh);
1261 continue;
1262 }
1263
1264 mod = malloc(sizeof (*mod));
1265 if (mod == NULL) {
1266 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod",
1267 strerror(errno));
1268 (void) dlclose(dlh);
1269 continue;
1270 }
1271
1272 mod->name = strdup(entp->d_name);
1273 if (mod->name == NULL) {
1274 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod->name",
1275 strerror(errno));
1276 (void) dlclose(dlh);
1277 free(mod);
1278 continue;
1279 }
1280
1281 mod->dlhandle = dlh;
1282 mod->event_mod_init = (struct slm_mod_ops *(*)())f;
1283
1284 /* load in other module functions */
1285 mod->event_mod_fini = (void (*)())dlsym(dlh, EVENT_FINI);
1286 if (mod->event_mod_fini == NULL) {
1287 syseventd_err_print(LOAD_MOD_DLSYM_ERR, mod->name,
1288 dlerror());
1289 free(mod->name);
1290 free(mod);
1291 (void) dlclose(dlh);
1292 continue;
1293 }
1294
1295 /* Call module init routine */
1296 if ((mod_ops = mod->event_mod_init()) == NULL) {
1297 syseventd_err_print(LOAD_MOD_EINVAL, mod->name);
1298 free(mod->name);
1299 free(mod);
1300 (void) dlclose(dlh);
1301 continue;
1302 }
1303 if (mod_ops->major_version != SE_MAJOR_VERSION) {
1304 syseventd_err_print(LOAD_MOD_VERSION_MISMATCH,
1305 mod->name, SE_MAJOR_VERSION,
1306 mod_ops->major_version);
1307 mod->event_mod_fini();
1308 free(mod->name);
1309 free(mod);
1310 (void) dlclose(dlh);
1311 continue;
1312 }
1313
1314 mod->deliver_event = mod_ops->deliver_event;
1315 /* Add module entry to client list */
1316 if ((client_id = insert_client((void *)mod, SLM_CLIENT,
1317 (mod_ops->retry_limit <= SE_MAX_RETRY_LIMIT ?
1318 mod_ops->retry_limit : SE_MAX_RETRY_LIMIT))) < 0) {
1319 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client",
1320 strerror(errno));
1321 mod->event_mod_fini();
1322 free(mod->name);
1323 free(mod);
1324 (void) dlclose(dlh);
1325 continue;
1326 }
1327
1328 scp = sysevent_client_tbl[client_id];
1329 ++concurrency_level;
1330 (void) thr_setconcurrency(concurrency_level);
1331 if (thr_create(NULL, 0,
1332 (void *(*)(void *))client_deliver_event_thr,
1333 (void *)scp, THR_BOUND, &scp->tid) != 0) {
1334
1335 syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client",
1336 strerror(errno));
1337 mod->event_mod_fini();
1338 free(mod->name);
1339 free(mod);
1340 (void) dlclose(dlh);
1341 continue;
1342 }
1343 scp->client_flags |= SE_CLIENT_THR_RUNNING;
1344
1345 syseventd_print(3, "loaded module %s\n", entp->d_name);
1346 }
1347
1348 (void) closedir(mod_dir);
1349 syseventd_print(3, "modules loaded\n");
1350 }
1351
1352 /*
1353 * unload_modules - modules are unloaded prior to graceful shutdown or
1354 * before restarting the daemon upon receipt of
1355 * SIGHUP.
1356 */
1357 static void
unload_modules(int sig)1358 unload_modules(int sig)
1359 {
1360 int i, count, done;
1361 module_t *mod;
1362 struct sysevent_client *scp;
1363
1364 /*
1365 * unload modules that are ready, skip those that have not
1366 * drained their event queues.
1367 */
1368 count = done = 0;
1369 while (done < MAX_SLM) {
1370 /* Don't wait indefinitely for unresponsive clients */
1371 if (sig != SIGHUP && count > SE_TIMEOUT) {
1372 break;
1373 }
1374
1375 done = 0;
1376
1377 /* Shutdown clients */
1378 for (i = 0; i < MAX_SLM; ++i) {
1379 scp = sysevent_client_tbl[i];
1380 if (mutex_trylock(&scp->client_lock) == 0) {
1381 if (scp->client_type != SLM_CLIENT ||
1382 scp->client_data == NULL) {
1383 (void) mutex_unlock(&scp->client_lock);
1384 done++;
1385 continue;
1386 }
1387 } else {
1388 syseventd_print(3, "Skipping unload of "
1389 "client %d: client locked\n",
1390 scp->client_num);
1391 continue;
1392 }
1393
1394 /*
1395 * Drain the eventq and wait for delivery thread to
1396 * cleanly exit
1397 */
1398 drain_eventq(scp, EAGAIN);
1399 (void) cond_signal(&scp->client_cv);
1400 (void) mutex_unlock(&scp->client_lock);
1401 (void) thr_join(scp->tid, NULL, NULL);
1402
1403 /*
1404 * It is now safe to unload the module
1405 */
1406 mod = (module_t *)scp->client_data;
1407 syseventd_print(2, "Unload %s\n", mod->name);
1408 mod->event_mod_fini();
1409 (void) dlclose(mod->dlhandle);
1410 free(mod->name);
1411 (void) mutex_lock(&client_tbl_lock);
1412 delete_client(i);
1413 (void) mutex_unlock(&client_tbl_lock);
1414 ++done;
1415
1416 }
1417 ++count;
1418 (void) sleep(1);
1419 }
1420
1421 /*
1422 * Wait for event completions
1423 */
1424 syseventd_print(2, "waiting for event completions\n");
1425 (void) mutex_lock(&ev_comp_lock);
1426 while (event_compq != NULL) {
1427 (void) cond_wait(&event_comp_cv, &ev_comp_lock);
1428 }
1429 (void) mutex_unlock(&ev_comp_lock);
1430 }
1431
1432 /*
1433 * syseventd_init - Called at daemon (re)start-up time to load modules
1434 * and kickstart the kernel delivery engine.
1435 */
1436 static void
syseventd_init()1437 syseventd_init()
1438 {
1439 int i, fd;
1440 char local_door_file[PATH_MAX + 1];
1441
1442 fini_pending = 0;
1443
1444 concurrency_level = MIN_CONCURRENCY_LEVEL;
1445 (void) thr_setconcurrency(concurrency_level);
1446
1447 /*
1448 * Load client modules for event delivering
1449 */
1450 for (i = 0; i < MOD_DIR_NUM; ++i) {
1451 load_modules(dir_num2name(i));
1452 }
1453
1454 /*
1455 * Create kernel delivery door service
1456 */
1457 syseventd_print(8, "Create a door for kernel upcalls\n");
1458 if (snprintf(local_door_file, sizeof (local_door_file), "%s%s",
1459 root_dir, LOGEVENT_DOOR_UPCALL) >= sizeof (local_door_file)) {
1460 syseventd_err_print(INIT_PATH_ERR, local_door_file);
1461 syseventd_exit(5);
1462 }
1463
1464 /*
1465 * Remove door file for robustness.
1466 */
1467 if (unlink(local_door_file) != 0)
1468 syseventd_print(8, "Unlink of %s failed.\n", local_door_file);
1469
1470 fd = open(local_door_file, O_CREAT|O_RDWR, S_IREAD|S_IWRITE);
1471 if ((fd == -1) && (errno != EEXIST)) {
1472 syseventd_err_print(INIT_OPEN_DOOR_ERR, strerror(errno));
1473 syseventd_exit(5);
1474 }
1475 (void) close(fd);
1476
1477 upcall_door = door_create(door_upcall, NULL,
1478 DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
1479 if (upcall_door == -1) {
1480 syseventd_err_print(INIT_CREATE_DOOR_ERR, strerror(errno));
1481 syseventd_exit(5);
1482 }
1483
1484 (void) fdetach(local_door_file);
1485 retry:
1486 if (fattach(upcall_door, local_door_file) != 0) {
1487 if (errno == EBUSY)
1488 goto retry;
1489 syseventd_err_print(INIT_FATTACH_ERR, strerror(errno));
1490 (void) door_revoke(upcall_door);
1491 syseventd_exit(5);
1492 }
1493
1494 /*
1495 * Tell kernel the door name and start delivery
1496 */
1497 syseventd_print(2,
1498 "local_door_file = %s\n", local_door_file);
1499 if (modctl(MODEVENTS,
1500 (uintptr_t)MODEVENTS_SET_DOOR_UPCALL_FILENAME,
1501 (uintptr_t)local_door_file, NULL, NULL, 0) < 0) {
1502 syseventd_err_print(INIT_DOOR_NAME_ERR, strerror(errno));
1503 syseventd_exit(6);
1504 }
1505
1506 door_upcall_retval = 0;
1507
1508 if (modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0)
1509 < 0) {
1510 syseventd_err_print(KERNEL_REPLAY_ERR, strerror(errno));
1511 syseventd_exit(7);
1512 }
1513 }
1514
1515 /*
1516 * syseventd_fini - shut down daemon, but do not exit
1517 */
1518 static void
syseventd_fini(int sig)1519 syseventd_fini(int sig)
1520 {
1521 /*
1522 * Indicate that event queues should be drained and no
1523 * additional events be accepted
1524 */
1525 fini_pending = 1;
1526
1527 /* Close the kernel event door to halt delivery */
1528 (void) door_revoke(upcall_door);
1529
1530 syseventd_print(1, "Unloading modules\n");
1531 (void) rw_wrlock(&mod_unload_lock);
1532 unload_modules(sig);
1533 (void) rw_unlock(&mod_unload_lock);
1534
1535 }
1536
1537 /*
1538 * enter_daemon_lock - lock the daemon file lock
1539 *
1540 * Use an advisory lock to ensure that only one daemon process is active
1541 * in the system at any point in time. If the lock is held by another
1542 * process, do not block but return the pid owner of the lock to the
1543 * caller immediately. The lock is cleared if the holding daemon process
1544 * exits for any reason even if the lock file remains, so the daemon can
1545 * be restarted if necessary. The lock file is DAEMON_LOCK_FILE.
1546 */
1547 static pid_t
enter_daemon_lock(void)1548 enter_daemon_lock(void)
1549 {
1550 struct flock lock;
1551
1552 syseventd_print(8, "enter_daemon_lock: lock file = %s\n",
1553 DAEMON_LOCK_FILE);
1554
1555 if (snprintf(local_lock_file, sizeof (local_lock_file), "%s%s",
1556 root_dir, DAEMON_LOCK_FILE) >= sizeof (local_lock_file)) {
1557 syseventd_err_print(INIT_PATH_ERR, local_lock_file);
1558 syseventd_exit(8);
1559 }
1560 daemon_lock_fd = open(local_lock_file, O_CREAT|O_RDWR, 0644);
1561 if (daemon_lock_fd < 0) {
1562 syseventd_err_print(INIT_LOCK_OPEN_ERR,
1563 local_lock_file, strerror(errno));
1564 syseventd_exit(8);
1565 }
1566
1567 lock.l_type = F_WRLCK;
1568 lock.l_whence = SEEK_SET;
1569 lock.l_start = 0;
1570 lock.l_len = 0;
1571
1572 if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
1573 if (fcntl(daemon_lock_fd, F_GETLK, &lock) == -1) {
1574 syseventd_err_print(INIT_LOCK_ERR,
1575 local_lock_file, strerror(errno));
1576 exit(2);
1577 }
1578 return (lock.l_pid);
1579 }
1580 hold_daemon_lock = 1;
1581
1582 return (getpid());
1583 }
1584
1585 /*
1586 * exit_daemon_lock - release the daemon file lock
1587 */
1588 static void
exit_daemon_lock(void)1589 exit_daemon_lock(void)
1590 {
1591 struct flock lock;
1592
1593 lock.l_type = F_UNLCK;
1594 lock.l_whence = SEEK_SET;
1595 lock.l_start = 0;
1596 lock.l_len = 0;
1597
1598 if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
1599 syseventd_err_print(INIT_UNLOCK_ERR,
1600 local_lock_file, strerror(errno));
1601 }
1602
1603 if (close(daemon_lock_fd) == -1) {
1604 syseventd_err_print(INIT_LOCK_CLOSE_ERR,
1605 local_lock_file, strerror(errno));
1606 exit(-1);
1607 }
1608 }
1609
1610 /*
1611 * syseventd_err_print - print error messages to the terminal if not
1612 * yet daemonized or to syslog.
1613 */
1614 /*PRINTFLIKE1*/
1615 void
syseventd_err_print(char * message,...)1616 syseventd_err_print(char *message, ...)
1617 {
1618 va_list ap;
1619
1620 (void) mutex_lock(&err_mutex);
1621 va_start(ap, message);
1622
1623 if (logflag) {
1624 (void) vsyslog(LOG_ERR, message, ap);
1625 } else {
1626 (void) fprintf(stderr, "%s: ", prog);
1627 (void) vfprintf(stderr, message, ap);
1628 }
1629 va_end(ap);
1630 (void) mutex_unlock(&err_mutex);
1631 }
1632
1633 /*
1634 * syseventd_print - print messages to the terminal or to syslog
1635 * the following levels are implemented:
1636 *
1637 * 1 - transient errors that does not affect normal program flow
1638 * 2 - upcall/dispatch interaction
1639 * 3 - program flow trace as each message goes through the daemon
1640 * 8 - all the nit-gritty details of startup and shutdown
1641 * 9 - very verbose event flow tracing (no daemonization of syseventd)
1642 *
1643 */
1644 /*PRINTFLIKE2*/
1645 void
syseventd_print(int level,char * message,...)1646 syseventd_print(int level, char *message, ...)
1647 {
1648 va_list ap;
1649 static int newline = 1;
1650
1651 if (level > debug_level) {
1652 return;
1653 }
1654
1655 (void) mutex_lock(&err_mutex);
1656 va_start(ap, message);
1657 if (logflag) {
1658 (void) syslog(LOG_DEBUG, "%s[%ld]: ",
1659 prog, getpid());
1660 (void) vsyslog(LOG_DEBUG, message, ap);
1661 } else {
1662 if (newline) {
1663 (void) fprintf(stdout, "%s[%ld]: ",
1664 prog, getpid());
1665 (void) vfprintf(stdout, message, ap);
1666 } else {
1667 (void) vfprintf(stdout, message, ap);
1668 }
1669 }
1670 if (message[strlen(message)-1] == '\n') {
1671 newline = 1;
1672 } else {
1673 newline = 0;
1674 }
1675 va_end(ap);
1676 (void) mutex_unlock(&err_mutex);
1677 }
1678