xref: /illumos-gate/usr/src/cmd/syseventd/daemons/syseventd/syseventd.c (revision d7b72f7b52f902da47cc7210a9121f4caabbcb9c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  *	syseventd - The system event daemon
29  *
30  *		This daemon dispatches event buffers received from the
31  *		kernel to all interested SLM clients.  SLMs in turn
32  *		deliver the buffers to their particular application
33  *		clients.
34  */
35 #include <stdio.h>
36 #include <sys/types.h>
37 #include <dirent.h>
38 #include <stdarg.h>
39 #include <stddef.h>
40 #include <stdlib.h>
41 #include <dlfcn.h>
42 #include <door.h>
43 #include <errno.h>
44 #include <fcntl.h>
45 #include <signal.h>
46 #include <strings.h>
47 #include <unistd.h>
48 #include <synch.h>
49 #include <syslog.h>
50 #include <thread.h>
51 #include <libsysevent.h>
52 #include <limits.h>
53 #include <locale.h>
54 #include <sys/sysevent.h>
55 #include <sys/sysevent_impl.h>
56 #include <sys/modctl.h>
57 #include <sys/stat.h>
58 #include <sys/systeminfo.h>
59 #include <sys/wait.h>
60 
61 #include "sysevent_signal.h"
62 #include "syseventd.h"
63 #include "message.h"
64 
65 extern int insert_client(void *client, int client_type, int retry_limit);
66 extern void delete_client(int id);
67 extern void initialize_client_tbl(void);
68 
69 extern struct sysevent_client *sysevent_client_tbl[];
70 extern mutex_t client_tbl_lock;
71 
72 #define	DEBUG_LEVEL_FORK	9	/* will run in background at all */
73 					/* levels less than DEBUG_LEVEL_FORK */
74 
75 int debug_level = 0;
76 char *root_dir = "";			/* Relative root for lock and door */
77 
78 /* Maximum number of outstanding events dispatched */
79 #define	SE_EVENT_DISPATCH_CNT	100
80 
81 static int upcall_door;			/* Kernel event door */
82 static int door_upcall_retval;		/* Kernel event posting return value */
83 static int fini_pending = 0;		/* fini pending flag */
84 static int deliver_buf = 0;		/* Current event buffer from kernel */
85 static int dispatch_buf = 0;		/* Current event buffer dispatched */
86 static sysevent_t **eventbuf;		/* Global array of event buffers */
87 static struct ev_completion *event_compq;	/* Event completion queue */
88 static mutex_t ev_comp_lock;		/* Event completion queue lock */
89 static mutex_t err_mutex;		/* error logging lock */
90 static mutex_t door_lock;		/* sync door return access */
91 static rwlock_t mod_unload_lock;		/* sync module unloading */
92 
93 /* declarations and definitions for avoiding multiple daemons running */
94 #define	DAEMON_LOCK_FILE "/var/run/syseventd.lock"
95 char local_lock_file[PATH_MAX + 1];
96 static int hold_daemon_lock;
97 static int daemon_lock_fd;
98 
99 /*
100  * sema_eventbuf - guards against the global buffer eventbuf
101  *	being written to before it has been dispatched to clients
102  *
103  * sema_dispatch - synchronizes between the kernel uploading thread
104  *	(producer) and the userland dispatch_message thread (consumer).
105  *
106  * sema_resource - throttles outstanding event consumption.
107  *
108  * event_comp_cv - synchronizes threads waiting for the event completion queue
109  *			to empty or become active.
110  */
111 static sema_t sema_eventbuf, sema_dispatch, sema_resource;
112 static cond_t event_comp_cv;
113 
114 /* Self-tuning concurrency level */
115 #define	MIN_CONCURRENCY_LEVEL	4
116 static int concurrency_level = MIN_CONCURRENCY_LEVEL;
117 
118 
119 /* SLM defines */
120 #define	MODULE_SUFFIX	".so"
121 #define	EVENT_FINI	"slm_fini"
122 #define	EVENT_INIT	"slm_init"
123 
124 #define	SE_TIMEOUT	60	/* Client dispatch timeout (seconds) */
125 
126 /* syslog message related */
127 static int logflag = 0;
128 static char *prog;
129 
130 /* function prototypes */
131 static void door_upcall(void *cookie, char *args, size_t alen, door_desc_t *ddp,
132 	uint_t ndid);
133 static void dispatch_message(void);
134 static int dispatch(void);
135 static void event_completion_thr(void);
136 static void usage(void);
137 
138 static void syseventd_init(void);
139 static void syseventd_fini(int sig);
140 
141 static pid_t enter_daemon_lock(void);
142 static void exit_daemon_lock(void);
143 
144 static void
145 usage() {
146 	(void) fprintf(stderr, "usage: syseventd [-d <debug_level>] "
147 	    "[-r <root_dir>]\n");
148 	(void) fprintf(stderr, "higher debug levels get progressively ");
149 	(void) fprintf(stderr, "more detailed debug information.\n");
150 	(void) fprintf(stderr, "syseventd will run in background if ");
151 	(void) fprintf(stderr, "run with a debug_level less than %d.\n",
152 	    DEBUG_LEVEL_FORK);
153 	exit(2);
154 }
155 
156 
157 /* common exit function which ensures releasing locks */
158 void
159 syseventd_exit(int status)
160 {
161 	syseventd_print(1, "exit status = %d\n", status);
162 
163 	if (hold_daemon_lock) {
164 		exit_daemon_lock();
165 	}
166 
167 	exit(status);
168 }
169 
170 
171 /*
172  * hup_handler - SIGHUP handler.  SIGHUP is used to force a reload of
173  *		 all SLMs.  During fini, events are drained from all
174  *		 client event queues.  The events that have been consumed
175  *		 by all clients are freed from the kernel event queue.
176  *
177  *		 Events that have not yet been delivered to all clients
178  *		 are not freed and will be replayed after all SLMs have
179  *		 been (re)loaded.
180  *
181  *		 After all client event queues have been drained, each
182  *		 SLM client is unloaded.  The init phase will (re)load
183  *		 each SLM and initiate event replay and delivery from
184  *		 the kernel.
185  *
186  */
187 /*ARGSUSED*/
188 static void
189 hup_handler(int sig)
190 {
191 	syseventd_err_print(SIGHUP_CAUGHT);
192 	(void) fflush(0);
193 	syseventd_fini(sig);
194 	syseventd_init();
195 	syseventd_err_print(DAEMON_RESTARTED);
196 	(void) fflush(0);
197 }
198 
199 /*
200  * Fault handler for other signals caught
201  */
202 /*ARGSUSED*/
203 static void
204 flt_handler(int sig)
205 {
206 	char signame[SIG2STR_MAX];
207 
208 	if (sig2str(sig, signame) == -1) {
209 		syseventd_err_print(UNKNOWN_SIGNAL_CAUGHT, sig);
210 	}
211 
212 	(void) se_signal_sethandler(sig, SIG_DFL, NULL);
213 
214 	switch (sig) {
215 		case SIGINT:
216 		case SIGSTOP:
217 		case SIGTERM:
218 			/* Close kernel door */
219 			(void) door_revoke(upcall_door);
220 
221 			/* Gracefully exit current event delivery threads */
222 			syseventd_fini(sig);
223 
224 			(void) fflush(0);
225 			(void) se_signal_unblockall();
226 			syseventd_exit(1);
227 			/*NOTREACHED*/
228 		case SIGCLD:
229 		case SIGPWR:
230 		case SIGWINCH:
231 		case SIGURG:
232 		case SIGCONT:
233 		case SIGWAITING:
234 		case SIGLWP:
235 		case SIGFREEZE:
236 		case SIGTHAW:
237 		case SIGCANCEL:
238 		case SIGXRES:
239 		case SIGJVM1:
240 		case SIGJVM2:
241 		case SIGINFO:
242 			/* No need to abort */
243 			break;
244 		default:
245 			syseventd_err_print(FATAL_ERROR);
246 			abort();
247 
248 	}
249 }
250 
251 /*
252  * Daemon parent process only.
253  * Child process signal to indicate successful daemon initialization.
254  * This is the normal and expected exit path of the daemon parent.
255  */
256 /*ARGSUSED*/
257 static void
258 sigusr1(int sig)
259 {
260 	syseventd_exit(0);
261 }
262 
263 static void *
264 sigwait_thr(void *arg __unused)
265 {
266 	int	sig;
267 	int	err;
268 	sigset_t signal_set;
269 
270 	for (;;) {
271 		syseventd_print(3, "sigwait thread waiting for signal\n");
272 		(void) sigfillset(&signal_set);
273 		err = sigwait(&signal_set, &sig);
274 		if (err) {
275 			syseventd_exit(2);
276 		}
277 
278 		/*
279 		 * Block all signals until the signal handler completes
280 		 */
281 		if (sig == SIGHUP) {
282 			hup_handler(sig);
283 		} else {
284 			flt_handler(sig);
285 		}
286 	}
287 	return (NULL);
288 }
289 
290 static void
291 set_root_dir(char *dir)
292 {
293 	root_dir = malloc(strlen(dir) + 1);
294 	if (root_dir == NULL) {
295 		syseventd_err_print(INIT_ROOT_DIR_ERR, strerror(errno));
296 		syseventd_exit(2);
297 	}
298 	(void) strcpy(root_dir, dir);
299 }
300 
301 int
302 main(int argc, char **argv)
303 {
304 	int i, c;
305 	int fd;
306 	pid_t pid;
307 	int has_forked = 0;
308 	extern char *optarg;
309 
310 	(void) setlocale(LC_ALL, "");
311 	(void) textdomain(TEXT_DOMAIN);
312 
313 	if (getuid() != 0) {
314 		(void) fprintf(stderr, "Must be root to run syseventd\n");
315 		syseventd_exit(1);
316 	}
317 
318 	if (argc > 5) {
319 		usage();
320 	}
321 
322 	if ((prog = strrchr(argv[0], '/')) == NULL) {
323 		prog = argv[0];
324 	} else {
325 		prog++;
326 	}
327 
328 	while ((c = getopt(argc, argv, "d:r:")) != EOF) {
329 		switch (c) {
330 		case 'd':
331 			debug_level = atoi(optarg);
332 			break;
333 		case 'r':
334 			/*
335 			 * Private flag for suninstall to run
336 			 * daemon during install.
337 			 */
338 			set_root_dir(optarg);
339 			break;
340 		case '?':
341 		default:
342 			usage();
343 		}
344 	}
345 
346 	/* demonize ourselves */
347 	if (debug_level < DEBUG_LEVEL_FORK) {
348 
349 		sigset_t mask;
350 
351 		(void) sigset(SIGUSR1, sigusr1);
352 
353 		(void) sigemptyset(&mask);
354 		(void) sigaddset(&mask, SIGUSR1);
355 		(void) sigprocmask(SIG_BLOCK, &mask, NULL);
356 
357 		if ((pid = fork()) == (pid_t)-1) {
358 			(void) fprintf(stderr,
359 			    "syseventd: fork failed - %s\n", strerror(errno));
360 			syseventd_exit(1);
361 		}
362 
363 		if (pid != 0) {
364 			/*
365 			 * parent
366 			 * handshake with the daemon so that dependents
367 			 * of the syseventd service don't start up until
368 			 * the service is actually functional
369 			 */
370 			int status;
371 			(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);
372 
373 			if (waitpid(pid, &status, 0) != pid) {
374 				/*
375 				 * child process signal indicating
376 				 * successful daemon initialization
377 				 */
378 				syseventd_exit(0);
379 			}
380 			/* child exited implying unsuccessful startup */
381 			syseventd_exit(1);
382 		}
383 
384 		/* child */
385 
386 		has_forked = 1;
387 		(void) sigset(SIGUSR1, SIG_DFL);
388 		(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);
389 
390 		(void) chdir("/");
391 		(void) setsid();
392 		if (debug_level <= 1) {
393 			closefrom(0);
394 			fd = open("/dev/null", 0);
395 			(void) dup2(fd, 1);
396 			(void) dup2(fd, 2);
397 			logflag = 1;
398 		}
399 	}
400 
401 	openlog("syseventd", LOG_PID, LOG_DAEMON);
402 
403 	(void) mutex_init(&err_mutex, USYNC_THREAD, NULL);
404 
405 	syseventd_print(8,
406 	    "syseventd started, debug level = %d\n", debug_level);
407 
408 	/* only one instance of syseventd can run at a time */
409 	if ((pid = enter_daemon_lock()) != getpid()) {
410 		syseventd_print(1,
411 		    "event daemon pid %ld already running\n", pid);
412 		exit(3);
413 	}
414 
415 	/* initialize semaphores and eventbuf */
416 	(void) sema_init(&sema_eventbuf, SE_EVENT_DISPATCH_CNT,
417 	    USYNC_THREAD, NULL);
418 	(void) sema_init(&sema_dispatch, 0, USYNC_THREAD, NULL);
419 	(void) sema_init(&sema_resource, SE_EVENT_DISPATCH_CNT,
420 	    USYNC_THREAD, NULL);
421 	(void) cond_init(&event_comp_cv, USYNC_THREAD, NULL);
422 	eventbuf = (sysevent_t **)calloc(SE_EVENT_DISPATCH_CNT,
423 	    sizeof (sysevent_t *));
424 	if (eventbuf == NULL) {
425 		syseventd_print(1, "Unable to allocate event buffer array\n");
426 		exit(2);
427 	}
428 	for (i = 0; i < SE_EVENT_DISPATCH_CNT; ++i) {
429 		eventbuf[i] = malloc(LOGEVENT_BUFSIZE);
430 		if (eventbuf[i] == NULL) {
431 			syseventd_print(1, "Unable to allocate event "
432 			    "buffers\n");
433 			exit(2);
434 		}
435 	}
436 
437 	(void) mutex_init(&client_tbl_lock, USYNC_THREAD, NULL);
438 	(void) mutex_init(&ev_comp_lock, USYNC_THREAD, NULL);
439 	(void) mutex_init(&door_lock, USYNC_THREAD, NULL);
440 	(void) rwlock_init(&mod_unload_lock, USYNC_THREAD, NULL);
441 
442 	event_compq = NULL;
443 
444 	syseventd_print(8, "start the message thread running\n");
445 
446 	/*
447 	 * Block all signals to all threads include the main thread.
448 	 * The sigwait_thr thread will process any signals and initiate
449 	 * a graceful recovery if possible.
450 	 */
451 	if (se_signal_blockall() < 0) {
452 		syseventd_err_print(INIT_SIG_BLOCK_ERR);
453 		syseventd_exit(2);
454 	}
455 
456 	if (thr_create(NULL, 0, (void *(*)(void *))dispatch_message,
457 	    (void *)0, 0, NULL) < 0) {
458 		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
459 		syseventd_exit(2);
460 	}
461 	if (thr_create(NULL, 0,
462 	    (void *(*)(void *))event_completion_thr, NULL,
463 	    THR_BOUND, NULL) != 0) {
464 		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
465 		syseventd_exit(2);
466 	}
467 	/* Create signal catching thread */
468 	if (thr_create(NULL, 0, sigwait_thr, NULL, 0, NULL) < 0) {
469 		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
470 		syseventd_exit(2);
471 	}
472 
473 	setbuf(stdout, (char *)NULL);
474 
475 	/* Initialize and load SLM clients */
476 	initialize_client_tbl();
477 	syseventd_init();
478 
479 	/* signal parent to indicate successful daemon initialization */
480 	if (has_forked) {
481 		if (kill(getppid(), SIGUSR1) != 0) {
482 			syseventd_err_print(
483 			    "signal to the parent failed - %s\n",
484 			    strerror(errno));
485 			syseventd_exit(2);
486 		}
487 	}
488 
489 	syseventd_print(8, "Pausing\n");
490 
491 	for (;;) {
492 		(void) pause();
493 	}
494 	/* NOTREACHED */
495 	return (0);
496 }
497 
498 /*
499  * door_upcall - called from the kernel via kernel sysevent door
500  *		to upload event(s).
501  *
502  *		This routine should never block.  If resources are
503  *		not available to immediately accept the event buffer
504  *		EAGAIN is returned to the kernel.
505  *
506  *		Once resources are available, the kernel is notified
507  *		via a modctl interface to resume event delivery to
508  *		syseventd.
509  *
510  */
511 /*ARGSUSED*/
512 static void
513 door_upcall(void *cookie, char *args, size_t alen,
514     door_desc_t *ddp, uint_t ndid)
515 {
516 	sysevent_t *ev;
517 	int rval;
518 
519 
520 	(void) mutex_lock(&door_lock);
521 	if (args == NULL) {
522 		rval = EINVAL;
523 	} else if (sema_trywait(&sema_eventbuf)) {
524 		ev = (sysevent_t *)
525 		    &((log_event_upcall_arg_t *)(void *)args)->buf;
526 		syseventd_print(2, "door_upcall: busy event %llx "
527 		    "retry\n", sysevent_get_seq(ev));
528 		rval = door_upcall_retval = EAGAIN;
529 	} else {
530 		/*
531 		 * Copy received message to local buffer.
532 		 */
533 		size_t size;
534 		ev = (sysevent_t *)
535 		    &((log_event_upcall_arg_t *)(void *)args)->buf;
536 
537 		syseventd_print(2, "door_upcall: event %llx in eventbuf %d\n",
538 		    sysevent_get_seq(ev), deliver_buf);
539 		size = sysevent_get_size(ev) > LOGEVENT_BUFSIZE ?
540 		    LOGEVENT_BUFSIZE : sysevent_get_size(ev);
541 		(void) bcopy(ev, eventbuf[deliver_buf], size);
542 		deliver_buf = (deliver_buf + 1) % SE_EVENT_DISPATCH_CNT;
543 		rval = 0;
544 		(void) sema_post(&sema_dispatch);
545 	}
546 
547 	(void) mutex_unlock(&door_lock);
548 
549 	/*
550 	 * Filling in return values for door_return
551 	 */
552 	(void) door_return((void *)&rval, sizeof (rval), NULL, 0);
553 	(void) door_return(NULL, 0, NULL, 0);
554 }
555 
556 /*
557  * dispatch_message - dispatch message thread
558  *			This thread spins until an event buffer is delivered
559  *			delivered from the kernel.
560  *
561  *			It will wait to dispatch an event to any clients
562  *			until adequate resources are available to process
563  *			the event buffer.
564  */
565 static void
566 dispatch_message(void)
567 {
568 	int error;
569 
570 	for (;;) {
571 		syseventd_print(3, "dispatch_message: thread started\n");
572 		/*
573 		 * Spin till a message comes
574 		 */
575 		while (sema_wait(&sema_dispatch) != 0) {
576 			syseventd_print(1,
577 			    "dispatch_message: sema_wait failed\n");
578 			(void) sleep(1);
579 		}
580 
581 		syseventd_print(3, "dispatch_message: sema_dispatch\n");
582 
583 		/*
584 		 * Wait for available resources
585 		 */
586 		while (sema_wait(&sema_resource) != 0) {
587 			syseventd_print(1, "dispatch_message: sema_wait "
588 			    "failed\n");
589 			(void) sleep(1);
590 		}
591 
592 		syseventd_print(2, "dispatch_message: eventbuf %d\n",
593 		    dispatch_buf);
594 
595 		/*
596 		 * Client dispatch
597 		 */
598 		do {
599 			error = dispatch();
600 		} while (error == EAGAIN);
601 
602 		syseventd_print(2, "eventbuf %d dispatched\n", dispatch_buf);
603 		dispatch_buf = (dispatch_buf + 1) % SE_EVENT_DISPATCH_CNT;
604 
605 		/*
606 		 * kernel received a busy signal -
607 		 * kickstart the kernel delivery thread
608 		 * door_lock blocks the kernel so we hold it for the
609 		 * shortest time possible.
610 		 */
611 		(void) mutex_lock(&door_lock);
612 		if (door_upcall_retval == EAGAIN && !fini_pending) {
613 			syseventd_print(3, "dispatch_message: retrigger "
614 			    "door_upcall_retval = %d\n",
615 			    door_upcall_retval);
616 			(void) modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH,
617 			    NULL, NULL, NULL, 0);
618 			door_upcall_retval = 0;
619 		}
620 		(void) mutex_unlock(&door_lock);
621 	}
622 	/* NOTREACHED */
623 }
624 
625 /*
626  * drain_eventq - Called to drain all pending events from the client's
627  *		event queue.
628  */
629 static void
630 drain_eventq(struct sysevent_client *scp, int status)
631 {
632 	struct event_dispatch_pkg *d_pkg;
633 	struct event_dispatchq *eventq, *eventq_next;
634 
635 	syseventd_print(3, "Draining eventq for client %d\n",
636 	    scp->client_num);
637 
638 	eventq = scp->eventq;
639 	while (eventq) {
640 		/*
641 		 * Mark all dispatched events as completed, but indicate the
642 		 * error status
643 		 */
644 		d_pkg = eventq->d_pkg;
645 
646 		syseventd_print(4, "drain event 0X%llx for client %d\n",
647 		    sysevent_get_seq(d_pkg->ev), scp->client_num);
648 
649 		if (d_pkg->completion_state == SE_NOT_DISPATCHED) {
650 			d_pkg->completion_status = status;
651 			d_pkg->completion_state = SE_COMPLETE;
652 			(void) sema_post(d_pkg->completion_sema);
653 		}
654 
655 		eventq_next = eventq->next;
656 		free(eventq);
657 		eventq = eventq_next;
658 		scp->eventq = eventq;
659 	}
660 }
661 
662 /*
663  * client_deliver_event_thr - Client delivery thread
664  *				This thread will process any events on this
665  *				client's eventq.
666  */
667 static void *
668 client_deliver_event_thr(void *arg)
669 {
670 	int flag, error, i;
671 	sysevent_t *ev;
672 	hrtime_t now;
673 	module_t *mod;
674 	struct event_dispatchq *eventq;
675 	struct sysevent_client *scp;
676 	struct event_dispatch_pkg *d_pkg;
677 
678 	scp = (struct sysevent_client *)arg;
679 	mod = (module_t *)scp->client_data;
680 
681 	(void) mutex_lock(&scp->client_lock);
682 	for (;;) {
683 		while (scp->eventq == NULL) {
684 
685 			/*
686 			 * Client has been suspended or unloaded, go no further.
687 			 */
688 			if (fini_pending) {
689 				scp->client_flags &= ~SE_CLIENT_THR_RUNNING;
690 				syseventd_print(3, "Client %d delivery thread "
691 				    "exiting flags: 0X%x\n",
692 				    scp->client_num, scp->client_flags);
693 				(void) mutex_unlock(&scp->client_lock);
694 				return (NULL);
695 			}
696 
697 			(void) cond_wait(&scp->client_cv, &scp->client_lock);
698 
699 		}
700 
701 		/*
702 		 * Process events from the head of the eventq, eventq is locked
703 		 * going into the processing.
704 		 */
705 		eventq = scp->eventq;
706 		while (eventq != NULL) {
707 			d_pkg = eventq->d_pkg;
708 			d_pkg->completion_state = SE_OUTSTANDING;
709 			scp->eventq = eventq->next;
710 			free(eventq);
711 			(void) mutex_unlock(&scp->client_lock);
712 
713 
714 			flag = error = 0;
715 			ev = d_pkg->ev;
716 
717 			syseventd_print(3, "Start delivery for client %d "
718 			    "with retry count %d\n",
719 			    scp->client_num, d_pkg->retry_count);
720 
721 			/*
722 			 * Retry limit has been reached by this client, indicate
723 			 * that no further retries are allowed
724 			 */
725 			for (i = 0; i <= scp->retry_limit; ++i) {
726 				if (i == scp->retry_limit)
727 					flag = SE_NO_RETRY;
728 
729 				/* Start the clock for the event delivery */
730 				d_pkg->start_time = gethrtime();
731 
732 				syseventd_print(9, "Deliver to module client "
733 				    "%s\n", mod->name);
734 
735 				error = mod->deliver_event(ev, flag);
736 
737 				/* Can not allow another retry */
738 				if (i == scp->retry_limit)
739 					error = 0;
740 
741 				/* Stop the clock */
742 				now = gethrtime();
743 
744 				/*
745 				 * Suspend event processing and drain the
746 				 * event q for latent clients
747 				 */
748 				if (now - d_pkg->start_time >
749 				    ((hrtime_t)SE_TIMEOUT * NANOSEC)) {
750 					syseventd_print(1, "Unresponsive "
751 					    "client %d: Draining eventq and "
752 					    "suspending event delivery\n",
753 					    scp->client_num);
754 					(void) mutex_lock(&scp->client_lock);
755 					scp->client_flags &=
756 					    ~SE_CLIENT_THR_RUNNING;
757 					scp->client_flags |=
758 					    SE_CLIENT_SUSPENDED;
759 
760 					/* Cleanup current event */
761 					d_pkg->completion_status = EFAULT;
762 					d_pkg->completion_state = SE_COMPLETE;
763 					(void) sema_post(
764 					    d_pkg->completion_sema);
765 
766 					/*
767 					 * Drain the remaining events from the
768 					 * queue.
769 					 */
770 					drain_eventq(scp, EINVAL);
771 					(void) mutex_unlock(&scp->client_lock);
772 					return (NULL);
773 				}
774 
775 				/* Event delivery retry requested */
776 				if (fini_pending || error != EAGAIN) {
777 					break;
778 				} else {
779 					(void) sleep(SE_RETRY_TIME);
780 				}
781 			}
782 
783 			(void) mutex_lock(&scp->client_lock);
784 			d_pkg->completion_status = error;
785 			d_pkg->completion_state = SE_COMPLETE;
786 			(void) sema_post(d_pkg->completion_sema);
787 			syseventd_print(3, "Completed delivery with "
788 			    "error %d\n", error);
789 			eventq = scp->eventq;
790 		}
791 
792 		syseventd_print(3, "No more events to process for client %d\n",
793 		    scp->client_num);
794 
795 		/* Return if this was a synchronous delivery */
796 		if (!SE_CLIENT_IS_THR_RUNNING(scp)) {
797 			(void) mutex_unlock(&scp->client_lock);
798 			return (NULL);
799 		}
800 
801 	}
802 }
803 
804 /*
805  * client_deliver_event - Client specific event delivery
806  *			This routine will allocate and initialize the
807  *			neccessary per-client dispatch data.
808  *
809  *			If the eventq is not empty, it may be assumed that
810  *			a delivery thread exists for this client and the
811  *			dispatch data is appended to the eventq.
812  *
813  *			The dispatch package is freed by the event completion
814  *			thread (event_completion_thr) and the eventq entry
815  *			is freed by the event delivery thread.
816  */
817 static struct event_dispatch_pkg *
818 client_deliver_event(struct sysevent_client *scp, sysevent_t *ev,
819 	sema_t *completion_sema)
820 {
821 	size_t ev_sz = sysevent_get_size(ev);
822 	struct event_dispatchq *newq, *tmp;
823 	struct event_dispatch_pkg *d_pkg;
824 
825 	syseventd_print(3, "client_deliver_event: id 0x%llx size %d\n",
826 	    (longlong_t)sysevent_get_seq(ev), ev_sz);
827 	if (debug_level == 9) {
828 		se_print(stdout, ev);
829 	}
830 
831 	/*
832 	 * Check for suspended client
833 	 */
834 	(void) mutex_lock(&scp->client_lock);
835 	if (SE_CLIENT_IS_SUSPENDED(scp) || !SE_CLIENT_IS_THR_RUNNING(scp)) {
836 		(void) mutex_unlock(&scp->client_lock);
837 		return (NULL);
838 	}
839 
840 	/*
841 	 * Allocate a new dispatch package and eventq entry
842 	 */
843 	newq = (struct event_dispatchq *)malloc(
844 	    sizeof (struct event_dispatchq));
845 	if (newq == NULL) {
846 		(void) mutex_unlock(&scp->client_lock);
847 		return (NULL);
848 	}
849 
850 	d_pkg = (struct event_dispatch_pkg *)malloc(
851 	    sizeof (struct event_dispatch_pkg));
852 	if (d_pkg == NULL) {
853 		free(newq);
854 		(void) mutex_unlock(&scp->client_lock);
855 		return (NULL);
856 	}
857 
858 	/* Initialize the dispatch package */
859 	d_pkg->scp = scp;
860 	d_pkg->retry_count = 0;
861 	d_pkg->completion_status = 0;
862 	d_pkg->completion_state = SE_NOT_DISPATCHED;
863 	d_pkg->completion_sema = completion_sema;
864 	d_pkg->ev = ev;
865 	newq->d_pkg = d_pkg;
866 	newq->next = NULL;
867 
868 	if (scp->eventq != NULL) {
869 
870 		/* Add entry to the end of the eventq */
871 		tmp = scp->eventq;
872 		while (tmp->next != NULL)
873 			tmp = tmp->next;
874 		tmp->next = newq;
875 	} else {
876 		/* event queue empty, wakeup delivery thread */
877 		scp->eventq = newq;
878 		(void) cond_signal(&scp->client_cv);
879 	}
880 	(void) mutex_unlock(&scp->client_lock);
881 
882 	return (d_pkg);
883 }
884 
885 /*
886  * event_completion_thr - Event completion thread.  This thread routine
887  *			waits for all client delivery thread to complete
888  *			delivery of a particular event.
889  */
890 static void
891 event_completion_thr()
892 {
893 	int ret, i, client_count, ok_to_free;
894 	sysevent_id_t eid;
895 	struct sysevent_client *scp;
896 	struct ev_completion *ev_comp;
897 	struct event_dispatchq *dispatchq;
898 	struct event_dispatch_pkg *d_pkg;
899 
900 	(void) mutex_lock(&ev_comp_lock);
901 	for (;;) {
902 		while (event_compq == NULL) {
903 			(void) cond_wait(&event_comp_cv, &ev_comp_lock);
904 		}
905 
906 		/*
907 		 * Process event completions from the head of the
908 		 * completion queue
909 		 */
910 		ev_comp = event_compq;
911 		while (ev_comp) {
912 			(void) mutex_unlock(&ev_comp_lock);
913 			eid.eid_seq = sysevent_get_seq(ev_comp->ev);
914 			sysevent_get_time(ev_comp->ev, &eid.eid_ts);
915 			client_count = ev_comp->client_count;
916 			ok_to_free = 1;
917 
918 			syseventd_print(3, "Wait for event completion of "
919 			    "event 0X%llx on %d clients\n",
920 			    eid.eid_seq, client_count);
921 
922 			while (client_count) {
923 				syseventd_print(9, "Waiting for %d clients on "
924 				    "event id 0X%llx\n", client_count,
925 				    eid.eid_seq);
926 
927 				(void) sema_wait(&ev_comp->client_sema);
928 				--client_count;
929 			}
930 
931 			syseventd_print(3, "Cleaning up clients for event "
932 			    "0X%llx\n", eid.eid_seq);
933 			dispatchq = ev_comp->dispatch_list;
934 			while (dispatchq != NULL) {
935 				d_pkg = dispatchq->d_pkg;
936 				scp = d_pkg->scp;
937 
938 				if (d_pkg->completion_status == EAGAIN)
939 					ok_to_free = 0;
940 
941 				syseventd_print(4, "Delivery of 0X%llx "
942 				    "complete for client %d retry count %d "
943 				    "status %d\n", eid.eid_seq,
944 				    scp->client_num,
945 				    d_pkg->retry_count,
946 				    d_pkg->completion_status);
947 
948 				free(d_pkg);
949 				ev_comp->dispatch_list = dispatchq->next;
950 				free(dispatchq);
951 				dispatchq = ev_comp->dispatch_list;
952 			}
953 
954 			if (ok_to_free) {
955 				for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
956 					if ((ret = modctl(MODEVENTS,
957 					    (uintptr_t)MODEVENTS_FREEDATA,
958 					    (uintptr_t)&eid, NULL,
959 					    NULL, 0)) != 0) {
960 						syseventd_print(1, "attempting "
961 						    "to free event 0X%llx\n",
962 						    eid.eid_seq);
963 
964 						/*
965 						 * Kernel may need time to
966 						 * move this event buffer to
967 						 * the sysevent sent queue
968 						 */
969 						(void) sleep(1);
970 					} else {
971 						break;
972 					}
973 				}
974 				if (ret) {
975 					syseventd_print(1, "Unable to free "
976 					    "event 0X%llx from the "
977 					    "kernel\n", eid.eid_seq);
978 				}
979 			} else {
980 				syseventd_print(1, "Not freeing event 0X%llx\n",
981 				    eid.eid_seq);
982 			}
983 
984 			syseventd_print(2, "Event delivery complete for id "
985 			    "0X%llx\n", eid.eid_seq);
986 
987 			(void) mutex_lock(&ev_comp_lock);
988 			event_compq = ev_comp->next;
989 			free(ev_comp->ev);
990 			free(ev_comp);
991 			ev_comp = event_compq;
992 			(void) sema_post(&sema_resource);
993 		}
994 
995 		/*
996 		 * Event completion queue is empty, signal possible unload
997 		 * operation
998 		 */
999 		(void) cond_signal(&event_comp_cv);
1000 
1001 		syseventd_print(3, "No more events\n");
1002 	}
1003 }
1004 
1005 /*
1006  * dispatch - Dispatch the current event buffer to all valid SLM clients.
1007  */
1008 static int
1009 dispatch(void)
1010 {
1011 	int ev_sz, i, client_count = 0;
1012 	sysevent_t *new_ev;
1013 	sysevent_id_t eid;
1014 	struct ev_completion *ev_comp, *tmp;
1015 	struct event_dispatchq *dispatchq, *client_list;
1016 	struct event_dispatch_pkg *d_pkg;
1017 
1018 	/* Check for module unload operation */
1019 	if (rw_tryrdlock(&mod_unload_lock) != 0) {
1020 		syseventd_print(2, "unload in progress abort delivery\n");
1021 		(void) sema_post(&sema_eventbuf);
1022 		(void) sema_post(&sema_resource);
1023 		return (0);
1024 	}
1025 
1026 	syseventd_print(3, "deliver dispatch buffer %d", dispatch_buf);
1027 	eid.eid_seq = sysevent_get_seq(eventbuf[dispatch_buf]);
1028 	sysevent_get_time(eventbuf[dispatch_buf], &eid.eid_ts);
1029 	syseventd_print(3, "deliver msg id: 0x%llx\n", eid.eid_seq);
1030 
1031 	/*
1032 	 * ev_comp is used to hold event completion data.  It is freed
1033 	 * by the event completion thread (event_completion_thr).
1034 	 */
1035 	ev_comp = (struct ev_completion *)
1036 	    malloc(sizeof (struct ev_completion));
1037 	if (ev_comp == NULL) {
1038 		(void) rw_unlock(&mod_unload_lock);
1039 		syseventd_print(1, "Can not allocate event completion buffer "
1040 		    "for event id 0X%llx\n", eid.eid_seq);
1041 		return (EAGAIN);
1042 	}
1043 	ev_comp->dispatch_list = NULL;
1044 	ev_comp->next = NULL;
1045 	(void) sema_init(&ev_comp->client_sema, 0, USYNC_THREAD, NULL);
1046 
1047 	ev_sz = sysevent_get_size(eventbuf[dispatch_buf]);
1048 	new_ev = calloc(1, ev_sz);
1049 	if (new_ev == NULL) {
1050 		free(ev_comp);
1051 		(void) rw_unlock(&mod_unload_lock);
1052 		syseventd_print(1, "Can not allocate new event buffer "
1053 		"for event id 0X%llx\n", eid.eid_seq);
1054 		return (EAGAIN);
1055 	}
1056 
1057 
1058 	/*
1059 	 * For long messages, copy additional data from kernel
1060 	 */
1061 	if (ev_sz > LOGEVENT_BUFSIZE) {
1062 		int ret = 0;
1063 
1064 		/* Ok to release eventbuf for next event buffer from kernel */
1065 		(void) sema_post(&sema_eventbuf);
1066 
1067 		for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
1068 			if ((ret = modctl(MODEVENTS,
1069 			    (uintptr_t)MODEVENTS_GETDATA,
1070 			    (uintptr_t)&eid,
1071 			    (uintptr_t)ev_sz,
1072 			    (uintptr_t)new_ev, 0))
1073 			    == 0)
1074 				break;
1075 			else
1076 				(void) sleep(1);
1077 		}
1078 		if (ret) {
1079 			syseventd_print(1, "GET_DATA failed for 0X%llx:%llx\n",
1080 			    eid.eid_ts, eid.eid_seq);
1081 			free(new_ev);
1082 			free(ev_comp);
1083 			(void) rw_unlock(&mod_unload_lock);
1084 			return (EAGAIN);
1085 		}
1086 	} else {
1087 		(void) bcopy(eventbuf[dispatch_buf], new_ev, ev_sz);
1088 		/* Ok to release eventbuf for next event buffer from kernel */
1089 		(void) sema_post(&sema_eventbuf);
1090 	}
1091 
1092 
1093 	/*
1094 	 * Deliver a copy of eventbuf to clients so
1095 	 * eventbuf can be used for the next message
1096 	 */
1097 	for (i = 0; i < MAX_SLM; ++i) {
1098 
1099 		/* Don't bother for suspended or unloaded clients */
1100 		if (!SE_CLIENT_IS_LOADED(sysevent_client_tbl[i]) ||
1101 		    SE_CLIENT_IS_SUSPENDED(sysevent_client_tbl[i]))
1102 			continue;
1103 
1104 		/*
1105 		 * Allocate event dispatch queue entry.  All queue entries
1106 		 * are freed by the event completion thread as client
1107 		 * delivery completes.
1108 		 */
1109 		dispatchq = (struct event_dispatchq *)malloc(
1110 		    sizeof (struct event_dispatchq));
1111 		if (dispatchq == NULL) {
1112 			syseventd_print(1, "Can not allocate dispatch q "
1113 			"for event id 0X%llx client %d\n", eid.eid_seq, i);
1114 			continue;
1115 		}
1116 		dispatchq->next = NULL;
1117 
1118 		/* Initiate client delivery */
1119 		d_pkg = client_deliver_event(sysevent_client_tbl[i],
1120 		    new_ev, &ev_comp->client_sema);
1121 		if (d_pkg == NULL) {
1122 			syseventd_print(1, "Can not allocate dispatch "
1123 			    "package for event id 0X%llx client %d\n",
1124 			    eid.eid_seq, i);
1125 			free(dispatchq);
1126 			continue;
1127 		}
1128 		dispatchq->d_pkg = d_pkg;
1129 		++client_count;
1130 
1131 		if (ev_comp->dispatch_list == NULL) {
1132 			ev_comp->dispatch_list = dispatchq;
1133 			client_list = dispatchq;
1134 		} else {
1135 			client_list->next = dispatchq;
1136 			client_list = client_list->next;
1137 		}
1138 	}
1139 
1140 	ev_comp->client_count = client_count;
1141 	ev_comp->ev = new_ev;
1142 
1143 	(void) mutex_lock(&ev_comp_lock);
1144 
1145 	if (event_compq == NULL) {
1146 		syseventd_print(3, "Wakeup event completion thread for "
1147 		    "id 0X%llx\n", eid.eid_seq);
1148 		event_compq = ev_comp;
1149 		(void) cond_signal(&event_comp_cv);
1150 	} else {
1151 
1152 		/* Add entry to the end of the event completion queue */
1153 		tmp = event_compq;
1154 		while (tmp->next != NULL)
1155 			tmp = tmp->next;
1156 		tmp->next = ev_comp;
1157 		syseventd_print(3, "event added to completion queue for "
1158 		    "id 0X%llx\n", eid.eid_seq);
1159 	}
1160 	(void) mutex_unlock(&ev_comp_lock);
1161 	(void) rw_unlock(&mod_unload_lock);
1162 
1163 	return (0);
1164 }
1165 
1166 #define	MODULE_DIR_HW	"/usr/platform/%s/lib/sysevent/modules/"
1167 #define	MODULE_DIR_GEN	"/usr/lib/sysevent/modules/"
1168 #define	MOD_DIR_NUM	3
1169 static char dirname[MOD_DIR_NUM][MAXPATHLEN];
1170 
1171 static char *
1172 dir_num2name(int dirnum)
1173 {
1174 	char infobuf[MAXPATHLEN];
1175 
1176 	if (dirnum >= MOD_DIR_NUM)
1177 		return (NULL);
1178 
1179 	if (dirname[0][0] == '\0') {
1180 		if (sysinfo(SI_PLATFORM, infobuf, MAXPATHLEN) == -1) {
1181 			syseventd_print(1, "dir_num2name: "
1182 			    "sysinfo error %s\n", strerror(errno));
1183 			return (NULL);
1184 		} else if (snprintf(dirname[0], sizeof (dirname[0]),
1185 		    MODULE_DIR_HW, infobuf) >= sizeof (dirname[0])) {
1186 			syseventd_print(1, "dir_num2name: "
1187 			    "platform name too long: %s\n",
1188 			    infobuf);
1189 			return (NULL);
1190 		}
1191 		if (sysinfo(SI_MACHINE, infobuf, MAXPATHLEN) == -1) {
1192 			syseventd_print(1, "dir_num2name: "
1193 			    "sysinfo error %s\n", strerror(errno));
1194 			return (NULL);
1195 		} else if (snprintf(dirname[1], sizeof (dirname[1]),
1196 		    MODULE_DIR_HW, infobuf) >= sizeof (dirname[1])) {
1197 			syseventd_print(1, "dir_num2name: "
1198 			    "machine name too long: %s\n",
1199 			    infobuf);
1200 			return (NULL);
1201 		}
1202 		(void) strcpy(dirname[2], MODULE_DIR_GEN);
1203 	}
1204 
1205 	return (dirname[dirnum]);
1206 }
1207 
1208 
1209 /*
1210  * load_modules - Load modules found in the common syseventd module directories
1211  *		Modules that do not provide valid interfaces are rejected.
1212  */
1213 static void
1214 load_modules(char *dirname)
1215 {
1216 	int client_id;
1217 	DIR *mod_dir;
1218 	module_t *mod;
1219 	struct dirent *entp;
1220 	struct slm_mod_ops *mod_ops;
1221 	struct sysevent_client *scp;
1222 
1223 	if (dirname == NULL)
1224 		return;
1225 
1226 	/* Return silently if module directory does not exist */
1227 	if ((mod_dir = opendir(dirname)) == NULL) {
1228 		syseventd_print(1, "Unable to open module directory %s: %s\n",
1229 		    dirname, strerror(errno));
1230 		return;
1231 	}
1232 
1233 	syseventd_print(3, "loading modules from %s\n", dirname);
1234 
1235 	/*
1236 	 * Go through directory, looking for files ending with .so
1237 	 */
1238 	while ((entp = readdir(mod_dir)) != NULL) {
1239 		void *dlh, *f;
1240 		char *tmp, modpath[MAXPATHLEN];
1241 
1242 		if (((tmp = strstr(entp->d_name, MODULE_SUFFIX)) == NULL) ||
1243 		    (tmp[strlen(MODULE_SUFFIX)] != '\0')) {
1244 			continue;
1245 		}
1246 
1247 		if (snprintf(modpath, sizeof (modpath), "%s%s",
1248 		    dirname, entp->d_name) >= sizeof (modpath)) {
1249 			syseventd_err_print(INIT_PATH_ERR, modpath);
1250 			continue;
1251 		}
1252 		if ((dlh = dlopen(modpath, RTLD_LAZY)) == NULL) {
1253 			syseventd_err_print(LOAD_MOD_DLOPEN_ERR,
1254 			    modpath, dlerror());
1255 			continue;
1256 		} else if ((f = dlsym(dlh, EVENT_INIT)) == NULL) {
1257 			syseventd_err_print(LOAD_MOD_NO_INIT,
1258 			    modpath, dlerror());
1259 			(void) dlclose(dlh);
1260 			continue;
1261 		}
1262 
1263 		mod = malloc(sizeof (*mod));
1264 		if (mod == NULL) {
1265 			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod",
1266 			    strerror(errno));
1267 			(void) dlclose(dlh);
1268 			continue;
1269 		}
1270 
1271 		mod->name = strdup(entp->d_name);
1272 		if (mod->name == NULL) {
1273 			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod->name",
1274 			    strerror(errno));
1275 			(void) dlclose(dlh);
1276 			free(mod);
1277 			continue;
1278 		}
1279 
1280 		mod->dlhandle = dlh;
1281 		mod->event_mod_init = (struct slm_mod_ops *(*)())f;
1282 
1283 		/* load in other module functions */
1284 		mod->event_mod_fini = (void (*)())dlsym(dlh, EVENT_FINI);
1285 		if (mod->event_mod_fini == NULL) {
1286 			syseventd_err_print(LOAD_MOD_DLSYM_ERR, mod->name,
1287 			    dlerror());
1288 			free(mod->name);
1289 			free(mod);
1290 			(void) dlclose(dlh);
1291 			continue;
1292 		}
1293 
1294 		/* Call module init routine */
1295 		if ((mod_ops = mod->event_mod_init()) == NULL) {
1296 			syseventd_err_print(LOAD_MOD_EINVAL, mod->name);
1297 			free(mod->name);
1298 			free(mod);
1299 			(void) dlclose(dlh);
1300 			continue;
1301 		}
1302 		if (mod_ops->major_version != SE_MAJOR_VERSION) {
1303 			syseventd_err_print(LOAD_MOD_VERSION_MISMATCH,
1304 			    mod->name, SE_MAJOR_VERSION,
1305 			    mod_ops->major_version);
1306 			mod->event_mod_fini();
1307 			free(mod->name);
1308 			free(mod);
1309 			(void) dlclose(dlh);
1310 			continue;
1311 		}
1312 
1313 		mod->deliver_event = mod_ops->deliver_event;
1314 		/* Add module entry to client list */
1315 		if ((client_id = insert_client((void *)mod, SLM_CLIENT,
1316 		    (mod_ops->retry_limit <= SE_MAX_RETRY_LIMIT ?
1317 		    mod_ops->retry_limit : SE_MAX_RETRY_LIMIT))) < 0) {
1318 			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client",
1319 			    strerror(errno));
1320 			mod->event_mod_fini();
1321 			free(mod->name);
1322 			free(mod);
1323 			(void) dlclose(dlh);
1324 			continue;
1325 		}
1326 
1327 		scp = sysevent_client_tbl[client_id];
1328 		++concurrency_level;
1329 		(void) thr_setconcurrency(concurrency_level);
1330 		if (thr_create(NULL, 0, client_deliver_event_thr,
1331 		    scp, THR_BOUND, &scp->tid) != 0) {
1332 
1333 			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client",
1334 			    strerror(errno));
1335 			mod->event_mod_fini();
1336 			free(mod->name);
1337 			free(mod);
1338 			(void) dlclose(dlh);
1339 			continue;
1340 		}
1341 		scp->client_flags |= SE_CLIENT_THR_RUNNING;
1342 
1343 		syseventd_print(3, "loaded module %s\n", entp->d_name);
1344 	}
1345 
1346 	(void) closedir(mod_dir);
1347 	syseventd_print(3, "modules loaded\n");
1348 }
1349 
1350 /*
1351  * unload_modules - modules are unloaded prior to graceful shutdown or
1352  *			before restarting the daemon upon receipt of
1353  *			SIGHUP.
1354  */
1355 static void
1356 unload_modules(int sig)
1357 {
1358 	int			i, count, done;
1359 	module_t		*mod;
1360 	struct sysevent_client	*scp;
1361 
1362 	/*
1363 	 * unload modules that are ready, skip those that have not
1364 	 * drained their event queues.
1365 	 */
1366 	count = done = 0;
1367 	while (done < MAX_SLM) {
1368 		/* Don't wait indefinitely for unresponsive clients */
1369 		if (sig != SIGHUP && count > SE_TIMEOUT) {
1370 			break;
1371 		}
1372 
1373 		done = 0;
1374 
1375 		/* Shutdown clients */
1376 		for (i = 0; i < MAX_SLM; ++i) {
1377 			scp = sysevent_client_tbl[i];
1378 			if (mutex_trylock(&scp->client_lock) == 0) {
1379 				if (scp->client_type != SLM_CLIENT ||
1380 				    scp->client_data == NULL) {
1381 					(void) mutex_unlock(&scp->client_lock);
1382 					done++;
1383 					continue;
1384 				}
1385 			} else {
1386 				syseventd_print(3, "Skipping unload of "
1387 				    "client %d: client locked\n",
1388 				    scp->client_num);
1389 				continue;
1390 			}
1391 
1392 			/*
1393 			 * Drain the eventq and wait for delivery thread to
1394 			 * cleanly exit
1395 			 */
1396 			drain_eventq(scp, EAGAIN);
1397 			(void) cond_signal(&scp->client_cv);
1398 			(void) mutex_unlock(&scp->client_lock);
1399 			(void) thr_join(scp->tid, NULL, NULL);
1400 
1401 			/*
1402 			 * It is now safe to unload the module
1403 			 */
1404 			mod = (module_t *)scp->client_data;
1405 			syseventd_print(2, "Unload %s\n", mod->name);
1406 			mod->event_mod_fini();
1407 			(void) dlclose(mod->dlhandle);
1408 			free(mod->name);
1409 			(void) mutex_lock(&client_tbl_lock);
1410 			delete_client(i);
1411 			(void) mutex_unlock(&client_tbl_lock);
1412 			++done;
1413 
1414 		}
1415 		++count;
1416 		(void) sleep(1);
1417 	}
1418 
1419 	/*
1420 	 * Wait for event completions
1421 	 */
1422 	syseventd_print(2, "waiting for event completions\n");
1423 	(void) mutex_lock(&ev_comp_lock);
1424 	while (event_compq != NULL) {
1425 		(void) cond_wait(&event_comp_cv, &ev_comp_lock);
1426 	}
1427 	(void) mutex_unlock(&ev_comp_lock);
1428 }
1429 
1430 /*
1431  * syseventd_init - Called at daemon (re)start-up time to load modules
1432  *			and kickstart the kernel delivery engine.
1433  */
1434 static void
1435 syseventd_init()
1436 {
1437 	int i, fd;
1438 	char local_door_file[PATH_MAX + 1];
1439 
1440 	fini_pending = 0;
1441 
1442 	concurrency_level = MIN_CONCURRENCY_LEVEL;
1443 	(void) thr_setconcurrency(concurrency_level);
1444 
1445 	/*
1446 	 * Load client modules for event delivering
1447 	 */
1448 	for (i = 0; i < MOD_DIR_NUM; ++i) {
1449 		load_modules(dir_num2name(i));
1450 	}
1451 
1452 	/*
1453 	 * Create kernel delivery door service
1454 	 */
1455 	syseventd_print(8, "Create a door for kernel upcalls\n");
1456 	if (snprintf(local_door_file, sizeof (local_door_file), "%s%s",
1457 	    root_dir, LOGEVENT_DOOR_UPCALL) >= sizeof (local_door_file)) {
1458 		syseventd_err_print(INIT_PATH_ERR, local_door_file);
1459 		syseventd_exit(5);
1460 	}
1461 
1462 	/*
1463 	 * Remove door file for robustness.
1464 	 */
1465 	if (unlink(local_door_file) != 0)
1466 		syseventd_print(8, "Unlink of %s failed.\n", local_door_file);
1467 
1468 	fd = open(local_door_file, O_CREAT|O_RDWR, S_IREAD|S_IWRITE);
1469 	if ((fd == -1) && (errno != EEXIST)) {
1470 		syseventd_err_print(INIT_OPEN_DOOR_ERR, strerror(errno));
1471 		syseventd_exit(5);
1472 	}
1473 	(void) close(fd);
1474 
1475 	upcall_door = door_create(door_upcall, NULL,
1476 	    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
1477 	if (upcall_door == -1) {
1478 		syseventd_err_print(INIT_CREATE_DOOR_ERR, strerror(errno));
1479 		syseventd_exit(5);
1480 	}
1481 
1482 	(void) fdetach(local_door_file);
1483 retry:
1484 	if (fattach(upcall_door, local_door_file) != 0) {
1485 		if (errno == EBUSY)
1486 			goto retry;
1487 		syseventd_err_print(INIT_FATTACH_ERR, strerror(errno));
1488 		(void) door_revoke(upcall_door);
1489 		syseventd_exit(5);
1490 	}
1491 
1492 	/*
1493 	 * Tell kernel the door name and start delivery
1494 	 */
1495 	syseventd_print(2,
1496 	    "local_door_file = %s\n", local_door_file);
1497 	if (modctl(MODEVENTS,
1498 	    (uintptr_t)MODEVENTS_SET_DOOR_UPCALL_FILENAME,
1499 	    (uintptr_t)local_door_file, NULL, NULL, 0) < 0) {
1500 		syseventd_err_print(INIT_DOOR_NAME_ERR, strerror(errno));
1501 		syseventd_exit(6);
1502 	}
1503 
1504 	door_upcall_retval = 0;
1505 
1506 	if (modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0)
1507 	    < 0) {
1508 		syseventd_err_print(KERNEL_REPLAY_ERR, strerror(errno));
1509 		syseventd_exit(7);
1510 	}
1511 }
1512 
1513 /*
1514  * syseventd_fini - shut down daemon, but do not exit
1515  */
1516 static void
1517 syseventd_fini(int sig)
1518 {
1519 	/*
1520 	 * Indicate that event queues should be drained and no
1521 	 * additional events be accepted
1522 	 */
1523 	fini_pending = 1;
1524 
1525 	/* Close the kernel event door to halt delivery */
1526 	(void) door_revoke(upcall_door);
1527 
1528 	syseventd_print(1, "Unloading modules\n");
1529 	(void) rw_wrlock(&mod_unload_lock);
1530 	unload_modules(sig);
1531 	(void) rw_unlock(&mod_unload_lock);
1532 
1533 }
1534 
1535 /*
1536  * enter_daemon_lock - lock the daemon file lock
1537  *
1538  * Use an advisory lock to ensure that only one daemon process is active
1539  * in the system at any point in time.	If the lock is held by another
1540  * process, do not block but return the pid owner of the lock to the
1541  * caller immediately.	The lock is cleared if the holding daemon process
1542  * exits for any reason even if the lock file remains, so the daemon can
1543  * be restarted if necessary.  The lock file is DAEMON_LOCK_FILE.
1544  */
1545 static pid_t
1546 enter_daemon_lock(void)
1547 {
1548 	struct flock	lock;
1549 
1550 	syseventd_print(8, "enter_daemon_lock: lock file = %s\n",
1551 	    DAEMON_LOCK_FILE);
1552 
1553 	if (snprintf(local_lock_file, sizeof (local_lock_file), "%s%s",
1554 	    root_dir, DAEMON_LOCK_FILE) >= sizeof (local_lock_file)) {
1555 		syseventd_err_print(INIT_PATH_ERR, local_lock_file);
1556 		syseventd_exit(8);
1557 	}
1558 	daemon_lock_fd = open(local_lock_file, O_CREAT|O_RDWR, 0644);
1559 	if (daemon_lock_fd < 0) {
1560 		syseventd_err_print(INIT_LOCK_OPEN_ERR,
1561 		    local_lock_file, strerror(errno));
1562 		syseventd_exit(8);
1563 	}
1564 
1565 	lock.l_type = F_WRLCK;
1566 	lock.l_whence = SEEK_SET;
1567 	lock.l_start = 0;
1568 	lock.l_len = 0;
1569 
1570 	if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
1571 		if (fcntl(daemon_lock_fd, F_GETLK, &lock) == -1) {
1572 			syseventd_err_print(INIT_LOCK_ERR,
1573 			    local_lock_file, strerror(errno));
1574 			exit(2);
1575 		}
1576 		return (lock.l_pid);
1577 	}
1578 	hold_daemon_lock = 1;
1579 
1580 	return (getpid());
1581 }
1582 
1583 /*
1584  * exit_daemon_lock - release the daemon file lock
1585  */
1586 static void
1587 exit_daemon_lock(void)
1588 {
1589 	struct flock lock;
1590 
1591 	lock.l_type = F_UNLCK;
1592 	lock.l_whence = SEEK_SET;
1593 	lock.l_start = 0;
1594 	lock.l_len = 0;
1595 
1596 	if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
1597 		syseventd_err_print(INIT_UNLOCK_ERR,
1598 		    local_lock_file, strerror(errno));
1599 	}
1600 
1601 	if (close(daemon_lock_fd) == -1) {
1602 		syseventd_err_print(INIT_LOCK_CLOSE_ERR,
1603 		    local_lock_file, strerror(errno));
1604 		exit(-1);
1605 	}
1606 }
1607 
1608 /*
1609  * syseventd_err_print - print error messages to the terminal if not
1610  *			yet daemonized or to syslog.
1611  */
1612 /*PRINTFLIKE1*/
1613 void
1614 syseventd_err_print(char *message, ...)
1615 {
1616 	va_list ap;
1617 
1618 	(void) mutex_lock(&err_mutex);
1619 	va_start(ap, message);
1620 
1621 	if (logflag) {
1622 		(void) vsyslog(LOG_ERR, message, ap);
1623 	} else {
1624 		(void) fprintf(stderr, "%s: ", prog);
1625 		(void) vfprintf(stderr, message, ap);
1626 	}
1627 	va_end(ap);
1628 	(void) mutex_unlock(&err_mutex);
1629 }
1630 
1631 /*
1632  * syseventd_print -  print messages to the terminal or to syslog
1633  *			the following levels are implemented:
1634  *
1635  * 1 - transient errors that does not affect normal program flow
1636  * 2 - upcall/dispatch interaction
1637  * 3 - program flow trace as each message goes through the daemon
1638  * 8 - all the nit-gritty details of startup and shutdown
1639  * 9 - very verbose event flow tracing (no daemonization of syseventd)
1640  *
1641  */
1642 /*PRINTFLIKE2*/
1643 void
1644 syseventd_print(int level, char *message, ...)
1645 {
1646 	va_list ap;
1647 	static int newline = 1;
1648 
1649 	if (level > debug_level) {
1650 		return;
1651 	}
1652 
1653 	(void) mutex_lock(&err_mutex);
1654 	va_start(ap, message);
1655 	if (logflag) {
1656 		(void) syslog(LOG_DEBUG, "%s[%ld]: ",
1657 		    prog, getpid());
1658 		(void) vsyslog(LOG_DEBUG, message, ap);
1659 	} else {
1660 		if (newline) {
1661 			(void) fprintf(stdout, "%s[%ld]: ",
1662 			    prog, getpid());
1663 			(void) vfprintf(stdout, message, ap);
1664 		} else {
1665 			(void) vfprintf(stdout, message, ap);
1666 		}
1667 	}
1668 	if (message[strlen(message)-1] == '\n') {
1669 		newline = 1;
1670 	} else {
1671 		newline = 0;
1672 	}
1673 	va_end(ap);
1674 	(void) mutex_unlock(&err_mutex);
1675 }
1676