xref: /titanic_44/usr/src/cmd/fm/modules/sun4v/etm/etm.c (revision e4b86885570d77af552e9cf94f142f4d744fb8c8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * etm.c	FMA Event Transport Module implementation, a plugin of FMD
29  *		for sun4v/Ontario
30  *
31  * plugin for sending/receiving FMA events to/from service processor
32  */
33 
34 #pragma ident	"%Z%%M%	%I%	%E% SMI"
35 
36 /*
37  * --------------------------------- includes --------------------------------
38  */
39 
40 #include <sys/fm/protocol.h>
41 #include <sys/fm/util.h>
42 #include <sys/fm/ldom.h>
43 #include <sys/strlog.h>
44 #include <sys/syslog.h>
45 #include <netinet/in.h>
46 #include <fm/fmd_api.h>
47 
48 #include "etm_xport_api.h"
49 #include "etm_etm_proto.h"
50 #include "etm_impl.h"
51 
52 #include <pthread.h>
53 #include <signal.h>
54 #include <stropts.h>
55 #include <locale.h>
56 #include <strings.h>
57 #include <stdlib.h>
58 #include <unistd.h>
59 #include <limits.h>
60 #include <values.h>
61 #include <alloca.h>
62 #include <errno.h>
63 #include <fcntl.h>
64 #include <time.h>
65 
66 /*
67  * ----------------------------- forward decls -------------------------------
68  */
69 
70 static void
71 etm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class);
72 
73 /*
74  * ------------------------- data structs for FMD ----------------------------
75  */
76 
/*
 * Entry-point vector registered with FMD; only fmdo_recv is implemented,
 * all other entry points are explicitly unused. Slot order is fixed by
 * the fmd_hdl_ops_t layout.
 */
static const fmd_hdl_ops_t fmd_ops = {
	etm_recv,	/* fmdo_recv */
	NULL,		/* fmdo_timeout */
	NULL,		/* fmdo_close */
	NULL,		/* fmdo_stats */
	NULL,		/* fmdo_gc */
	NULL,		/* fmdo_send */
};
85 
/*
 * Configurable properties (name, FMD type, default value) registered
 * with FMD; the table is terminated by a NULL-name sentinel entry.
 */
static const fmd_prop_t fmd_props[] = {
	{ ETM_PROP_NM_XPORT_ADDRS,	FMD_TYPE_STRING, "" },
	{ ETM_PROP_NM_DEBUG_LVL,	FMD_TYPE_INT32, "0" },
	{ ETM_PROP_NM_DEBUG_MAX_EV_CNT,	FMD_TYPE_INT32, "-1" },
	{ ETM_PROP_NM_CONSOLE,		FMD_TYPE_BOOL, "false" },
	{ ETM_PROP_NM_SYSLOGD,		FMD_TYPE_BOOL, "true" },
	{ ETM_PROP_NM_FACILITY,		FMD_TYPE_STRING, "LOG_DAEMON" },
	{ ETM_PROP_NM_MAX_RESP_Q_LEN,	FMD_TYPE_UINT32, "512" },
	{ ETM_PROP_NM_BAD_ACC_TO_SEC,	FMD_TYPE_UINT32, "1" },
	{ NULL, 0, NULL }
};
97 
98 
/* module description, version, ops vector, and props passed to fmd_hdl_register() */
static const fmd_hdl_info_t fmd_info = {
	"FMA Event Transport Module", "1.1", &fmd_ops, fmd_props
};
102 
103 /*
104  * ----------------------- private consts and defns --------------------------
105  */
106 
/* misc buffer for variable sized protocol header fields */

#define	ETM_MISC_BUF_SZ	(4 * 1024)

/* try limit for IO operations w/ capped exp backoff sleep on retry */

/*
 * Design_Note:	ETM will potentially retry forever IO operations that the
 *		transport fails with EAGAIN (aka EWOULDBLOCK) rather than
 *		giving up after some number of seconds. This avoids
 *		dropping FMA events while the service processor is down,
 *		but at the risk of pending fmdo_recv() forever and
 *		overflowing FMD's event queue for ETM.
 *		A future TBD enhancement would be to always recv
 *		and send each ETM msg in a single read/write() to reduce
 *		the risk of failure between ETM msg hdr and body,
 *		assuming the MTU_SZ is large enough.
 */

#define	ETM_TRY_MAX_CNT		(MAXINT - 1)	/* effectively unbounded */
#define	ETM_TRY_BACKOFF_RATE	(4)	/* multiplier per retry */
#define	ETM_TRY_BACKOFF_CAP	(60)	/* max backoff sleep in seconds */

/* amount to increment protocol transaction id on each new send */

#define	ETM_XID_INC	(2)
133 
/*
 * Element of the singly linked responder queue; each entry carries
 * everything the responder thread needs to send one RESPONSE msg.
 * The rqe_hdrp header is reused from the received msg (allocated big
 * enough for a RESPONSE hdr) and is owned by the queue until dequeued.
 */
typedef struct etm_resp_q_ele {

	etm_xport_conn_t	rqe_conn;	/* open connection to send on */
	etm_proto_v1_pp_t	*rqe_hdrp;	/* ptr to ETM msg hdr */
	size_t			rqe_hdr_sz;	/* sizeof ETM msg hdr */
	int32_t			rqe_resp_code;	/* response code to send */

	struct etm_resp_q_ele	*rqe_nextp;	/* PRIVATE - next ele ptr */

} etm_resp_q_ele_t;	/* responder queue element */
144 
145 /*
146  * ---------------------------- global data ----------------------------------
147  */
148 
static fmd_hdl_t
*init_hdl = NULL;	/* used in mem allocator at init time */

static int
etm_debug_lvl = 0;	/* debug level: 0 is off, 1 is on, 2 is more, etc */

static int
etm_debug_max_ev_cnt = -1; /* max allowed event count for debugging */

static fmd_xprt_t
*etm_fmd_xprt = NULL;	/* FMD transport layer handle */

static pthread_t
etm_svr_tid = NULL;	/* thread id of connection acceptance server */

static pthread_t
etm_resp_tid = NULL;	/* thread id of msg responder */

static etm_resp_q_ele_t
*etm_resp_q_head = NULL; /* ptr to cur head of responder queue */

static etm_resp_q_ele_t
*etm_resp_q_tail = NULL; /* ptr to cur tail of responder queue */

static uint32_t
etm_resp_q_cur_len = 0;	/* cur length (ele cnt) of responder queue */

static uint32_t
etm_resp_q_max_len = 0;	/* max length (ele cnt) of responder queue */

static uint32_t
etm_bad_acc_to_sec = 0;	/* sleep timeout (in sec) after bad conn accept */

static pthread_mutex_t
etm_resp_q_lock = PTHREAD_MUTEX_INITIALIZER;	/* protects responder queue */

static pthread_cond_t
etm_resp_q_cv = PTHREAD_COND_INITIALIZER;	/* nudges msg responder */

static volatile int
etm_is_dying = 0;	/* bool for dying (killing self) */

static uint32_t
etm_xid_cur = 0;	/* current transaction id for sends */

static uint32_t
etm_xid_ping = 0;	/* xid of last CONTROL msg sent requesting ping */

static uint32_t
etm_xid_ver_negot = 0;	/* xid of last CONTROL msg sent requesting ver negot */

static uint32_t
etm_xid_posted_ev = 0;	/* xid of last FMA_EVENT msg/event posted OK to FMD */

static uint32_t
etm_xid_posted_sa = 0;	/* xid of last ALERT msg/event posted OK to syslog */

static uint8_t
etm_resp_ver = ETM_PROTO_V1; /* proto ver [negotiated] for msg sends */

static pthread_mutex_t
etm_write_lock = PTHREAD_MUTEX_INITIALIZER;	/* for write operations */

/* syslog/console alert forwarding state (configured at init from props) */

static log_ctl_t syslog_ctl;	/* log(7D) meta-data for each msg */
static int syslog_facility;	/* log(7D) facility (part of priority) */
static int syslog_logfd = -1;	/* log(7D) file descriptor */
static int syslog_msgfd = -1;	/* sysmsg(7D) file descriptor */
static int syslog_file = 0;	/* log to syslog_logfd */
static int syslog_cons = 0;	/* log to syslog_msgfd */

/* facility-name to facility-value map; NULL-name entry terminates */

static const struct facility {
	const char *fac_name;
	int fac_value;
} syslog_facs[] = {
	{ "LOG_DAEMON", LOG_DAEMON },
	{ "LOG_LOCAL0", LOG_LOCAL0 },
	{ "LOG_LOCAL1", LOG_LOCAL1 },
	{ "LOG_LOCAL2", LOG_LOCAL2 },
	{ "LOG_LOCAL3", LOG_LOCAL3 },
	{ "LOG_LOCAL4", LOG_LOCAL4 },
	{ "LOG_LOCAL5", LOG_LOCAL5 },
	{ "LOG_LOCAL6", LOG_LOCAL6 },
	{ "LOG_LOCAL7", LOG_LOCAL7 },
	{ NULL, 0 }
};
234 
/*
 * Module statistics registered with FMD; the struct declaration and its
 * initializer must stay in the same member order. Counters are bumped
 * throughout this file via etm_stats.<name>.fmds_value.ui64++.
 */
static struct stats {

	/* ETM msg counters */

	fmd_stat_t etm_rd_hdr_fmaevent;
	fmd_stat_t etm_rd_hdr_control;
	fmd_stat_t etm_rd_hdr_alert;
	fmd_stat_t etm_rd_hdr_response;
	fmd_stat_t etm_rd_body_fmaevent;
	fmd_stat_t etm_rd_body_control;
	fmd_stat_t etm_rd_body_alert;
	fmd_stat_t etm_rd_body_response;
	fmd_stat_t etm_wr_hdr_fmaevent;
	fmd_stat_t etm_wr_hdr_control;
	fmd_stat_t etm_wr_hdr_response;
	fmd_stat_t etm_wr_body_fmaevent;
	fmd_stat_t etm_wr_body_control;
	fmd_stat_t etm_wr_body_response;

	fmd_stat_t etm_rd_max_ev_per_msg;
	fmd_stat_t etm_wr_max_ev_per_msg;

	fmd_stat_t etm_resp_q_cur_len;
	fmd_stat_t etm_resp_q_max_len;

	/* ETM byte counters */

	fmd_stat_t etm_wr_fmd_bytes;
	fmd_stat_t etm_rd_fmd_bytes;
	fmd_stat_t etm_wr_xport_bytes;
	fmd_stat_t etm_rd_xport_bytes;

	fmd_stat_t etm_magic_drop_bytes;

	/* ETM [dropped] FMA event counters */

	fmd_stat_t etm_rd_fmd_fmaevent;
	fmd_stat_t etm_wr_fmd_fmaevent;

	fmd_stat_t etm_rd_drop_fmaevent;
	fmd_stat_t etm_wr_drop_fmaevent;

	fmd_stat_t etm_rd_dup_fmaevent;
	fmd_stat_t etm_wr_dup_fmaevent;

	fmd_stat_t etm_rd_dup_alert;
	fmd_stat_t etm_wr_dup_alert;

	fmd_stat_t etm_enq_drop_resp_q;
	fmd_stat_t etm_deq_drop_resp_q;

	/* ETM protocol failures */

	fmd_stat_t etm_magic_bad;
	fmd_stat_t etm_ver_bad;
	fmd_stat_t etm_msgtype_bad;
	fmd_stat_t etm_subtype_bad;
	fmd_stat_t etm_xid_bad;
	fmd_stat_t etm_fmaeventlen_bad;
	fmd_stat_t etm_respcode_bad;
	fmd_stat_t etm_timeout_bad;
	fmd_stat_t etm_evlens_bad;

	/* IO operation failures */

	fmd_stat_t etm_xport_wr_fail;
	fmd_stat_t etm_xport_rd_fail;
	fmd_stat_t etm_xport_pk_fail;

	/* IO operation retries */

	fmd_stat_t etm_xport_wr_retry;
	fmd_stat_t etm_xport_rd_retry;
	fmd_stat_t etm_xport_pk_retry;

	/* system and library failures */

	fmd_stat_t etm_os_nvlist_pack_fail;
	fmd_stat_t etm_os_nvlist_unpack_fail;
	fmd_stat_t etm_os_nvlist_size_fail;
	fmd_stat_t etm_os_pthread_create_fail;

	/* xport API failures */

	fmd_stat_t etm_xport_get_ev_addrv_fail;
	fmd_stat_t etm_xport_open_fail;
	fmd_stat_t etm_xport_close_fail;
	fmd_stat_t etm_xport_accept_fail;
	fmd_stat_t etm_xport_open_retry;

	/* FMD entry point bad arguments */

	fmd_stat_t etm_fmd_recv_badargs;
	fmd_stat_t etm_fmd_init_badargs;
	fmd_stat_t etm_fmd_fini_badargs;

	/* Alert logging errors */

	fmd_stat_t etm_log_err;
	fmd_stat_t etm_msg_err;

	/* miscellaneous stats */

	fmd_stat_t etm_reset_xport;

} etm_stats = {

	/* ETM msg counters */

	{ "etm_rd_hdr_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg headers rcvd from xport" },
	{ "etm_rd_hdr_control", FMD_TYPE_UINT64,
		"ETM control msg headers rcvd from xport" },
	{ "etm_rd_hdr_alert", FMD_TYPE_UINT64,
		"ETM alert msg headers rcvd from xport" },
	{ "etm_rd_hdr_response", FMD_TYPE_UINT64,
		"ETM response msg headers rcvd from xport" },
	{ "etm_rd_body_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg bodies rcvd from xport" },
	{ "etm_rd_body_control", FMD_TYPE_UINT64,
		"ETM control msg bodies rcvd from xport" },
	{ "etm_rd_body_alert", FMD_TYPE_UINT64,
		"ETM alert msg bodies rcvd from xport" },
	{ "etm_rd_body_response", FMD_TYPE_UINT64,
		"ETM response msg bodies rcvd from xport" },
	{ "etm_wr_hdr_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg headers sent to xport" },
	{ "etm_wr_hdr_control", FMD_TYPE_UINT64,
		"ETM control msg headers sent to xport" },
	{ "etm_wr_hdr_response", FMD_TYPE_UINT64,
		"ETM response msg headers sent to xport" },
	{ "etm_wr_body_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg bodies sent to xport" },
	{ "etm_wr_body_control", FMD_TYPE_UINT64,
		"ETM control msg bodies sent to xport" },
	{ "etm_wr_body_response", FMD_TYPE_UINT64,
		"ETM response msg bodies sent to xport" },

	{ "etm_rd_max_ev_per_msg", FMD_TYPE_UINT64,
		"max FMA events per ETM msg from xport" },
	{ "etm_wr_max_ev_per_msg", FMD_TYPE_UINT64,
		"max FMA events per ETM msg to xport" },

	{ "etm_resp_q_cur_len", FMD_TYPE_UINT64,
		"cur enqueued response msgs to xport" },
	{ "etm_resp_q_max_len", FMD_TYPE_UINT64,
		"max enqueable response msgs to xport" },

	/* ETM byte counters */

	{ "etm_wr_fmd_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events sent to FMD" },
	{ "etm_rd_fmd_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events rcvd from FMD" },
	{ "etm_wr_xport_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events sent to xport" },
	{ "etm_rd_xport_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events rcvd from xport" },

	{ "etm_magic_drop_bytes", FMD_TYPE_UINT64,
		"bytes dropped from xport pre magic num" },

	/* ETM [dropped] FMA event counters */

	{ "etm_rd_fmd_fmaevent", FMD_TYPE_UINT64,
		"FMA events rcvd from FMD" },
	{ "etm_wr_fmd_fmaevent", FMD_TYPE_UINT64,
		"FMA events sent to FMD" },

	{ "etm_rd_drop_fmaevent", FMD_TYPE_UINT64,
		"dropped FMA events from xport" },
	{ "etm_wr_drop_fmaevent", FMD_TYPE_UINT64,
		"dropped FMA events to xport" },

	{ "etm_rd_dup_fmaevent", FMD_TYPE_UINT64,
	    "duplicate FMA events rcvd from xport" },
	{ "etm_wr_dup_fmaevent", FMD_TYPE_UINT64,
	    "duplicate FMA events sent to xport" },

	{ "etm_rd_dup_alert", FMD_TYPE_UINT64,
	    "duplicate ALERTs rcvd from xport" },
	{ "etm_wr_dup_alert", FMD_TYPE_UINT64,
	    "duplicate ALERTs sent to xport" },

	{ "etm_enq_drop_resp_q", FMD_TYPE_UINT64,
	    "dropped response msgs on enq" },
	{ "etm_deq_drop_resp_q", FMD_TYPE_UINT64,
	    "dropped response msgs on deq" },

	/* ETM protocol failures */

	{ "etm_magic_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid magic num" },
	{ "etm_ver_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid protocol version" },
	{ "etm_msgtype_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid message type" },
	{ "etm_subtype_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid sub type" },
	{ "etm_xid_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ unmatched xid" },
	{ "etm_fmaeventlen_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid FMA event length" },
	{ "etm_respcode_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid response code" },
	{ "etm_timeout_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid timeout value" },
	{ "etm_evlens_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ too many event lengths" },

	/* IO operation failures */

	{ "etm_xport_wr_fail", FMD_TYPE_UINT64,
		"xport write failures" },
	{ "etm_xport_rd_fail", FMD_TYPE_UINT64,
		"xport read failures" },
	{ "etm_xport_pk_fail", FMD_TYPE_UINT64,
		"xport peek failures" },

	/* IO operation retries */

	{ "etm_xport_wr_retry", FMD_TYPE_UINT64,
		"xport write retries" },
	{ "etm_xport_rd_retry", FMD_TYPE_UINT64,
		"xport read retries" },
	{ "etm_xport_pk_retry", FMD_TYPE_UINT64,
		"xport peek retries" },

	/* system and library failures */

	{ "etm_os_nvlist_pack_fail", FMD_TYPE_UINT64,
		"nvlist_pack failures" },
	{ "etm_os_nvlist_unpack_fail", FMD_TYPE_UINT64,
		"nvlist_unpack failures" },
	{ "etm_os_nvlist_size_fail", FMD_TYPE_UINT64,
		"nvlist_size failures" },
	{ "etm_os_pthread_create_fail", FMD_TYPE_UINT64,
		"pthread_create failures" },

	/* transport API failures */

	{ "etm_xport_get_ev_addrv_fail", FMD_TYPE_UINT64,
		"xport get event addrv API failures" },
	{ "etm_xport_open_fail", FMD_TYPE_UINT64,
		"xport open API failures" },
	{ "etm_xport_close_fail", FMD_TYPE_UINT64,
		"xport close API failures" },
	{ "etm_xport_accept_fail", FMD_TYPE_UINT64,
		"xport accept API failures" },
	{ "etm_xport_open_retry", FMD_TYPE_UINT64,
		"xport open API retries" },

	/* FMD entry point bad arguments */

	{ "etm_fmd_recv_badargs", FMD_TYPE_UINT64,
		"bad arguments from fmd_recv entry point" },
	{ "etm_fmd_init_badargs", FMD_TYPE_UINT64,
		"bad arguments from fmd_init entry point" },
	{ "etm_fmd_fini_badargs", FMD_TYPE_UINT64,
		"bad arguments from fmd_fini entry point" },

	/* Alert logging errors */

	{ "etm_log_err", FMD_TYPE_UINT64,
		"failed to log message to log(7D)" },
	{ "etm_msg_err", FMD_TYPE_UINT64,
		"failed to log message to sysmsg(7D)" },

	/* miscellaneous stats */

	{ "etm_reset_xport", FMD_TYPE_UINT64,
		"xport resets after xport API failure" }
};
508 
509 /*
510  * -------------------------- support functions ------------------------------
511  */
512 
513 /*
514  * Design_Note:	Each failure worth reporting to FMD should be done using
515  *		a single call to fmd_hdl_error() as it logs an FMA event
516  *		for each call. Also be aware that all the fmd_hdl_*()
517  *		format strings currently use platform specific *printf()
518  *		routines; so "%p" under Solaris does not prepend "0x" to
519  *		the outputted hex digits, while Linux and VxWorks do.
520  */
521 
522 /*
523  * etm_show_time - display the current time of day (for debugging) using
524  *		the given FMD module handle and annotation string
525  */
526 
527 static void
528 etm_show_time(fmd_hdl_t *hdl, char *note_str)
529 {
530 	struct timeval		tmv;		/* timeval */
531 
532 	(void) gettimeofday(&tmv, NULL);
533 	fmd_hdl_debug(hdl, "info: %s: cur Unix Epoch time %d.%06d\n",
534 	    note_str, tmv.tv_sec, tmv.tv_usec);
535 
536 } /* etm_show_time() */
537 
538 /*
539  * etm_hexdump - hexdump the given buffer (for debugging) using
540  *		the given FMD module handle
541  */
542 
543 static void
544 etm_hexdump(fmd_hdl_t *hdl, void *buf, size_t byte_cnt)
545 {
546 	uint8_t		*bp;		/* byte ptr */
547 	int		i, j;		/* index */
548 	char		cb[80];		/* char buf */
549 	unsigned int	n;		/* a byte of data for sprintf() */
550 
551 	bp = buf;
552 	j = 0;
553 
554 	/*
555 	 * Design_Note:	fmd_hdl_debug() auto adds a newline if missing;
556 	 *		hence cb exists to accumulate a longer string.
557 	 */
558 
559 	for (i = 1; i <= byte_cnt; i++) {
560 		n = *bp++;
561 		(void) sprintf(&cb[j], "%2.2x ", n);
562 		j += 3;
563 		/* add a newline every 16 bytes or at the buffer's end */
564 		if (((i % 16) == 0) || (i >= byte_cnt)) {
565 			cb[j-1] = '\0';
566 			fmd_hdl_debug(hdl, "%s\n", cb);
567 			j = 0;
568 		}
569 	} /* for each byte in the buffer */
570 
571 } /* etm_hexdump() */
572 
573 /*
574  * etm_sleep - sleep the caller for the given number of seconds,
575  *		return 0 or -errno value
576  *
577  * Design_Note:	To avoid interfering with FMD's signal mask (SIGALRM)
578  *		do not use [Solaris] sleep(3C) and instead use
579  *		pthread_cond_wait() or nanosleep(), both of which
580  *		are POSIX spec-ed to leave signal masks alone.
581  *		This is needed for Solaris and Linux (domain and SP).
582  */
583 
/*
 * etm_sleep - sleep the caller for the given number of seconds,
 *		return 0 or -errno value
 *
 * Design_Note:	To avoid interfering with FMD's signal mask (SIGALRM)
 *		do not use [Solaris] sleep(3C) and instead use
 *		pthread_cond_wait() or nanosleep(), both of which
 *		are POSIX spec-ed to leave signal masks alone.
 *		This is needed for Solaris and Linux (domain and SP).
 */

static int
etm_sleep(unsigned sleep_sec)
{
	struct timespec	rqt;	/* requested sleep interval */

	rqt.tv_sec = sleep_sec;
	rqt.tv_nsec = 0;

	/* nanosleep() sets errno when it fails */
	return ((nanosleep(&rqt, NULL) < 0) ? (-errno) : 0);

} /* etm_sleep() */
599 
600 /*
601  * etm_conn_open - open a connection to the given transport address,
602  *		return 0 and the opened connection handle
603  *		or -errno value
604  *
605  * caveats:	the err_substr is used in failure cases for calling
606  *		fmd_hdl_error()
607  */
608 
609 static int
610 etm_conn_open(fmd_hdl_t *hdl, char *err_substr,
611 		etm_xport_addr_t addr, etm_xport_conn_t *connp)
612 {
613 	etm_xport_conn_t	conn;	/* connection to return */
614 	int			nev;	/* -errno value */
615 
616 	if ((conn = etm_xport_open(hdl, addr)) == NULL) {
617 		nev = (-errno);
618 		fmd_hdl_error(hdl, "error: %s: errno %d\n",
619 		    err_substr, errno);
620 		etm_stats.etm_xport_open_fail.fmds_value.ui64++;
621 		return (nev);
622 	} else {
623 		*connp = conn;
624 		return (0);
625 	}
626 } /* etm_conn_open() */
627 
628 /*
629  * etm_conn_close - close the given connection,
630  *		return 0 or -errno value
631  *
632  * caveats:	the err_substr is used in failure cases for calling
633  *		fmd_hdl_error()
634  */
635 
636 static int
637 etm_conn_close(fmd_hdl_t *hdl, char *err_substr, etm_xport_conn_t conn)
638 {
639 	int	nev;	/* -errno value */
640 
641 	if (etm_xport_close(hdl, conn) == NULL) {
642 		nev = (-errno);
643 		fmd_hdl_error(hdl, "warning: %s: errno %d\n",
644 		    err_substr, errno);
645 		etm_stats.etm_xport_close_fail.fmds_value.ui64++;
646 		return (nev);
647 	} else {
648 		return (0);
649 	}
650 } /* etm_conn_close() */
651 
652 /*
653  * etm_io_op - perform an IO operation on the given connection
654  *		with the given buffer,
655  *		accommodating MTU size and retrying op if needed,
656  *		return how many bytes actually done by the op
657  *		or -errno value
658  *
659  * caveats:	the err_substr is used in failure cases for calling
660  *		fmd_hdl_error()
661  */
662 
663 static ssize_t
664 etm_io_op(fmd_hdl_t *hdl, char *err_substr, etm_xport_conn_t conn,
665 				void *buf, size_t byte_cnt, int io_op)
666 {
667 	ssize_t		rv;		/* ret val / byte count */
668 	ssize_t		n;		/* gen use */
669 	uint8_t		*datap;		/* ptr to data */
670 	size_t		mtu_sz;		/* MTU size in bytes */
671 	int		(*io_func_ptr)(fmd_hdl_t *, etm_xport_conn_t,
672 	    void *, size_t);
673 	size_t		io_sz;		/* byte count for io_func_ptr */
674 	int		try_cnt;	/* number of tries done */
675 	int		sleep_sec;	/* exp backoff sleep period in sec */
676 	int		sleep_rv;	/* ret val from sleeping */
677 	fmd_stat_t	io_retry_stat;	/* IO retry stat to update */
678 	fmd_stat_t	io_fail_stat;	/* IO failure stat to update */
679 
680 	if ((conn == NULL) || (buf == NULL)) {
681 		return (-EINVAL);
682 	}
683 	switch (io_op) {
684 		case ETM_IO_OP_RD:
685 			io_func_ptr = etm_xport_read;
686 			io_retry_stat = etm_stats.etm_xport_rd_retry;
687 			io_fail_stat = etm_stats.etm_xport_rd_fail;
688 			break;
689 		case ETM_IO_OP_WR:
690 			io_func_ptr = etm_xport_write;
691 			io_retry_stat = etm_stats.etm_xport_wr_retry;
692 			io_fail_stat = etm_stats.etm_xport_wr_fail;
693 			break;
694 		default:
695 			return (-EINVAL);
696 	}
697 	if (byte_cnt == 0) {
698 		return (byte_cnt);	/* nop */
699 	}
700 
701 	/* obtain [current] MTU size */
702 
703 	if ((n = etm_xport_get_opt(hdl, conn, ETM_XPORT_OPT_MTU_SZ)) < 0) {
704 		mtu_sz = ETM_XPORT_MTU_SZ_DEF;
705 	} else {
706 		mtu_sz = n;
707 	}
708 
709 	/* loop until all IO done, try limit exceeded, or real failure */
710 
711 	rv = 0;
712 	datap = buf;
713 	while (rv < byte_cnt) {
714 		io_sz = MIN((byte_cnt - rv), mtu_sz);
715 		try_cnt = 0;
716 		sleep_sec = 0;
717 
718 		/* when give up, return -errno value even if partly done */
719 
720 		while ((n = (*io_func_ptr)(hdl, conn, datap, io_sz)) ==
721 		    (-EAGAIN)) {
722 			try_cnt++;
723 			if (try_cnt > ETM_TRY_MAX_CNT) {
724 				rv = n;
725 				goto func_ret;
726 			}
727 			if (etm_is_dying) {
728 				rv = (-EINTR);
729 				goto func_ret;
730 			}
731 			if ((sleep_rv = etm_sleep(sleep_sec)) < 0) {
732 				rv = sleep_rv;
733 				goto func_ret;
734 			}
735 			sleep_sec = ((sleep_sec == 0) ? 1 :
736 			    (sleep_sec * ETM_TRY_BACKOFF_RATE));
737 			sleep_sec = MIN(sleep_sec, ETM_TRY_BACKOFF_CAP);
738 			io_retry_stat.fmds_value.ui64++;
739 			if (etm_debug_lvl >= 1) {
740 				fmd_hdl_debug(hdl, "info: retrying io op %d "
741 				    "due to EAGAIN\n", io_op);
742 			}
743 		} /* while trying the io operation */
744 
745 		if (etm_is_dying) {
746 			rv = (-EINTR);
747 			goto func_ret;
748 		}
749 		if (n < 0) {
750 			rv = n;
751 			goto func_ret;
752 		}
753 		/* avoid spinning CPU when given 0 bytes but no error */
754 		if (n == 0) {
755 			if ((sleep_rv = etm_sleep(ETM_SLEEP_QUIK)) < 0) {
756 				rv = sleep_rv;
757 				goto func_ret;
758 			}
759 		}
760 		rv += n;
761 		datap += n;
762 	} /* while still have more data */
763 
764 func_ret:
765 
766 	if (rv < 0) {
767 		io_fail_stat.fmds_value.ui64++;
768 		fmd_hdl_debug(hdl, "error: %s: errno %d\n",
769 		    err_substr, (int)(-rv));
770 	}
771 	if (etm_debug_lvl >= 3) {
772 		fmd_hdl_debug(hdl, "info: io op %d ret %d of %d\n",
773 		    io_op, (int)rv, (int)byte_cnt);
774 	}
775 	return (rv);
776 
777 } /* etm_io_op() */
778 
779 /*
780  * etm_magic_read - read the magic number of an ETM message header
781  *		from the given connection into the given buffer,
782  *		return 0 or -errno value
783  *
784  * Design_Note:	This routine is intended to help protect ETM from protocol
785  *		framing errors as might be caused by an SP reset / crash in
786  *		the middle of an ETM message send; the connection will be
787  *		read from for as many bytes as needed until the magic number
788  *		is found using a sliding buffer for comparisons.
789  */
790 
/*
 * etm_magic_read - read the magic number of an ETM message header
 *		from the given connection into the given buffer,
 *		return 0 or -errno value
 *
 * Design_Note:	This routine is intended to help protect ETM from protocol
 *		framing errors as might be caused by an SP reset / crash in
 *		the middle of an ETM message send; the connection will be
 *		read from for as many bytes as needed until the magic number
 *		is found using a sliding buffer for comparisons.
 */

static int
etm_magic_read(fmd_hdl_t *hdl, etm_xport_conn_t conn, uint32_t *magic_ptr)
{
	int		rv;		/* ret val */
	uint32_t	magic_num;	/* magic number */
	int		byte_cnt;	/* count of bytes read */
	uint8_t		buf5[4+1];	/* sliding input buffer */
	int		i, j;		/* indices into buf5 */
	ssize_t		n;		/* gen use */
	uint8_t		drop_buf[1024];	/* dropped bytes buffer */

	rv = 0;		/* assume success */
	magic_num = 0;
	byte_cnt = 0;
	j = 0;

	/* magic number bytes are sent in network (big endian) order */

	while (magic_num != ETM_PROTO_MAGIC_NUM) {
		/* read one byte at a time so the window can slide */
		if ((n = etm_io_op(hdl, "bad io read on magic",
		    conn, &buf5[j], 1, ETM_IO_OP_RD)) < 0) {
			rv = n;
			goto func_ret;
		}
		byte_cnt++;
		/* j saturates at sizeof (magic_num) once the window fills */
		j = MIN((j + 1), sizeof (magic_num));
		if (byte_cnt < sizeof (magic_num)) {
			continue;	/* window not yet full */
		}

		if (byte_cnt > sizeof (magic_num)) {
			etm_stats.etm_magic_drop_bytes.fmds_value.ui64++;
			/* save the oldest (evicted) byte for debug dump */
			i = MIN(byte_cnt - j - 1, sizeof (drop_buf) - 1);
			drop_buf[i] = buf5[0];
			for (i = 0; i < j; i++) {
				buf5[i] = buf5[i+1];
			} /* for sliding the buffer contents */
		}
		(void) memcpy(&magic_num, &buf5[0], sizeof (magic_num));
		magic_num = ntohl(magic_num);
	} /* for reading bytes until find magic number */

func_ret:

	/* anything other than an exact 4-byte read implies resync/dropped data */
	if (byte_cnt != sizeof (magic_num)) {
		fmd_hdl_debug(hdl, "warning: bad proto frame "
		    "implies corrupt/lost msg(s)\n");
	}
	if ((byte_cnt > sizeof (magic_num)) && (etm_debug_lvl >= 2)) {
		i = MIN(byte_cnt - sizeof (magic_num), sizeof (drop_buf));
		fmd_hdl_debug(hdl, "info: magic drop hexdump "
		    "first %d of %d bytes:\n", i,
		    byte_cnt - sizeof (magic_num));
		etm_hexdump(hdl, drop_buf, i);
	}

	if (rv == 0) {
		*magic_ptr = magic_num;
	}
	return (rv);

} /* etm_magic_read() */
853 
854 /*
855  * etm_hdr_read - allocate, read, and validate a [variable sized]
856  *		ETM message header from the given connection,
857  *		return the allocated ETM message header
858  *		(which is guaranteed to be large enough to reuse as a
859  *		RESPONSE msg hdr) and its size
860  *		or NULL and set errno on failure
861  */
862 
863 static void *
864 etm_hdr_read(fmd_hdl_t *hdl, etm_xport_conn_t conn, size_t *szp)
865 {
866 	uint8_t			*hdrp;		/* ptr to header to return */
867 	size_t			hdr_sz;		/* sizeof *hdrp */
868 	etm_proto_v1_pp_t	pp; 		/* protocol preamble */
869 	etm_proto_v1_ev_hdr_t	*ev_hdrp;	/* for FMA_EVENT msg */
870 	etm_proto_v1_ctl_hdr_t	*ctl_hdrp;	/* for CONTROL msg */
871 	etm_proto_v1_resp_hdr_t *resp_hdrp;	/* for RESPONSE msg */
872 	etm_proto_v3_sa_hdr_t	*sa_hdrp;	/* for ALERT msg */
873 	uint32_t		*lenp;		/* ptr to FMA event length */
874 	ssize_t			i, n;		/* gen use */
875 	uint8_t	misc_buf[ETM_MISC_BUF_SZ];	/* for var sized hdrs */
876 	int			dummy_int;	/* dummy var to appease lint */
877 
878 	hdrp = NULL; hdr_sz = 0;
879 
880 	/* read the magic number which starts the protocol preamble */
881 
882 	if ((n = etm_magic_read(hdl, conn, &pp.pp_magic_num)) < 0) {
883 		errno = (-n);
884 		etm_stats.etm_magic_bad.fmds_value.ui64++;
885 		return (NULL);
886 	}
887 
888 	/* read the rest of the protocol preamble all at once */
889 
890 	if ((n = etm_io_op(hdl, "bad io read on preamble",
891 	    conn, &pp.pp_proto_ver, sizeof (pp) - sizeof (pp.pp_magic_num),
892 	    ETM_IO_OP_RD)) < 0) {
893 		errno = (-n);
894 		return (NULL);
895 	}
896 
897 	/*
898 	 * Design_Note:	The magic number was already network decoded; but
899 	 *		some other preamble fields also need to be decoded,
900 	 *		specifically pp_xid and pp_timeout. The rest of the
901 	 *		preamble fields are byte sized and hence need no
902 	 *		decoding.
903 	 */
904 
905 	pp.pp_xid = ntohl(pp.pp_xid);
906 	pp.pp_timeout = ntohl(pp.pp_timeout);
907 
908 	/* sanity check the header as best we can */
909 
910 	if ((pp.pp_proto_ver < ETM_PROTO_V1) ||
911 	    (pp.pp_proto_ver > ETM_PROTO_V3)) {
912 		fmd_hdl_error(hdl, "error: bad proto ver %d\n",
913 		    (int)pp.pp_proto_ver);
914 		errno = EPROTO;
915 		etm_stats.etm_ver_bad.fmds_value.ui64++;
916 		return (NULL);
917 	}
918 
919 	dummy_int = pp.pp_msg_type;
920 	if ((dummy_int <= ETM_MSG_TYPE_TOO_LOW) ||
921 	    (dummy_int >= ETM_MSG_TYPE_TOO_BIG)) {
922 		fmd_hdl_error(hdl, "error: bad msg type %d", dummy_int);
923 		errno = EBADMSG;
924 		etm_stats.etm_msgtype_bad.fmds_value.ui64++;
925 		return (NULL);
926 	}
927 
928 	/* handle [var sized] hdrs for FMA_EVENT, CONTROL, RESPONSE msgs */
929 
930 	if (pp.pp_msg_type == ETM_MSG_TYPE_FMA_EVENT) {
931 
932 		ev_hdrp = (void*)&misc_buf[0];
933 		hdr_sz = sizeof (*ev_hdrp);
934 		(void) memcpy(&ev_hdrp->ev_pp, &pp, sizeof (pp));
935 
936 		/* sanity check the header's timeout */
937 
938 		if ((ev_hdrp->ev_pp.pp_proto_ver == ETM_PROTO_V1) &&
939 		    (ev_hdrp->ev_pp.pp_timeout != ETM_PROTO_V1_TIMEOUT_NONE)) {
940 			errno = ETIME;
941 			etm_stats.etm_timeout_bad.fmds_value.ui64++;
942 			return (NULL);
943 		}
944 
945 		/* get all FMA event lengths from the header */
946 
947 		lenp = (uint32_t *)&ev_hdrp->ev_lens[0]; lenp--;
948 		i = -1;	/* cnt of length entries preceding 0 */
949 		do {
950 			i++; lenp++;
951 			if ((sizeof (*ev_hdrp) + (i * sizeof (*lenp))) >=
952 			    ETM_MISC_BUF_SZ) {
953 				errno = E2BIG;	/* ridiculous size */
954 				etm_stats.etm_evlens_bad.fmds_value.ui64++;
955 				return (NULL);
956 			}
957 			if ((n = etm_io_op(hdl, "bad io read on event len",
958 			    conn, lenp, sizeof (*lenp), ETM_IO_OP_RD)) < 0) {
959 				errno = (-n);
960 				return (NULL);
961 			}
962 			*lenp = ntohl(*lenp);
963 
964 		} while (*lenp != 0);
965 		i += 0; /* first len already counted by sizeof(ev_hdr) */
966 		hdr_sz += (i * sizeof (*lenp));
967 
968 		etm_stats.etm_rd_hdr_fmaevent.fmds_value.ui64++;
969 
970 	} else if (pp.pp_msg_type == ETM_MSG_TYPE_CONTROL) {
971 
972 		ctl_hdrp = (void*)&misc_buf[0];
973 		hdr_sz = sizeof (*ctl_hdrp);
974 		(void) memcpy(&ctl_hdrp->ctl_pp, &pp, sizeof (pp));
975 
976 		/* sanity check the header's sub type (control selector) */
977 
978 		if ((ctl_hdrp->ctl_pp.pp_sub_type <= ETM_CTL_SEL_TOO_LOW) ||
979 		    (ctl_hdrp->ctl_pp.pp_sub_type >= ETM_CTL_SEL_TOO_BIG)) {
980 			fmd_hdl_error(hdl, "error: bad ctl sub type %d\n",
981 			    (int)ctl_hdrp->ctl_pp.pp_sub_type);
982 			errno = EBADMSG;
983 			etm_stats.etm_subtype_bad.fmds_value.ui64++;
984 			return (NULL);
985 		}
986 
987 		/* get the control length */
988 
989 		if ((n = etm_io_op(hdl, "bad io read on ctl len",
990 		    conn, &ctl_hdrp->ctl_len, sizeof (ctl_hdrp->ctl_len),
991 		    ETM_IO_OP_RD)) < 0) {
992 			errno = (-n);
993 			return (NULL);
994 		}
995 
996 		ctl_hdrp->ctl_len = ntohl(ctl_hdrp->ctl_len);
997 
998 		etm_stats.etm_rd_hdr_control.fmds_value.ui64++;
999 
1000 	} else if (pp.pp_msg_type == ETM_MSG_TYPE_RESPONSE) {
1001 
1002 		resp_hdrp = (void*)&misc_buf[0];
1003 		hdr_sz = sizeof (*resp_hdrp);
1004 		(void) memcpy(&resp_hdrp->resp_pp, &pp, sizeof (pp));
1005 
1006 		/* sanity check the header's timeout */
1007 
1008 		if (resp_hdrp->resp_pp.pp_timeout !=
1009 		    ETM_PROTO_V1_TIMEOUT_NONE) {
1010 			errno = ETIME;
1011 			etm_stats.etm_timeout_bad.fmds_value.ui64++;
1012 			return (NULL);
1013 		}
1014 
1015 		/* get the response code and length */
1016 
1017 		if ((n = etm_io_op(hdl, "bad io read on resp code+len",
1018 		    conn, &resp_hdrp->resp_code,
1019 		    sizeof (resp_hdrp->resp_code)
1020 		    + sizeof (resp_hdrp->resp_len),
1021 		    ETM_IO_OP_RD)) < 0) {
1022 			errno = (-n);
1023 			return (NULL);
1024 		}
1025 
1026 		resp_hdrp->resp_code = ntohl(resp_hdrp->resp_code);
1027 		resp_hdrp->resp_len = ntohl(resp_hdrp->resp_len);
1028 
1029 		etm_stats.etm_rd_hdr_response.fmds_value.ui64++;
1030 
1031 	} else if (pp.pp_msg_type == ETM_MSG_TYPE_ALERT) {
1032 
1033 		sa_hdrp = (void*)&misc_buf[0];
1034 		hdr_sz = sizeof (*sa_hdrp);
1035 		(void) memcpy(&sa_hdrp->sa_pp, &pp, sizeof (pp));
1036 
1037 		/* sanity check the header's protocol version */
1038 
1039 		if (sa_hdrp->sa_pp.pp_proto_ver != ETM_PROTO_V3) {
1040 			errno = EPROTO;
1041 			etm_stats.etm_ver_bad.fmds_value.ui64++;
1042 			return (NULL);
1043 		}
1044 
1045 		/* get the priority and length */
1046 
1047 		if ((n = etm_io_op(hdl, "bad io read on sa priority+len",
1048 		    conn, &sa_hdrp->sa_priority,
1049 		    sizeof (sa_hdrp->sa_priority)
1050 		    + sizeof (sa_hdrp->sa_len),
1051 		    ETM_IO_OP_RD)) < 0) {
1052 			errno = (-n);
1053 			return (NULL);
1054 		}
1055 
1056 		sa_hdrp->sa_priority = ntohl(sa_hdrp->sa_priority);
1057 		sa_hdrp->sa_len = ntohl(sa_hdrp->sa_len);
1058 
1059 		etm_stats.etm_rd_hdr_alert.fmds_value.ui64++;
1060 
1061 	} /* whether we have FMA_EVENT, ALERT, CONTROL, or RESPONSE msg */
1062 
1063 	/*
1064 	 * choose a header size that allows hdr reuse for RESPONSE msgs,
1065 	 * allocate and populate the message header, and
1066 	 * return alloc size to caller for later free of hdrp
1067 	 */
1068 
1069 	hdr_sz = MAX(hdr_sz, sizeof (*resp_hdrp));
1070 	hdrp = fmd_hdl_zalloc(hdl, hdr_sz, FMD_SLEEP);
1071 	(void) memcpy(hdrp, misc_buf, hdr_sz);
1072 
1073 	if (etm_debug_lvl >= 3) {
1074 		fmd_hdl_debug(hdl, "info: msg hdr hexdump %d bytes:\n", hdr_sz);
1075 		etm_hexdump(hdl, hdrp, hdr_sz);
1076 	}
1077 	*szp = hdr_sz;
1078 	return (hdrp);
1079 
1080 } /* etm_hdr_read() */
1081 
1082 /*
1083  * etm_hdr_write - create and write a [variable sized] ETM message header
1084  *		to the given connection appropriate for the given FMA event
1085  *		and type of nvlist encoding,
1086  *		return the allocated ETM message header and its size
1087  *		or NULL and set errno on failure
1088  */
1089 
1090 static void*
1091 etm_hdr_write(fmd_hdl_t *hdl, etm_xport_conn_t conn, nvlist_t *evp,
1092 						int encoding, size_t *szp)
1093 {
1094 	etm_proto_v1_ev_hdr_t	*hdrp;		/* for FMA_EVENT msg */
1095 	size_t			hdr_sz;		/* sizeof *hdrp */
1096 	uint32_t		*lenp;		/* ptr to FMA event length */
1097 	size_t			evsz;		/* packed FMA event size */
1098 	ssize_t			n;		/* gen use */
1099 
1100 	/* allocate and populate the message header for 1 FMA event */
1101 
1102 	hdr_sz = sizeof (*hdrp) + (1 * sizeof (hdrp->ev_lens[0]));
1103 
1104 	hdrp = fmd_hdl_zalloc(hdl, hdr_sz, FMD_SLEEP);
1105 
1106 	/*
1107 	 * Design_Note: Although the ETM protocol supports it, we do not (yet)
1108 	 *		want responses/ACKs on FMA events that we send. All
1109 	 *		such messages are sent with ETM_PROTO_V1_TIMEOUT_NONE.
1110 	 */
1111 
1112 	hdrp->ev_pp.pp_magic_num = ETM_PROTO_MAGIC_NUM;
1113 	hdrp->ev_pp.pp_magic_num = htonl(hdrp->ev_pp.pp_magic_num);
1114 	hdrp->ev_pp.pp_proto_ver = ETM_PROTO_V1;
1115 	hdrp->ev_pp.pp_msg_type = ETM_MSG_TYPE_FMA_EVENT;
1116 	hdrp->ev_pp.pp_sub_type = 0;
1117 	hdrp->ev_pp.pp_rsvd_pad = 0;
1118 	hdrp->ev_pp.pp_xid = etm_xid_cur;
1119 	hdrp->ev_pp.pp_xid = htonl(hdrp->ev_pp.pp_xid);
1120 	etm_xid_cur += ETM_XID_INC;
1121 	hdrp->ev_pp.pp_timeout = ETM_PROTO_V1_TIMEOUT_NONE;
1122 	hdrp->ev_pp.pp_timeout = htonl(hdrp->ev_pp.pp_timeout);
1123 
1124 	lenp = &hdrp->ev_lens[0];
1125 
1126 	if ((n = nvlist_size(evp, &evsz, encoding)) != 0) {
1127 		errno = n;
1128 		fmd_hdl_free(hdl, hdrp, hdr_sz);
1129 		etm_stats.etm_os_nvlist_size_fail.fmds_value.ui64++;
1130 		return (NULL);
1131 	}
1132 
1133 	/* indicate 1 FMA event, network encode its length, and 0-terminate */
1134 
1135 	etm_stats.etm_wr_max_ev_per_msg.fmds_value.ui64 = 1;
1136 
1137 	*lenp = evsz; *lenp = htonl(*lenp); lenp++;
1138 	*lenp = 0; *lenp = htonl(*lenp); lenp++;
1139 
1140 	/*
1141 	 * write the network encoded header to the transport, and
1142 	 * return alloc size to caller for later free
1143 	 */
1144 
1145 	if ((n = etm_io_op(hdl, "bad io write on event hdr",
1146 	    conn, hdrp, hdr_sz, ETM_IO_OP_WR)) < 0) {
1147 		errno = (-n);
1148 		fmd_hdl_free(hdl, hdrp, hdr_sz);
1149 		return (NULL);
1150 	}
1151 
1152 	*szp = hdr_sz;
1153 	return (hdrp);
1154 
1155 } /* etm_hdr_write() */
1156 
1157 /*
1158  * etm_post_to_fmd - post the given FMA event to FMD
1159  *			via a FMD transport API call,
1160  *			return 0 or -errno value
1161  *
1162  * caveats:	the FMA event (evp) is freed by FMD,
1163  *		thus callers of this function should
1164  *		immediately discard any ptr they have to the
1165  *		nvlist without freeing or dereferencing it
1166  */
1167 
1168 static int
1169 etm_post_to_fmd(fmd_hdl_t *hdl, nvlist_t *evp)
1170 {
1171 	ssize_t			ev_sz;		/* sizeof *evp */
1172 
1173 	(void) nvlist_size(evp, (size_t *)&ev_sz, NV_ENCODE_XDR);
1174 
1175 	if (etm_debug_lvl >= 2) {
1176 		etm_show_time(hdl, "ante ev post");
1177 	}
1178 	fmd_xprt_post(hdl, etm_fmd_xprt, evp, 0);
1179 	etm_stats.etm_wr_fmd_fmaevent.fmds_value.ui64++;
1180 	etm_stats.etm_wr_fmd_bytes.fmds_value.ui64 += ev_sz;
1181 	if (etm_debug_lvl >= 1) {
1182 		fmd_hdl_debug(hdl, "info: event %p post ok to FMD\n", evp);
1183 	}
1184 	if (etm_debug_lvl >= 2) {
1185 		etm_show_time(hdl, "post ev post");
1186 	}
1187 	return (0);
1188 
1189 } /* etm_post_to_fmd() */
1190 
1191 /*
1192  * Ideally we would just use syslog(3C) for outputting our messages.
1193  * Unfortunately, as this module is running within the FMA daemon context,
1194  * that would create the situation where this module's openlog() would
1195  * have the monopoly on syslog(3C) for the daemon and all its modules.
1196  * To avoid that situation, this module uses the same logic as the
1197  * syslog-msgs FM module to directly call into the log(7D) and sysmsg(7D)
1198  * devices for syslog and console.
1199  */
1200 
static int
etm_post_to_syslog(fmd_hdl_t *hdl, uint32_t priority, uint32_t body_sz,
							uint8_t *body_buf)
{
	char		*msgbuf;	/* formatted message */
	size_t		msgbuf_sz;	/* alloc'd size of msgbuf */
	struct strbuf	ctl, dat;	/* structs pushed to the logfd */
	uint32_t	msgid;		/* syslog message ID number */

	/* nothing to do when both syslog and console logging are off */

	if (!syslog_file && !syslog_cons) {
		return (0);
	}

	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "ante syslog post");
	}

	/* assumes body_buf holds NUL-terminated text — TODO confirm w/ sender */

	msgbuf_sz = body_sz + 64; /* +64 for prefix strings added below */
	msgbuf = fmd_hdl_zalloc(hdl, msgbuf_sz, FMD_SLEEP);

	if (syslog_file) {
		/* log(7D) path: tag the message with a syslog msg ID */
		STRLOG_MAKE_MSGID(body_buf, msgid);
		(void) snprintf(msgbuf, msgbuf_sz,
		    "SC Alert: [ID %u FACILITY_AND_PRIORITY] %s", msgid,
		    body_buf);

		syslog_ctl.pri = syslog_facility | priority;

		ctl.buf = (void *)&syslog_ctl;
		ctl.len = sizeof (syslog_ctl);

		dat.buf = msgbuf;
		dat.len = strlen(msgbuf) + 1;

		if (putmsg(syslog_logfd, &ctl, &dat, 0) != 0) {
			fmd_hdl_debug(hdl, "putmsg failed: %s\n",
			    strerror(errno));
			etm_stats.etm_log_err.fmds_value.ui64++;
		}
	}

	if (syslog_cons) {
		/* sysmsg(7D) path: plain console text, CRLF terminated */
		(void) snprintf(msgbuf, msgbuf_sz,
		    "SC Alert: %s\r\n", body_buf);

		dat.buf = msgbuf;
		dat.len = strlen(msgbuf) + 1;

		if (write(syslog_msgfd, dat.buf, dat.len) != dat.len) {
			fmd_hdl_debug(hdl, "write failed: %s\n",
			    strerror(errno));
			etm_stats.etm_msg_err.fmds_value.ui64++;
		}
	}

	fmd_hdl_free(hdl, msgbuf, msgbuf_sz);

	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "post syslog post");
	}

	return (0);
}
1264 
1265 
1266 /*
1267  * etm_req_ver_negot - send an ETM control message to the other end requesting
1268  *			that the ETM protocol version be negotiated/set
1269  */
1270 
1271 static void
1272 etm_req_ver_negot(fmd_hdl_t *hdl)
1273 {
1274 	etm_xport_addr_t	*addrv;		/* default dst addr(s) */
1275 	etm_xport_conn_t	conn;		/* connection to other end */
1276 	etm_proto_v1_ctl_hdr_t	*ctl_hdrp;	/* for CONTROL msg */
1277 	size_t			hdr_sz;		/* sizeof header */
1278 	uint8_t			*body_buf;	/* msg body buffer */
1279 	uint32_t		body_sz;	/* sizeof *body_buf */
1280 	ssize_t			i;		/* gen use */
1281 
1282 	/* populate an ETM control msg to send */
1283 
1284 	hdr_sz = sizeof (*ctl_hdrp);
1285 	body_sz = (3 + 1);		/* version bytes plus null byte */
1286 
1287 	ctl_hdrp = fmd_hdl_zalloc(hdl, hdr_sz + body_sz, FMD_SLEEP);
1288 
1289 	ctl_hdrp->ctl_pp.pp_magic_num = htonl(ETM_PROTO_MAGIC_NUM);
1290 	ctl_hdrp->ctl_pp.pp_proto_ver = ETM_PROTO_V1;
1291 	ctl_hdrp->ctl_pp.pp_msg_type = ETM_MSG_TYPE_CONTROL;
1292 	ctl_hdrp->ctl_pp.pp_sub_type = ETM_CTL_SEL_VER_NEGOT_REQ;
1293 	ctl_hdrp->ctl_pp.pp_rsvd_pad = 0;
1294 	etm_xid_ver_negot = etm_xid_cur;
1295 	etm_xid_cur += ETM_XID_INC;
1296 	ctl_hdrp->ctl_pp.pp_xid = htonl(etm_xid_ver_negot);
1297 	ctl_hdrp->ctl_pp.pp_timeout = htonl(ETM_PROTO_V1_TIMEOUT_FOREVER);
1298 	ctl_hdrp->ctl_len = htonl(body_sz);
1299 
1300 	body_buf = (void*)&ctl_hdrp->ctl_len;
1301 	body_buf += sizeof (ctl_hdrp->ctl_len);
1302 	*body_buf++ = ETM_PROTO_V3;
1303 	*body_buf++ = ETM_PROTO_V2;
1304 	*body_buf++ = ETM_PROTO_V1;
1305 	*body_buf++ = '\0';
1306 
1307 	/*
1308 	 * open and close a connection to send the ETM control msg
1309 	 * to any/all of the default dst addrs
1310 	 */
1311 
1312 	if ((addrv = etm_xport_get_ev_addrv(hdl, NULL)) == NULL) {
1313 		fmd_hdl_error(hdl,
1314 		    "error: bad ctl dst addrs errno %d\n", errno);
1315 		etm_stats.etm_xport_get_ev_addrv_fail.fmds_value.ui64++;
1316 		goto func_ret;
1317 	}
1318 
1319 	for (i = 0; addrv[i] != NULL; i++) {
1320 
1321 		if (etm_conn_open(hdl, "bad conn open during ver negot",
1322 		    addrv[i], &conn) < 0) {
1323 			continue;
1324 		}
1325 		if (etm_io_op(hdl, "bad io write on ctl hdr+body",
1326 		    conn, ctl_hdrp, hdr_sz + body_sz, ETM_IO_OP_WR) >= 0) {
1327 			etm_stats.etm_wr_hdr_control.fmds_value.ui64++;
1328 			etm_stats.etm_wr_body_control.fmds_value.ui64++;
1329 		}
1330 		(void) etm_conn_close(hdl, "bad conn close during ver negot",
1331 		    conn);
1332 
1333 	} /* foreach dst addr */
1334 
1335 func_ret:
1336 
1337 	if (addrv != NULL) {
1338 		etm_xport_free_addrv(hdl, addrv);
1339 	}
1340 	fmd_hdl_free(hdl, ctl_hdrp, hdr_sz + body_sz);
1341 
1342 } /* etm_req_ver_negot() */
1343 
1344 /*
1345  * Design_Note:	For all etm_resp_q_*() functions and etm_resp_q_* globals,
1346  *		the mutex etm_resp_q_lock must be held by the caller.
1347  */
1348 
1349 /*
1350  * etm_resp_q_enq - add element to tail of ETM responder queue
1351  * etm_resp_q_deq - del element from head of ETM responder queue
1352  *
1353  * return >0 for success, or -errno value
1354  */
1355 
1356 static int
1357 etm_resp_q_enq(fmd_hdl_t *hdl, etm_resp_q_ele_t *rqep)
1358 {
1359 	etm_resp_q_ele_t	*newp;	/* ptr to new resp q ele */
1360 
1361 	if (etm_resp_q_cur_len >= etm_resp_q_max_len) {
1362 		fmd_hdl_debug(hdl, "warning: enq to full responder queue\n");
1363 		etm_stats.etm_enq_drop_resp_q.fmds_value.ui64++;
1364 		return (-E2BIG);
1365 	}
1366 
1367 	newp = fmd_hdl_zalloc(hdl, sizeof (*newp), FMD_SLEEP);
1368 	(void) memcpy(newp, rqep, sizeof (*newp));
1369 	newp->rqe_nextp = NULL;
1370 
1371 	if (etm_resp_q_cur_len == 0) {
1372 		etm_resp_q_head = newp;
1373 	} else {
1374 		etm_resp_q_tail->rqe_nextp = newp;
1375 	}
1376 	etm_resp_q_tail = newp;
1377 	etm_resp_q_cur_len++;
1378 	etm_stats.etm_resp_q_cur_len.fmds_value.ui64 = etm_resp_q_cur_len;
1379 
1380 	return (1);
1381 
1382 } /* etm_resp_q_enq() */
1383 
1384 static int
1385 etm_resp_q_deq(fmd_hdl_t *hdl, etm_resp_q_ele_t *rqep)
1386 {
1387 	etm_resp_q_ele_t	*oldp;	/* ptr to old resp q ele */
1388 
1389 	if (etm_resp_q_cur_len == 0) {
1390 		fmd_hdl_debug(hdl, "warning: deq from empty responder queue\n");
1391 		etm_stats.etm_deq_drop_resp_q.fmds_value.ui64++;
1392 		return (-ENOENT);
1393 	}
1394 
1395 	(void) memcpy(rqep, etm_resp_q_head, sizeof (*rqep));
1396 	rqep->rqe_nextp = NULL;
1397 
1398 	oldp = etm_resp_q_head;
1399 	etm_resp_q_head = etm_resp_q_head->rqe_nextp;
1400 	fmd_hdl_free(hdl, oldp, sizeof (*oldp));
1401 
1402 	etm_resp_q_cur_len--;
1403 	etm_stats.etm_resp_q_cur_len.fmds_value.ui64 = etm_resp_q_cur_len;
1404 	if (etm_resp_q_cur_len == 0) {
1405 		etm_resp_q_tail = NULL;
1406 	}
1407 
1408 	return (1);
1409 
1410 } /* etm_resp_q_deq() */
1411 
1412 /*
1413  * etm_maybe_enq_response - check the given message header to see
1414  *				whether a response has been requested,
1415  *				if so then enqueue the given connection
1416  *				and header for later transport by the
1417  *				responder thread as an ETM response msg,
1418  *				return 0 for nop, >0 success, or -errno value
1419  */
1420 
1421 static ssize_t
1422 etm_maybe_enq_response(fmd_hdl_t *hdl, etm_xport_conn_t conn,
1423     void *hdrp, uint32_t hdr_sz, int32_t resp_code)
1424 {
1425 	ssize_t			rv;		/* ret val */
1426 	etm_proto_v1_pp_t	*ppp;		/* protocol preamble ptr */
1427 	uint8_t			orig_msg_type;	/* orig hdr's message type */
1428 	uint32_t		orig_timeout;	/* orig hdr's timeout */
1429 	etm_resp_q_ele_t	rqe;		/* responder queue ele */
1430 
1431 	ppp = hdrp;
1432 	orig_msg_type = ppp->pp_msg_type;
1433 	orig_timeout = ppp->pp_timeout;
1434 
1435 	/* bail out now if no response is to be sent */
1436 
1437 	if (orig_timeout == ETM_PROTO_V1_TIMEOUT_NONE) {
1438 		return (0);
1439 	} /* if a nop */
1440 
1441 	if ((orig_msg_type != ETM_MSG_TYPE_FMA_EVENT) &&
1442 	    (orig_msg_type != ETM_MSG_TYPE_ALERT) &&
1443 	    (orig_msg_type != ETM_MSG_TYPE_CONTROL)) {
1444 		fmd_hdl_debug(hdl, "warning: bad msg type 0x%x\n",
1445 		    orig_msg_type);
1446 		return (-EINVAL);
1447 	} /* if inappropriate hdr for a response msg */
1448 
1449 	/*
1450 	 * enqueue the msg hdr and nudge the responder thread
1451 	 * if the responder queue was previously empty
1452 	 */
1453 
1454 	rqe.rqe_conn = conn;
1455 	rqe.rqe_hdrp = hdrp;
1456 	rqe.rqe_hdr_sz = hdr_sz;
1457 	rqe.rqe_resp_code = resp_code;
1458 
1459 	(void) pthread_mutex_lock(&etm_resp_q_lock);
1460 	rv = etm_resp_q_enq(hdl, &rqe);
1461 	if (etm_resp_q_cur_len == 1)
1462 		(void) pthread_cond_signal(&etm_resp_q_cv);
1463 	(void) pthread_mutex_unlock(&etm_resp_q_lock);
1464 
1465 	return (rv);
1466 
1467 } /* etm_maybe_enq_response() */
1468 
1469 /*
1470  * Design_Note:	We rely on the fact that all message types have
1471  *		a common protocol preamble; if this fact should
1472  *		ever change it may break the code below. We also
1473  *		rely on the fact that FMA_EVENT and CONTROL headers
1474  *		returned by etm_hdr_read() will be sized large enough
1475  *		to reuse them as RESPONSE headers if the remote endpt
1476  *		asked for a response via the pp_timeout field.
1477  */
1478 
1479 /*
1480  * etm_send_response - use the given message header and response code
1481  *			to construct an appropriate response message,
1482  *			and send it back on the given connection,
1483  *			return >0 for success, or -errno value
1484  */
1485 
static ssize_t
etm_send_response(fmd_hdl_t *hdl, etm_xport_conn_t conn,
    void *hdrp, int32_t resp_code)
{
	ssize_t			rv;		/* ret val */
	etm_proto_v1_pp_t	*ppp;		/* protocol preamble ptr */
	etm_proto_v1_resp_hdr_t *resp_hdrp;	/* for RESPONSE msg */
	uint8_t			resp_body[4];	/* response body if needed */
	uint8_t			*resp_msg;	/* response hdr+body */
	size_t			hdr_sz;		/* sizeof response hdr */
	uint8_t			orig_msg_type;	/* orig hdr's message type */

	/*
	 * capture the original message type before the preamble fields
	 * are overwritten below; ppp and resp_hdrp alias the same hdrp
	 * memory, so order of reads/writes here matters
	 */
	ppp = hdrp;
	orig_msg_type = ppp->pp_msg_type;

	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "ante resp send");
	}

	/* reuse the given header as a response header */

	resp_hdrp = hdrp;
	resp_hdrp->resp_code = resp_code;
	resp_hdrp->resp_len = 0;		/* default is empty body */

	/*
	 * NOTE(review): resp_code and resp_len are written in host byte
	 * order with no htonl() here, unlike the lengths in etm_hdr_write();
	 * presumably both endpoints share endianness — confirm before
	 * reusing this code across dissimilar hosts.
	 */

	if ((orig_msg_type == ETM_MSG_TYPE_CONTROL) &&
	    (ppp->pp_sub_type == ETM_CTL_SEL_VER_NEGOT_REQ)) {
		resp_body[0] = ETM_PROTO_V2;
		resp_body[1] = ETM_PROTO_V3;
		resp_body[2] = 0;
		resp_hdrp->resp_len = 3;
	} /* if should send our/negotiated proto ver in resp body */

	/* respond with the proto ver that was negotiated */

	resp_hdrp->resp_pp.pp_proto_ver = etm_resp_ver;
	resp_hdrp->resp_pp.pp_msg_type = ETM_MSG_TYPE_RESPONSE;
	resp_hdrp->resp_pp.pp_timeout = ETM_PROTO_V1_TIMEOUT_NONE;

	/*
	 * send the whole response msg in one write, header and body;
	 * avoid the alloc-and-copy if we can reuse the hdr as the msg,
	 * ie, if the body is empty. update the response stats.
	 */

	hdr_sz = sizeof (etm_proto_v1_resp_hdr_t);

	/* resp_msg aliases hdrp unless a body forces a fresh hdr+body copy */

	resp_msg = hdrp;
	if (resp_hdrp->resp_len > 0) {
		resp_msg = fmd_hdl_zalloc(hdl, hdr_sz + resp_hdrp->resp_len,
		    FMD_SLEEP);
		(void) memcpy(resp_msg, resp_hdrp, hdr_sz);
		(void) memcpy(resp_msg + hdr_sz, resp_body,
		    resp_hdrp->resp_len);
	}

	/* serialize writers on the transport; see etm_responder() rationale */

	(void) pthread_mutex_lock(&etm_write_lock);
	rv = etm_io_op(hdl, "bad io write on resp msg", conn,
	    resp_msg, hdr_sz + resp_hdrp->resp_len, ETM_IO_OP_WR);
	(void) pthread_mutex_unlock(&etm_write_lock);
	if (rv < 0) {
		goto func_ret;
	}

	etm_stats.etm_wr_hdr_response.fmds_value.ui64++;
	etm_stats.etm_wr_body_response.fmds_value.ui64++;

	fmd_hdl_debug(hdl, "info: sent V%u RESPONSE msg to xport "
	    "xid 0x%x code %d len %u\n",
	    (unsigned int)resp_hdrp->resp_pp.pp_proto_ver,
	    resp_hdrp->resp_pp.pp_xid, resp_hdrp->resp_code,
	    resp_hdrp->resp_len);
func_ret:

	/* free the separate hdr+body copy iff one was allocated above */

	if (resp_hdrp->resp_len > 0) {
		fmd_hdl_free(hdl, resp_msg, hdr_sz + resp_hdrp->resp_len);
	}
	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "post resp send");
	}
	return (rv);

} /* etm_send_response() */
1569 
1570 /*
1571  * etm_reset_xport - reset the transport layer (via fini;init)
1572  *			presumably for an error condition we cannot
1573  *			otherwise recover from (ex: hung LDC channel)
1574  *
1575  * caveats - no checking/locking is done to ensure an existing connection
1576  *		is idle during an xport reset; we don't want to deadlock
1577  *		and presumably the transport is stuck/unusable anyway
1578  */
1579 
static void
etm_reset_xport(fmd_hdl_t *hdl)
{
	/* tear the transport down and bring it back up, then count it */

	(void) etm_xport_fini(hdl);
	(void) etm_xport_init(hdl);
	etm_stats.etm_reset_xport.fmds_value.ui64++;

} /* etm_reset_xport() */
1588 
1589 /*
1590  * etm_handle_new_conn - receive an ETM message sent from the other end via
1591  *			the given open connection, pull out any FMA events
1592  *			and post them to the local FMD (or handle any ETM
1593  *			control or response msg); when done, close the
1594  *			connection
1595  */
1596 
static void
etm_handle_new_conn(fmd_hdl_t *hdl, etm_xport_conn_t conn)
{
	etm_proto_v1_ev_hdr_t	*ev_hdrp;	/* for FMA_EVENT msg */
	etm_proto_v1_ctl_hdr_t	*ctl_hdrp;	/* for CONTROL msg */
	etm_proto_v1_resp_hdr_t *resp_hdrp;	/* for RESPONSE msg */
	etm_proto_v3_sa_hdr_t	*sa_hdrp;	/* for ALERT msg */
	int32_t			resp_code;	/* response code */
	ssize_t			enq_rv;		/* resp_q enqueue status */
	size_t			hdr_sz;		/* sizeof header */
	uint8_t			*body_buf;	/* msg body buffer */
	uint32_t		body_sz;	/* sizeof body_buf */
	uint32_t		ev_cnt;		/* count of FMA events */
	uint8_t			*bp;		/* byte ptr within body_buf */
	nvlist_t		*evp;		/* ptr to unpacked FMA event */
	char			*class;		/* FMA event class */
	ssize_t			i, n;		/* gen use */
	int			should_reset_xport; /* bool to reset xport */

	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "ante conn handle");
	}
	fmd_hdl_debug(hdl, "info: handling new conn %p\n", conn);

	/*
	 * initialize all cleanup-sensitive state before any "goto func_ret"
	 * is possible; the header pointers below all alias the single
	 * allocation returned by etm_hdr_read()
	 */

	should_reset_xport = 0;
	ev_hdrp = NULL;
	ctl_hdrp = NULL;
	resp_hdrp = NULL;
	sa_hdrp = NULL;
	body_buf = NULL;
	class = NULL;
	evp = NULL;
	resp_code = 0;	/* default is success */
	enq_rv = 0;	/* default is nop, ie, did not enqueue */

	/* read a network decoded message header from the connection */

	if ((ev_hdrp = etm_hdr_read(hdl, conn, &hdr_sz)) == NULL) {
		/* errno assumed set by above call */
		should_reset_xport = (errno == ENOTACTIVE);
		fmd_hdl_debug(hdl, "error: FMA event dropped: "
		    "bad hdr read errno %d\n", errno);
		etm_stats.etm_rd_drop_fmaevent.fmds_value.ui64++;
		goto func_ret;
	}

	/*
	 * handle the message based on its preamble pp_msg_type
	 * which is known to be valid from etm_hdr_read() checks
	 */

	if (ev_hdrp->ev_pp.pp_msg_type == ETM_MSG_TYPE_FMA_EVENT) {

		fmd_hdl_debug(hdl, "info: rcvd FMA_EVENT msg from xport\n");

		/* allocate buf large enough for whole body / all FMA events */

		/* ev_lens[] is 0-terminated per etm_hdr_read() */
		body_sz = 0;
		for (i = 0; ev_hdrp->ev_lens[i] != 0; i++) {
			body_sz += ev_hdrp->ev_lens[i];
		} /* for summing sizes of all FMA events */
		if (i > etm_stats.etm_rd_max_ev_per_msg.fmds_value.ui64)
			etm_stats.etm_rd_max_ev_per_msg.fmds_value.ui64 = i;
		ev_cnt = i;

		if (etm_debug_lvl >= 1) {
			fmd_hdl_debug(hdl, "info: event lengths %u sum %u\n",
			    ev_cnt, body_sz);
		}

		body_buf = fmd_hdl_zalloc(hdl, body_sz, FMD_SLEEP);

		/* read all the FMA events at once */

		if ((n = etm_io_op(hdl, "FMA event dropped: "
		    "bad io read on event bodies", conn, body_buf, body_sz,
		    ETM_IO_OP_RD)) < 0) {
			should_reset_xport = (n == -ENOTACTIVE);
			etm_stats.etm_rd_drop_fmaevent.fmds_value.ui64++;
			goto func_ret;
		}

		etm_stats.etm_rd_xport_bytes.fmds_value.ui64 += body_sz;
		etm_stats.etm_rd_body_fmaevent.fmds_value.ui64 += ev_cnt;

		/*
		 * now that we've read the entire ETM msg from the conn,
		 * which avoids later ETM protocol framing errors if we didn't,
		 * check for dup msg/xid against last good FMD posting,
		 * if a dup then resend response but skip repost to FMD
		 */

		if (ev_hdrp->ev_pp.pp_xid == etm_xid_posted_ev) {
			enq_rv = etm_maybe_enq_response(hdl, conn,
			    ev_hdrp, hdr_sz, 0);
			fmd_hdl_debug(hdl, "info: skipping dup FMA event post "
			    "xid 0x%x\n", etm_xid_posted_ev);
			etm_stats.etm_rd_dup_fmaevent.fmds_value.ui64++;
			goto func_ret;
		}

		/* unpack each FMA event and post it to FMD */

		bp = body_buf;
		for (i = 0; i < ev_cnt; i++) {
			/*
			 * a bad unpack drops only this one event; bp is
			 * still advanced so the remaining events stay
			 * framed correctly
			 */
			if ((n = nvlist_unpack((char *)bp,
			    ev_hdrp->ev_lens[i], &evp, 0)) != 0) {
				resp_code = (-n);
				enq_rv = etm_maybe_enq_response(hdl, conn,
				    ev_hdrp, hdr_sz, resp_code);
				fmd_hdl_error(hdl, "error: FMA event dropped: "
				    "bad event body unpack errno %d\n", n);
				if (etm_debug_lvl >= 2) {
					fmd_hdl_debug(hdl, "info: FMA event "
					    "hexdump %d bytes:\n",
					    ev_hdrp->ev_lens[i]);
					etm_hexdump(hdl, bp,
					    ev_hdrp->ev_lens[i]);
				}
				etm_stats.etm_os_nvlist_unpack_fail.fmds_value.
				    ui64++;
				etm_stats.etm_rd_drop_fmaevent.fmds_value.
				    ui64++;
				bp += ev_hdrp->ev_lens[i];
				continue;
			}
			if (etm_debug_lvl >= 1) {
				(void) nvlist_lookup_string(evp, FM_CLASS,
				    &class);
				if (class == NULL) {
					class = "NULL";
				}
				fmd_hdl_debug(hdl, "info: FMA event %p "
				    "class %s\n", evp, class);
			}
			/*
			 * etm_post_to_fmd() hands evp to FMD, which frees
			 * it; evp must be NULLed and never touched again
			 */
			resp_code = etm_post_to_fmd(hdl, evp);
			if (resp_code >= 0) {
				etm_xid_posted_ev = ev_hdrp->ev_pp.pp_xid;
			}
			evp = NULL;
			enq_rv = etm_maybe_enq_response(hdl, conn,
			    ev_hdrp, hdr_sz, resp_code);
			bp += ev_hdrp->ev_lens[i];
		} /* foreach FMA event in the body buffer */

	} else if (ev_hdrp->ev_pp.pp_msg_type == ETM_MSG_TYPE_CONTROL) {

		ctl_hdrp = (void*)ev_hdrp;

		fmd_hdl_debug(hdl, "info: rcvd CONTROL msg from xport\n");
		if (etm_debug_lvl >= 1) {
			fmd_hdl_debug(hdl, "info: ctl sel %d xid 0x%x\n",
			    (int)ctl_hdrp->ctl_pp.pp_sub_type,
			    ctl_hdrp->ctl_pp.pp_xid);
		}

		/*
		 * if we have a VER_NEGOT_REQ read the body and validate
		 * the protocol version set contained therein,
		 * otherwise we have a PING_REQ (which has no body)
		 * and we [also] fall thru to the code which sends a
		 * response msg if the pp_timeout field requested one
		 */

		if (ctl_hdrp->ctl_pp.pp_sub_type == ETM_CTL_SEL_VER_NEGOT_REQ) {

			body_sz = ctl_hdrp->ctl_len;
			body_buf = fmd_hdl_zalloc(hdl, body_sz, FMD_SLEEP);

			if ((n = etm_io_op(hdl, "bad io read on ctl body",
			    conn, body_buf, body_sz, ETM_IO_OP_RD)) < 0) {
				should_reset_xport = (n == -ENOTACTIVE);
				goto func_ret;
			}

			/* complain if version set completely incompatible */

			for (i = 0; i < body_sz; i++) {
				if ((body_buf[i] == ETM_PROTO_V1) ||
				    (body_buf[i] == ETM_PROTO_V2) ||
				    (body_buf[i] == ETM_PROTO_V3)) {
					break;
				}
			}
			if (i >= body_sz) {
				etm_stats.etm_ver_bad.fmds_value.ui64++;
				resp_code = (-EPROTO);
			}

		} /* if got version set request */

		etm_stats.etm_rd_body_control.fmds_value.ui64++;

		enq_rv = etm_maybe_enq_response(hdl, conn,
		    ctl_hdrp, hdr_sz, resp_code);

	} else if (ev_hdrp->ev_pp.pp_msg_type == ETM_MSG_TYPE_RESPONSE) {

		resp_hdrp = (void*)ev_hdrp;

		fmd_hdl_debug(hdl, "info: rcvd RESPONSE msg from xport\n");
		if (etm_debug_lvl >= 1) {
			fmd_hdl_debug(hdl, "info: resp xid 0x%x\n",
			    (int)resp_hdrp->resp_pp.pp_xid);
		}

		/* NOTE(review): resp_len may be 0 here; zalloc(0) — verify */
		body_sz = resp_hdrp->resp_len;
		body_buf = fmd_hdl_zalloc(hdl, body_sz, FMD_SLEEP);

		if ((n = etm_io_op(hdl, "bad io read on resp len",
		    conn, body_buf, body_sz, ETM_IO_OP_RD)) < 0) {
			should_reset_xport = (n == -ENOTACTIVE);
			goto func_ret;
		}

		etm_stats.etm_rd_body_response.fmds_value.ui64++;

		/*
		 * look up the xid to interpret the response body
		 *
		 * ping is a nop; for ver negot confirm that a supported
		 * protocol version was negotiated and remember which one
		 */

		if ((resp_hdrp->resp_pp.pp_xid != etm_xid_ping) &&
		    (resp_hdrp->resp_pp.pp_xid != etm_xid_ver_negot)) {
			etm_stats.etm_xid_bad.fmds_value.ui64++;
			goto func_ret;
		}

		if (resp_hdrp->resp_pp.pp_xid == etm_xid_ver_negot) {
			if ((body_buf[0] < ETM_PROTO_V1) ||
			    (body_buf[0] > ETM_PROTO_V3)) {
				etm_stats.etm_ver_bad.fmds_value.ui64++;
				goto func_ret;
			}
			etm_resp_ver = body_buf[0];
		} /* if have resp to last req to negotiate proto ver */

	} else if (ev_hdrp->ev_pp.pp_msg_type == ETM_MSG_TYPE_ALERT) {

		sa_hdrp = (void*)ev_hdrp;

		fmd_hdl_debug(hdl, "info: rcvd ALERT msg from xport\n");
		if (etm_debug_lvl >= 1) {
			fmd_hdl_debug(hdl, "info: sa sel %d xid 0x%x\n",
			    (int)sa_hdrp->sa_pp.pp_sub_type,
			    sa_hdrp->sa_pp.pp_xid);
		}

		body_sz = sa_hdrp->sa_len;
		body_buf = fmd_hdl_zalloc(hdl, body_sz, FMD_SLEEP);

		if ((n = etm_io_op(hdl, "bad io read on sa body",
		    conn, body_buf, body_sz, ETM_IO_OP_RD)) < 0) {
			should_reset_xport = (n == -ENOTACTIVE);
			goto func_ret;
		}

		etm_stats.etm_rd_body_alert.fmds_value.ui64++;

		/*
		 * now that we've read the entire ETM msg from the conn,
		 * which avoids later ETM protocol framing errors if we didn't,
		 * check for dup msg/xid against last good syslog posting,
		 * if a dup then resend response but skip repost to syslog
		 */

		if (sa_hdrp->sa_pp.pp_xid == etm_xid_posted_sa) {
			enq_rv = etm_maybe_enq_response(hdl, conn,
			    sa_hdrp, hdr_sz, 0);
			fmd_hdl_debug(hdl, "info: skipping dup ALERT post "
			    "xid 0x%x\n", etm_xid_posted_sa);
			etm_stats.etm_rd_dup_alert.fmds_value.ui64++;
			goto func_ret;
		}

		resp_code = etm_post_to_syslog(hdl, sa_hdrp->sa_priority,
		    body_sz, body_buf);
		if (resp_code >= 0) {
			etm_xid_posted_sa = sa_hdrp->sa_pp.pp_xid;
		}
		enq_rv = etm_maybe_enq_response(hdl, conn,
		    sa_hdrp, hdr_sz, resp_code);
	} /* whether we have a FMA_EVENT, CONTROL, RESPONSE or ALERT msg */

func_ret:

	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "post conn handle");
	}

	/*
	 * if no responder ele was enqueued, close the conn now
	 * and free the ETM msg hdr; the ETM msg body is not needed
	 * by the responder thread and should always be freed here
	 */

	if (enq_rv <= 0) {
		(void) etm_conn_close(hdl, "bad conn close after msg recv",
		    conn);
		if (ev_hdrp != NULL) {
			fmd_hdl_free(hdl, ev_hdrp, hdr_sz);
		}
	}
	if (body_buf != NULL) {
		fmd_hdl_free(hdl, body_buf, body_sz);
	}
	if (should_reset_xport) {
		etm_reset_xport(hdl);
	}
} /* etm_handle_new_conn() */
1909 
1910 /*
1911  * etm_handle_bad_accept - recover from a failed connection acceptance
1912  */
1913 
1914 static void
1915 etm_handle_bad_accept(fmd_hdl_t *hdl, int nev)
1916 {
1917 	int	should_reset_xport; /* bool to reset xport */
1918 
1919 	should_reset_xport = (nev == -ENOTACTIVE);
1920 	fmd_hdl_debug(hdl, "error: bad conn accept errno %d\n", (-nev));
1921 	etm_stats.etm_xport_accept_fail.fmds_value.ui64++;
1922 	(void) etm_sleep(etm_bad_acc_to_sec); /* avoid spinning CPU */
1923 	if (should_reset_xport) {
1924 		etm_reset_xport(hdl);
1925 	}
1926 } /* etm_handle_bad_accept() */
1927 
1928 /*
1929  * etm_server - loop forever accepting new connections
1930  *		using the given FMD handle,
1931  *		handling any ETM msgs sent from the other side
1932  *		via each such connection
1933  */
1934 
1935 static void
1936 etm_server(void *arg)
1937 {
1938 	etm_xport_conn_t	conn;		/* connection handle */
1939 	int			nev;		/* -errno val */
1940 	fmd_hdl_t		*hdl;		/* FMD handle */
1941 
1942 	hdl = arg;
1943 
1944 	fmd_hdl_debug(hdl, "info: connection server starting\n");
1945 
1946 	while (!etm_is_dying) {
1947 
1948 		if ((conn = etm_xport_accept(hdl, NULL)) == NULL) {
1949 			/* errno assumed set by above call */
1950 			nev = (-errno);
1951 			if (etm_is_dying) {
1952 				break;
1953 			}
1954 			etm_handle_bad_accept(hdl, nev);
1955 			continue;
1956 		}
1957 
1958 		/* handle the new message/connection, closing it when done */
1959 
1960 		etm_handle_new_conn(hdl, conn);
1961 
1962 	} /* while accepting new connections until ETM dies */
1963 
1964 	/* ETM is dying (probably due to "fmadm unload etm") */
1965 
1966 	fmd_hdl_debug(hdl, "info: connection server is dying\n");
1967 
1968 } /* etm_server() */
1969 
1970 /*
1971  * etm_responder - loop forever waiting for new responder queue elements
1972  *		to be enqueued, for each one constructing and sending
1973  *		an ETM response msg to the other side, and closing its
1974  *		associated connection when appropriate
1975  *
1976  *	this thread exists to ensure that the etm_server() thread
1977  *	never pends indefinitely waiting on the xport write lock, and is
1978  *	hence always available to accept new connections and handle
1979  *	incoming messages
1980  *
1981  *	this design relies on the fact that each connection accepted and
1982  *	returned by the ETM xport layer is unique, and each can be closed
1983  *	independently of the others while multiple connections are
1984  *	outstanding
1985  */
1986 
static void
etm_responder(void *arg)
{
	ssize_t			n;		/* gen use */
	fmd_hdl_t		*hdl;		/* FMD handle */
	etm_resp_q_ele_t	rqe;		/* responder queue ele */

	hdl = arg;

	fmd_hdl_debug(hdl, "info: responder server starting\n");

	while (!etm_is_dying) {

		(void) pthread_mutex_lock(&etm_resp_q_lock);

		/*
		 * block until an element is enqueued; re-test the predicate
		 * after every wakeup (both spurious wakeups and the shutdown
		 * nudge from _fmd_fini land here)
		 */
		while (etm_resp_q_cur_len == 0) {
			(void) pthread_cond_wait(&etm_resp_q_cv,
			    &etm_resp_q_lock);
			if (etm_is_dying) {
				(void) pthread_mutex_unlock(&etm_resp_q_lock);
				goto func_ret;
			}
		} /* while the responder queue is empty, wait to be nudged */

		/*
		 * for every responder ele that has been enqueued,
		 * dequeue and send it as an ETM response msg,
		 * closing its associated conn and freeing its hdr
		 *
		 * enter the queue draining loop holding the responder
		 * queue lock, but do not hold the lock indefinitely
		 * (the actual send may pend us indefinitely),
		 * so that other threads will never pend for long
		 * trying to enqueue a new element
		 */

		while (etm_resp_q_cur_len > 0) {

			(void) etm_resp_q_deq(hdl, &rqe);
			(void) pthread_mutex_unlock(&etm_resp_q_lock);

			/* send with the queue lock dropped; this may pend */
			if ((n = etm_send_response(hdl, rqe.rqe_conn,
			    rqe.rqe_hdrp, rqe.rqe_resp_code)) < 0) {
				fmd_hdl_error(hdl, "error: bad resp send "
				    "errno %d\n", (-n));
			}

			(void) etm_conn_close(hdl, "bad conn close after resp",
			    rqe.rqe_conn);
			fmd_hdl_free(hdl, rqe.rqe_hdrp, rqe.rqe_hdr_sz);

			/* queue lock is NOT held here, so exit directly */
			if (etm_is_dying) {
				goto func_ret;
			}
			(void) pthread_mutex_lock(&etm_resp_q_lock);

		} /* while draining the responder queue */

		(void) pthread_mutex_unlock(&etm_resp_q_lock);

	} /* while awaiting and sending resp msgs until ETM dies */

func_ret:

	/* ETM is dying (probably due to "fmadm unload etm") */

	fmd_hdl_debug(hdl, "info: responder server is dying\n");

	/*
	 * drain anything still queued at shutdown: close each element's
	 * conn and free its hdr without attempting a send
	 */
	(void) pthread_mutex_lock(&etm_resp_q_lock);
	if (etm_resp_q_cur_len > 0) {
		fmd_hdl_error(hdl, "warning: %d response msgs dropped\n",
		    (int)etm_resp_q_cur_len);
		while (etm_resp_q_cur_len > 0) {
			(void) etm_resp_q_deq(hdl, &rqe);
			(void) etm_conn_close(hdl, "bad conn close after deq",
			    rqe.rqe_conn);
			fmd_hdl_free(hdl, rqe.rqe_hdrp, rqe.rqe_hdr_sz);
		}
	}
	(void) pthread_mutex_unlock(&etm_resp_q_lock);

} /* etm_responder() */
2069 
2070 static void *
2071 etm_init_alloc(size_t size)
2072 {
2073 	return (fmd_hdl_alloc(init_hdl, size, FMD_SLEEP));
2074 }
2075 
2076 static void
2077 etm_init_free(void *addr, size_t size)
2078 {
2079 	fmd_hdl_free(init_hdl, addr, size);
2080 }
2081 
2082 /*
2083  * -------------------------- FMD entry points -------------------------------
2084  */
2085 
2086 /*
2087  * _fmd_init - initialize the transport for use by ETM and start the
2088  *		server daemon to accept new connections to us
2089  *
2090  *		FMD will read our *.conf and subscribe us to FMA events
2091  */
2092 
void
_fmd_init(fmd_hdl_t *hdl)
{
	struct timeval		tmv;		/* timeval */
	ssize_t			n;		/* gen use */
	ldom_hdl_t		*lhp;		/* ldom pointer */
	const struct facility	*fp;		/* syslog facility matching */
	char			*facname;	/* syslog facility property */

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
		return; /* invalid data in configuration file */
	}

	fmd_hdl_debug(hdl, "info: module initializing\n");

	/* stash the handle for the etm_init_alloc()/etm_init_free() hooks */
	init_hdl = hdl;
	lhp = ldom_init(etm_init_alloc, etm_init_free);

	/*
	 * Do not load this module if it is running on a guest ldom.
	 */
	if (ldom_major_version(lhp) == 1 && ldom_on_service(lhp) == 0) {
		fmd_hdl_debug(hdl, "info: module unregistering\n");
		ldom_fini(lhp);
		fmd_hdl_unregister(hdl);
		return;
	} else {
		ldom_fini(lhp);
	}

	/* setup statistics and properties from FMD */

	(void) fmd_stat_create(hdl, FMD_STAT_NOALLOC,
	    sizeof (etm_stats) / sizeof (fmd_stat_t), (fmd_stat_t *)&etm_stats);

	etm_debug_lvl = fmd_prop_get_int32(hdl, ETM_PROP_NM_DEBUG_LVL);
	etm_debug_max_ev_cnt = fmd_prop_get_int32(hdl,
	    ETM_PROP_NM_DEBUG_MAX_EV_CNT);
	fmd_hdl_debug(hdl, "info: etm_debug_lvl %d "
	    "etm_debug_max_ev_cnt %d\n", etm_debug_lvl, etm_debug_max_ev_cnt);

	etm_resp_q_max_len = fmd_prop_get_int32(hdl,
	    ETM_PROP_NM_MAX_RESP_Q_LEN);
	etm_stats.etm_resp_q_max_len.fmds_value.ui64 = etm_resp_q_max_len;
	etm_bad_acc_to_sec = fmd_prop_get_int32(hdl,
	    ETM_PROP_NM_BAD_ACC_TO_SEC);

	/* obtain an FMD transport handle so we can post FMA events later */

	etm_fmd_xprt = fmd_xprt_open(hdl, FMD_XPRT_RDONLY, NULL, NULL);

	/*
	 * encourage protocol transaction id to be unique per module load;
	 * seconds and microseconds are mixed so quick reloads still differ
	 */

	(void) gettimeofday(&tmv, NULL);
	etm_xid_cur = (uint32_t)((tmv.tv_sec << 10) |
	    ((unsigned long)tmv.tv_usec >> 10));

	/* init the ETM transport */

	if ((n = etm_xport_init(hdl)) != 0) {
		fmd_hdl_error(hdl, "error: bad xport init errno %d\n", (-n));
		fmd_hdl_unregister(hdl);
		return;
	}

	/*
	 * Cache any properties we use every time we receive an alert.
	 */
	syslog_file = fmd_prop_get_int32(hdl, ETM_PROP_NM_SYSLOGD);
	syslog_cons = fmd_prop_get_int32(hdl, ETM_PROP_NM_CONSOLE);

	/* on open failure, disable that logging path rather than fail init */
	if (syslog_file && (syslog_logfd = open("/dev/conslog",
	    O_WRONLY | O_NOCTTY)) == -1) {
		fmd_hdl_error(hdl, "error: failed to open /dev/conslog");
		syslog_file = 0;
	}

	if (syslog_cons && (syslog_msgfd = open("/dev/sysmsg",
	    O_WRONLY | O_NOCTTY)) == -1) {
		fmd_hdl_error(hdl, "error: failed to open /dev/sysmsg");
		syslog_cons = 0;
	}

	if (syslog_file) {
		/*
		 * Look up the value of the "facility" property and use it to
		 * determine what syslog LOG_* facility value we use to
		 * fill in our log_ctl_t.
		 *
		 * NOTE(review): assumes the property always resolves to a
		 * non-NULL string (e.g. via a default in etm.conf); a NULL
		 * facname would crash the strcmp below -- confirm.
		 */
		facname = fmd_prop_get_string(hdl, ETM_PROP_NM_FACILITY);

		for (fp = syslog_facs; fp->fac_name != NULL; fp++) {
			if (strcmp(fp->fac_name, facname) == 0)
				break;
		}

		if (fp->fac_name == NULL) {
			fmd_hdl_error(hdl, "error: invalid 'facility'"
			    " setting: %s\n", facname);
			syslog_file = 0;
		} else {
			syslog_facility = fp->fac_value;
			syslog_ctl.flags = SL_CONSOLE | SL_LOGONLY;
		}

		fmd_prop_free_string(hdl, facname);
	}

	/*
	 * start the message responder and the connection acceptance server;
	 * request protocol version be negotiated after waiting a second
	 * for the receiver to be ready to start handshaking
	 */

	etm_resp_tid = fmd_thr_create(hdl, etm_responder, hdl);
	etm_svr_tid = fmd_thr_create(hdl, etm_server, hdl);

	(void) etm_sleep(ETM_SLEEP_QUIK);
	etm_req_ver_negot(hdl);

	fmd_hdl_debug(hdl, "info: module initialized ok\n");

} /* _fmd_init() */
2216 
2217 /*
2218  * etm_recv - receive an FMA event from FMD and transport it
2219  *		to the remote endpoint
2220  */
2221 
/*ARGSUSED*/
void
etm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *evp, const char *class)
{
	etm_xport_addr_t	*addrv;	/* vector of transport addresses */
	etm_xport_conn_t	conn;	/* connection handle */
	etm_proto_v1_ev_hdr_t	*hdrp;	/* for FMA_EVENT msg */
	ssize_t			i, n;	/* gen use */
	size_t			sz;	/* header size */
	size_t			buflen;	/* size of packed FMA event */
	uint8_t			*buf;	/* tmp buffer for packed FMA event */

	/* size the XDR-packed event up front so we allocate exactly once */
	buflen = 0;
	if ((n = nvlist_size(evp, &buflen, NV_ENCODE_XDR)) != 0) {
		fmd_hdl_error(hdl, "error: FMA event dropped: "
		    "event size errno %d class %s\n", n, class);
		etm_stats.etm_os_nvlist_size_fail.fmds_value.ui64++;
		etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
		return;
	}

	/* note: the count logged here is the pre-increment value */
	fmd_hdl_debug(hdl, "info: rcvd event %p from FMD\n", evp);
	fmd_hdl_debug(hdl, "info: cnt %llu class %s\n",
	    etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64, class);

	etm_stats.etm_rd_fmd_bytes.fmds_value.ui64 += buflen;
	etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64++;

	/*
	 * if the debug limit has been set, avoid excessive traffic,
	 * for example, an infinite cycle using loopback nodes
	 */

	if ((etm_debug_max_ev_cnt >= 0) &&
	    (etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64 >
	    etm_debug_max_ev_cnt)) {
		fmd_hdl_debug(hdl, "warning: FMA event dropped: "
		    "event %p cnt %llu > debug max %d\n", evp,
		    etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64,
		    etm_debug_max_ev_cnt);
		etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
		return;
	}

	/* allocate a buffer for the FMA event and nvlist pack it */

	buf = fmd_hdl_zalloc(hdl, buflen, FMD_SLEEP);

	/*
	 * pack into the preallocated buf; buf is freed with buflen below,
	 * so both must stay paired from here on
	 */
	if ((n = nvlist_pack(evp, (char **)&buf, &buflen,
	    NV_ENCODE_XDR, 0)) != 0) {
		fmd_hdl_error(hdl, "error: FMA event dropped: "
		    "event pack errno %d class %s\n", n, class);
		etm_stats.etm_os_nvlist_pack_fail.fmds_value.ui64++;
		etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
		fmd_hdl_free(hdl, buf, buflen);
		return;
	}

	/* get vector of dst addrs and send the FMA event to each one */

	if ((addrv = etm_xport_get_ev_addrv(hdl, evp)) == NULL) {
		fmd_hdl_error(hdl, "error: FMA event dropped: "
		    "bad event dst addrs errno %d\n", errno);
		etm_stats.etm_xport_get_ev_addrv_fail.fmds_value.ui64++;
		etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
		fmd_hdl_free(hdl, buf, buflen);
		return;
	}

	/* addrv is NULL-terminated; a per-addr failure skips only that addr */
	for (i = 0; addrv[i] != NULL; i++) {

		/* open a new connection to this dst addr */

		if ((n = etm_conn_open(hdl, "FMA event dropped: "
		    "bad conn open on new ev", addrv[i], &conn)) < 0) {
			etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
			continue;
		}

		/*
		 * hold etm_write_lock across both the hdr and body writes
		 * so they appear contiguous on the wire; every error path
		 * below must drop the lock before continuing
		 */
		(void) pthread_mutex_lock(&etm_write_lock);

		/* write the ETM message header */

		if ((hdrp = etm_hdr_write(hdl, conn, evp, NV_ENCODE_XDR,
		    &sz)) == NULL) {
			(void) pthread_mutex_unlock(&etm_write_lock);
			fmd_hdl_error(hdl, "error: FMA event dropped: "
			    "bad hdr write errno %d\n", errno);
			(void) etm_conn_close(hdl,
			    "bad conn close per bad hdr wr", conn);
			etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
			continue;
		}

		fmd_hdl_free(hdl, hdrp, sz);	/* header not needed */
		etm_stats.etm_wr_hdr_fmaevent.fmds_value.ui64++;
		fmd_hdl_debug(hdl, "info: hdr xport write ok for event %p\n",
		    evp);

		/* write the ETM message body, ie, the packed nvlist */

		if ((n = etm_io_op(hdl, "FMA event dropped: "
		    "bad io write on event", conn,
		    buf, buflen, ETM_IO_OP_WR)) < 0) {
			(void) pthread_mutex_unlock(&etm_write_lock);
			(void) etm_conn_close(hdl,
			    "bad conn close per bad body wr", conn);
			etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
			continue;
		}

		(void) pthread_mutex_unlock(&etm_write_lock);

		etm_stats.etm_wr_body_fmaevent.fmds_value.ui64++;
		etm_stats.etm_wr_xport_bytes.fmds_value.ui64 += buflen;
		fmd_hdl_debug(hdl, "info: body xport write ok for event %p\n",
		    evp);

		/* close the connection */

		(void) etm_conn_close(hdl, "bad conn close after event send",
		    conn);
	} /* foreach dst addr in the vector */

	etm_xport_free_addrv(hdl, addrv);
	fmd_hdl_free(hdl, buf, buflen);

} /* etm_recv() */
2350 
2351 /*
2352  * _fmd_fini - stop the server daemon and teardown the transport
2353  */
2354 
2355 void
2356 _fmd_fini(fmd_hdl_t *hdl)
2357 {
2358 	ssize_t	n;	/* gen use */
2359 
2360 	fmd_hdl_debug(hdl, "info: module finalizing\n");
2361 
2362 	/* kill the connection server and responder ; wait for them to die */
2363 
2364 	etm_is_dying = 1;
2365 
2366 	if (etm_svr_tid != NULL) {
2367 		fmd_thr_signal(hdl, etm_svr_tid);
2368 		fmd_thr_destroy(hdl, etm_svr_tid);
2369 		etm_svr_tid = NULL;
2370 	} /* if server thread was successfully created */
2371 
2372 	if (etm_resp_tid != NULL) {
2373 		fmd_thr_signal(hdl, etm_resp_tid);
2374 		fmd_thr_destroy(hdl, etm_resp_tid);
2375 		etm_resp_tid = NULL;
2376 	} /* if responder thread was successfully created */
2377 
2378 	/* teardown the transport and cleanup syslogging */
2379 
2380 	if ((n = etm_xport_fini(hdl)) != 0) {
2381 		fmd_hdl_error(hdl, "warning: xport fini errno %d\n", (-n));
2382 	}
2383 	if (etm_fmd_xprt != NULL) {
2384 		fmd_xprt_close(hdl, etm_fmd_xprt);
2385 	}
2386 
2387 	if (syslog_logfd != -1) {
2388 		(void) close(syslog_logfd);
2389 	}
2390 	if (syslog_msgfd != -1) {
2391 		(void) close(syslog_msgfd);
2392 	}
2393 
2394 	fmd_hdl_debug(hdl, "info: module finalized ok\n");
2395 
2396 } /* _fmd_fini() */
2397