xref: /titanic_41/usr/src/cmd/fm/modules/sun4v/etm/etm.c (revision 53391baf4e45c693cf123555e9617b5e1e0b641a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 /*
29  * etm.c	FMA Event Transport Module implementation, a plugin of FMD
30  *		for sun4v/Ontario
31  *
32  * plugin for sending/receiving FMA events to/from service processor
33  */
34 
35 #pragma ident	"%Z%%M%	%I%	%E% SMI"
36 
37 /*
38  * --------------------------------- includes --------------------------------
39  */
40 
41 #include <sys/fm/protocol.h>
42 #include <sys/sysevent/eventdefs.h>
43 #include <sys/fm/util.h>
44 #include <netinet/in.h>
45 #include <fm/fmd_api.h>
46 #include <libsysevent.h>
47 
48 #include "etm_xport_api.h"
49 #include "etm_etm_proto.h"
50 #include "etm_impl.h"
51 
52 #include <pthread.h>
53 #include <signal.h>
54 #include <stropts.h>
55 #include <locale.h>
56 #include <strings.h>
57 #include <stdlib.h>
58 #include <unistd.h>
59 #include <limits.h>
60 #include <values.h>
61 #include <alloca.h>
62 #include <errno.h>
63 #include <fcntl.h>
64 #include <time.h>
65 
66 /*
67  * ----------------------------- forward decls -------------------------------
68  */
69 
70 static void
71 etm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class);
72 
73 /*
74  * ------------------------- data structs for FMD ----------------------------
75  */
76 
/*
 * FMD entry point vector for this module: only the event receive
 * entry point (fmdo_recv) is implemented; all others are unused.
 */
static const fmd_hdl_ops_t fmd_ops = {
	etm_recv,	/* fmdo_recv */
	NULL,		/* fmdo_timeout */
	NULL,		/* fmdo_close */
	NULL,		/* fmdo_stats */
	NULL,		/* fmdo_gc */
};
84 
/*
 * properties configurable via this module's FMD .conf file;
 * the array is NULL-terminated as required by FMD.
 * NOTE(review): the debug defaults appear to mirror the static
 * etm_debug_lvl / etm_debug_max_ev_cnt globals — confirm in _fmd_init
 */
static const fmd_prop_t fmd_props[] = {
	{ ETM_PROP_NM_XPORT_ADDRS,	FMD_TYPE_STRING, "" },
	{ ETM_PROP_NM_DEBUG_LVL,	FMD_TYPE_INT32, "0" },
	{ ETM_PROP_NM_DEBUG_MAX_EV_CNT,	FMD_TYPE_INT32, "-1" },
	{ NULL, 0, NULL }
};
91 
/* module registration record: name, version, entry points, properties */
static const fmd_hdl_info_t fmd_info = {
	"FMA Event Transport Module", "1.0", &fmd_ops, fmd_props
};
95 
96 /*
97  * ----------------------- private consts and defns --------------------------
98  */
99 
100 /* misc buffer for variable sized protocol header fields */
101 
102 #define	ETM_MISC_BUF_SZ	(4 * 1024)
103 
104 /* try limit for IO operations w/ capped exp backoff sleep on retry */
105 
106 /*
107  * Design_Note:	ETM will potentially retry forever IO operations that the
108  *		transport fails with EAGAIN (aka EWOULDBLOCK) rather than
109  *		giving up after some number of seconds. This avoids
110  *		dropping FMA events while the service processor is down,
111  *		but at the risk of pending fmdo_recv() forever and
112  *		overflowing FMD's event queue for ETM.
113  *		A future TBD enhancement would be to always recv
114  *		and send each ETM msg in a single read/write() to reduce
115  *		the risk of failure between ETM msg hdr and body,
116  *		assuming the MTU_SZ is large enough.
117  */
118 
119 #define	ETM_TRY_MAX_CNT		(MAXINT - 1)
120 #define	ETM_TRY_BACKOFF_RATE	(4)
121 #define	ETM_TRY_BACKOFF_CAP	(60)
122 
123 /* amount to increment protocol transaction id on each new send */
124 
125 #define	ETM_XID_INC	(2)
126 
127 /*
128  * ---------------------------- global data ----------------------------------
129  */
130 
/*
 * module-wide state.
 * NOTE(review): none of these are protected by a lock; access appears
 * to be confined to the FMD module thread(s) — confirm against the
 * server thread (etm_svr_tid) usage elsewhere in this file.
 */

static int
etm_debug_lvl = 0;	/* debug level: 0 is off, 1 is on, 2 is more, etc */

static int
etm_debug_max_ev_cnt = -1; /* max allowed event count for debugging */

static pthread_t
etm_svr_tid = NULL;	/* thread id of connection acceptance server */

static volatile int
etm_is_dying = 0;	/* bool for dying (killing self) */

static uint32_t
etm_xid_cur = 0;	/* current transaction id for sends */

static uint32_t
etm_xid_ping = 0;	/* xid of last CONTROL msg sent requesting ping */

static uint32_t
etm_xid_ver_negot = 0;	/* xid of last CONTROL msg sent requesting ver negot */

static uint32_t
etm_xid_posted_ev = 0;	/* xid of last FMA_EVENT msg/event posted OK to FMD */

static uint8_t
etm_resp_ver = ETM_PROTO_V1; /* proto ver [negotiated] for msg sends */
157 
/*
 * etm_stats - FMD-visible statistics for this module, one fmd_stat_t
 * per counter, grouped by category.  The initializer below is
 * positional, so its entries must stay in exactly the same order as
 * the struct members.
 */
static struct stats {

	/* ETM msg counters */

	fmd_stat_t etm_rd_hdr_fmaevent;
	fmd_stat_t etm_rd_hdr_control;
	fmd_stat_t etm_rd_hdr_response;
	fmd_stat_t etm_rd_body_fmaevent;
	fmd_stat_t etm_rd_body_control;
	fmd_stat_t etm_rd_body_response;
	fmd_stat_t etm_wr_hdr_fmaevent;
	fmd_stat_t etm_wr_hdr_control;
	fmd_stat_t etm_wr_hdr_response;
	fmd_stat_t etm_wr_body_fmaevent;
	fmd_stat_t etm_wr_body_control;
	fmd_stat_t etm_wr_body_response;

	/* ETM byte counters */

	fmd_stat_t etm_wr_fmd_bytes;
	fmd_stat_t etm_rd_fmd_bytes;
	fmd_stat_t etm_wr_xport_bytes;
	fmd_stat_t etm_rd_xport_bytes;

	fmd_stat_t etm_magic_drop_bytes;

	/* ETM [dropped] FMA event counters */

	fmd_stat_t etm_rd_fmd_fmaevent;
	fmd_stat_t etm_wr_fmd_fmaevent;

	fmd_stat_t etm_rd_drop_fmaevent;
	fmd_stat_t etm_wr_drop_fmaevent;

	fmd_stat_t etm_rd_dup_fmaevent;
	fmd_stat_t etm_wr_dup_fmaevent;

	/* ETM protocol failures */

	fmd_stat_t etm_magic_bad;
	fmd_stat_t etm_ver_bad;
	fmd_stat_t etm_msgtype_bad;
	fmd_stat_t etm_subtype_bad;
	fmd_stat_t etm_xid_bad;
	fmd_stat_t etm_fmaeventlen_bad;
	fmd_stat_t etm_respcode_bad;
	fmd_stat_t etm_timeout_bad;
	fmd_stat_t etm_evlens_bad;

	/* IO operation failures */

	fmd_stat_t etm_xport_wr_fail;
	fmd_stat_t etm_xport_rd_fail;
	fmd_stat_t etm_xport_pk_fail;

	/* IO operation retries */

	fmd_stat_t etm_xport_wr_retry;
	fmd_stat_t etm_xport_rd_retry;
	fmd_stat_t etm_xport_pk_retry;

	/* system and library failures */

	fmd_stat_t etm_os_sysevent_publish_fail;
	fmd_stat_t etm_os_sysevent_bind_fail;
	fmd_stat_t etm_os_nvlist_pack_fail;
	fmd_stat_t etm_os_nvlist_unpack_fail;
	fmd_stat_t etm_os_nvlist_size_fail;
	fmd_stat_t etm_os_pthread_create_fail;

	/* xport API failures */

	fmd_stat_t etm_xport_get_ev_addrv_fail;
	fmd_stat_t etm_xport_open_fail;
	fmd_stat_t etm_xport_close_fail;
	fmd_stat_t etm_xport_accept_fail;
	fmd_stat_t etm_xport_open_retry;

	/* FMD entry point bad arguments */

	fmd_stat_t etm_fmd_recv_badargs;
	fmd_stat_t etm_fmd_init_badargs;
	fmd_stat_t etm_fmd_fini_badargs;

} etm_stats = {

	/* ETM msg counters */

	{ "etm_rd_hdr_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg headers rcvd from xport" },
	{ "etm_rd_hdr_control", FMD_TYPE_UINT64,
		"ETM control msg headers rcvd from xport" },
	{ "etm_rd_hdr_response", FMD_TYPE_UINT64,
		"ETM response msg headers rcvd from xport" },
	{ "etm_rd_body_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg bodies rcvd from xport" },
	{ "etm_rd_body_control", FMD_TYPE_UINT64,
		"ETM control msg bodies rcvd from xport" },
	{ "etm_rd_body_response", FMD_TYPE_UINT64,
		"ETM response msg bodies rcvd from xport" },
	{ "etm_wr_hdr_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg headers sent to xport" },
	{ "etm_wr_hdr_control", FMD_TYPE_UINT64,
		"ETM control msg headers sent to xport" },
	{ "etm_wr_hdr_response", FMD_TYPE_UINT64,
		"ETM response msg headers sent to xport" },
	{ "etm_wr_body_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg bodies sent to xport" },
	{ "etm_wr_body_control", FMD_TYPE_UINT64,
		"ETM control msg bodies sent to xport" },
	{ "etm_wr_body_response", FMD_TYPE_UINT64,
		"ETM response msg bodies sent to xport" },

	/* ETM byte counters */

	{ "etm_wr_fmd_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events sent to FMD" },
	{ "etm_rd_fmd_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events rcvd from FMD" },
	{ "etm_wr_xport_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events sent to xport" },
	{ "etm_rd_xport_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events rcvd from xport" },

	{ "etm_magic_drop_bytes", FMD_TYPE_UINT64,
		"bytes dropped from xport pre magic num" },

	/* ETM [dropped] FMA event counters */

	{ "etm_rd_fmd_fmaevent", FMD_TYPE_UINT64,
		"FMA events rcvd from FMD" },
	{ "etm_wr_fmd_fmaevent", FMD_TYPE_UINT64,
		"FMA events sent to FMD" },

	{ "etm_rd_drop_fmaevent", FMD_TYPE_UINT64,
		"dropped FMA events from xport" },
	{ "etm_wr_drop_fmaevent", FMD_TYPE_UINT64,
		"dropped FMA events to xport" },

	{ "etm_rd_dup_fmaevent", FMD_TYPE_UINT64,
	    "duplicate FMA events from xport" },
	{ "etm_wr_dup_fmaevent", FMD_TYPE_UINT64,
	    "duplicate FMA events to xport" },

	/* ETM protocol failures */

	{ "etm_magic_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid magic num" },
	{ "etm_ver_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid protocol version" },
	{ "etm_msgtype_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid message type" },
	{ "etm_subtype_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid sub type" },
	{ "etm_xid_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ unmatched xid" },
	{ "etm_fmaeventlen_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid FMA event length" },
	{ "etm_respcode_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid response code" },
	{ "etm_timeout_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid timeout value" },
	{ "etm_evlens_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ too many event lengths" },

	/* IO operation failures */

	{ "etm_xport_wr_fail", FMD_TYPE_UINT64,
		"xport write failures" },
	{ "etm_xport_rd_fail", FMD_TYPE_UINT64,
		"xport read failures" },
	{ "etm_xport_pk_fail", FMD_TYPE_UINT64,
		"xport peek failures" },

	/* IO operation retries */

	{ "etm_xport_wr_retry", FMD_TYPE_UINT64,
		"xport write retries" },
	{ "etm_xport_rd_retry", FMD_TYPE_UINT64,
		"xport read retries" },
	{ "etm_xport_pk_retry", FMD_TYPE_UINT64,
		"xport peek retries" },

	/* system and library failures */

	{ "etm_os_sysevent_publish_fail", FMD_TYPE_UINT64,
		"sysevent_evc_publish failures" },
	{ "etm_os_sysevent_bind_fail", FMD_TYPE_UINT64,
		"sysevent_evc_bind failures" },
	{ "etm_os_nvlist_pack_fail", FMD_TYPE_UINT64,
		"nvlist_pack failures" },
	{ "etm_os_nvlist_unpack_fail", FMD_TYPE_UINT64,
		"nvlist_unpack failures" },
	{ "etm_os_nvlist_size_fail", FMD_TYPE_UINT64,
		"nvlist_size failures" },
	{ "etm_os_pthread_create_fail", FMD_TYPE_UINT64,
		"pthread_create failures" },

	/* transport API failures */

	{ "etm_xport_get_ev_addrv_fail", FMD_TYPE_UINT64,
		"xport get event addrv API failures" },
	{ "etm_xport_open_fail", FMD_TYPE_UINT64,
		"xport open API failures" },
	{ "etm_xport_close_fail", FMD_TYPE_UINT64,
		"xport close API failures" },
	{ "etm_xport_accept_fail", FMD_TYPE_UINT64,
		"xport accept API failures" },
	{ "etm_xport_open_retry", FMD_TYPE_UINT64,
		"xport open API retries" },

	/* FMD entry point bad arguments */

	{ "etm_fmd_recv_badargs", FMD_TYPE_UINT64,
		"bad arguments from fmd_recv entry point" },
	{ "etm_fmd_init_badargs", FMD_TYPE_UINT64,
		"bad arguments from fmd_init entry point" },
	{ "etm_fmd_fini_badargs", FMD_TYPE_UINT64,
		"bad arguments from fmd_fini entry point" }
};
378 
379 /*
380  * -------------------------- support functions ------------------------------
381  */
382 
383 /*
384  * Design_Note:	Each failure worth reporting to FMD should be done using
385  *		a single call to fmd_hdl_error() as it logs an FMA event
386  *		for each call. Also be aware that all the fmd_hdl_*()
387  *		format strings currently use platform specific *printf()
388  *		routines; so "%p" under Solaris does not prepend "0x" to
389  *		the outputted hex digits, while Linux and VxWorks do.
390  */
391 
392 /*
393  * etm_show_time - display the current time of day (for debugging) using
394  *		the given FMD module handle and annotation string
395  */
396 
397 static void
398 etm_show_time(fmd_hdl_t *hdl, char *note_str)
399 {
400 	struct timeval		tmv;		/* timeval */
401 
402 	(void) gettimeofday(&tmv, NULL);
403 	fmd_hdl_debug(hdl, "info: %s: cur Unix Epoch time %d.%06d\n",
404 	    note_str, tmv.tv_sec, tmv.tv_usec);
405 
406 } /* etm_show_time() */
407 
408 /*
409  * etm_hexdump - hexdump the given buffer (for debugging) using
410  *		the given FMD module handle
411  */
412 
413 static void
414 etm_hexdump(fmd_hdl_t *hdl, void *buf, size_t byte_cnt)
415 {
416 	uint8_t		*bp;		/* byte ptr */
417 	int		i, j;		/* index */
418 	char		cb[80];		/* char buf */
419 	unsigned int	n;		/* a byte of data for sprintf() */
420 
421 	bp = buf;
422 	j = 0;
423 
424 	/*
425 	 * Design_Note:	fmd_hdl_debug() auto adds a newline if missing;
426 	 *		hence cb exists to accumulate a longer string.
427 	 */
428 
429 	for (i = 1; i <= byte_cnt; i++) {
430 		n = *bp++;
431 		(void) sprintf(&cb[j], "%2.2x ", n);
432 		j += 3;
433 		/* add a newline every 16 bytes or at the buffer's end */
434 		if (((i % 16) == 0) || (i >= byte_cnt)) {
435 			cb[j-1] = '\0';
436 			fmd_hdl_debug(hdl, "%s\n", cb);
437 			j = 0;
438 		}
439 	} /* for each byte in the buffer */
440 
441 } /* etm_hexdump() */
442 
443 /*
444  * etm_sleep - sleep the caller for the given number of seconds,
445  *		return 0 or -errno value
446  *
447  * Design_Note:	To avoid interfering with FMD's signal mask (SIGALRM)
448  *		do not use [Solaris] sleep(3C) and instead use
449  *		pthread_cond_wait() or nanosleep(), both of which
450  *		are POSIX spec-ed to leave signal masks alone.
451  *		This is needed for Solaris and Linux (domain and SP).
452  */
453 
static int
etm_sleep(unsigned sleep_sec)
{
	struct timespec	sleep_tms;	/* requested sleep interval */

	sleep_tms.tv_sec = sleep_sec;
	sleep_tms.tv_nsec = 0;

	/* nanosleep() leaves the caller's signal mask untouched */

	if (nanosleep(&sleep_tms, NULL) != 0) {
		/* errno assumed set by above call */
		return (-errno);
	}
	return (0);

} /* etm_sleep() */
469 
470 /*
471  * etm_conn_open - open a connection to the given transport address,
472  *		return 0 and the opened connection handle
473  *		or -errno value
474  *
475  * caveats:	the err_substr is used in failure cases for calling
476  *		fmd_hdl_error()
477  */
478 
479 static int
480 etm_conn_open(fmd_hdl_t *hdl, char *err_substr,
481 		etm_xport_addr_t addr, etm_xport_conn_t *connp)
482 {
483 	etm_xport_conn_t	conn;	/* connection to return */
484 	int			nev;	/* -errno value */
485 
486 	if ((conn = etm_xport_open(hdl, addr)) == NULL) {
487 		nev = (-errno);
488 		fmd_hdl_error(hdl, "error: %s: errno %d\n",
489 					err_substr, errno);
490 		etm_stats.etm_xport_open_fail.fmds_value.ui64++;
491 		return (nev);
492 	} else {
493 		*connp = conn;
494 		return (0);
495 	}
496 } /* etm_conn_open() */
497 
498 /*
499  * etm_conn_close - close the given connection,
500  *		return 0 or -errno value
501  *
502  * caveats:	the err_substr is used in failure cases for calling
503  *		fmd_hdl_error()
504  */
505 
506 static int
507 etm_conn_close(fmd_hdl_t *hdl, char *err_substr, etm_xport_conn_t conn)
508 {
509 	int	nev;	/* -errno value */
510 
511 	if (etm_xport_close(hdl, conn) == NULL) {
512 		nev = (-errno);
513 		fmd_hdl_error(hdl, "warning: %s: errno %d\n",
514 					err_substr, errno);
515 		etm_stats.etm_xport_close_fail.fmds_value.ui64++;
516 		return (nev);
517 	} else {
518 		return (0);
519 	}
520 } /* etm_conn_close() */
521 
522 /*
523  * etm_io_op - perform an IO operation on the given connection
524  *		with the given buffer,
525  *		accommodating MTU size and retrying op if needed,
526  *		return how many bytes actually done by the op
527  *		or -errno value
528  *
529  * caveats:	the err_substr is used in failure cases for calling
530  *		fmd_hdl_error()
531  */
532 
533 static ssize_t
534 etm_io_op(fmd_hdl_t *hdl, char *err_substr, etm_xport_conn_t conn,
535 				void *buf, size_t byte_cnt, int io_op)
536 {
537 	ssize_t		rv;		/* ret val / byte count */
538 	ssize_t		n;		/* gen use */
539 	uint8_t		*datap;		/* ptr to data */
540 	size_t		mtu_sz;		/* MTU size in bytes */
541 	int		(*io_func_ptr)(fmd_hdl_t *, etm_xport_conn_t,
542 							void *, size_t);
543 	size_t		io_sz;		/* byte count for io_func_ptr */
544 	int		try_cnt;	/* number of tries done */
545 	int		sleep_sec;	/* exp backoff sleep period in sec */
546 	int		sleep_rv;	/* ret val from sleeping */
547 	fmd_stat_t	io_retry_stat;	/* IO retry stat to update */
548 	fmd_stat_t	io_fail_stat;	/* IO failure stat to update */
549 
550 	if ((conn == NULL) || (buf == NULL)) {
551 		return (-EINVAL);
552 	}
553 	switch (io_op) {
554 		case ETM_IO_OP_RD:
555 			io_func_ptr = etm_xport_read;
556 			io_retry_stat = etm_stats.etm_xport_rd_retry;
557 			io_fail_stat = etm_stats.etm_xport_rd_fail;
558 			break;
559 		case ETM_IO_OP_WR:
560 			io_func_ptr = etm_xport_write;
561 			io_retry_stat = etm_stats.etm_xport_wr_retry;
562 			io_fail_stat = etm_stats.etm_xport_wr_fail;
563 			break;
564 		case ETM_IO_OP_PK:
565 			io_func_ptr = etm_xport_peek;
566 			io_retry_stat = etm_stats.etm_xport_pk_retry;
567 			io_fail_stat = etm_stats.etm_xport_pk_fail;
568 			break;
569 		default:
570 			return (-EINVAL);
571 	}
572 	if (byte_cnt == 0) {
573 		return (byte_cnt);	/* nop */
574 	}
575 
576 	/* obtain [current] MTU size */
577 
578 	if ((n = etm_xport_get_opt(hdl, conn, ETM_XPORT_OPT_MTU_SZ)) < 0) {
579 		mtu_sz = ETM_XPORT_MTU_SZ_DEF;
580 	} else {
581 		mtu_sz = n;
582 	}
583 
584 	/* loop until all IO done, try limit exceeded, or real failure */
585 
586 	rv = 0;
587 	datap = buf;
588 	while (rv < byte_cnt) {
589 		io_sz = MIN((byte_cnt - rv), mtu_sz);
590 		try_cnt = 0;
591 		sleep_sec = 0;
592 
593 		/* when give up, return -errno value even if partly done */
594 
595 		while ((n = (*io_func_ptr)(hdl, conn, datap, io_sz)) ==
596 								(-EAGAIN)) {
597 			try_cnt++;
598 			if (try_cnt > ETM_TRY_MAX_CNT) {
599 				rv = n;
600 				goto func_ret;
601 			}
602 			if (etm_is_dying) {
603 				rv = (-EINTR);
604 				goto func_ret;
605 			}
606 			if ((sleep_rv = etm_sleep(sleep_sec)) < 0) {
607 				rv = sleep_rv;
608 				goto func_ret;
609 			}
610 			sleep_sec = ((sleep_sec == 0) ? 1 :
611 					(sleep_sec * ETM_TRY_BACKOFF_RATE));
612 			sleep_sec = MIN(sleep_sec, ETM_TRY_BACKOFF_CAP);
613 			io_retry_stat.fmds_value.ui64++;
614 			if (etm_debug_lvl >= 1) {
615 				fmd_hdl_debug(hdl, "info: retrying io op %d "
616 						"due to EAGAIN\n", io_op);
617 			}
618 		} /* while trying the io operation */
619 
620 		if (etm_is_dying) {
621 			rv = (-EINTR);
622 			goto func_ret;
623 		}
624 		if (n < 0) {
625 			rv = n;
626 			goto func_ret;
627 		}
628 		/* avoid spinning CPU when given 0 bytes but no error */
629 		if (n == 0) {
630 			if ((sleep_rv = etm_sleep(ETM_SLEEP_QUIK)) < 0) {
631 				rv = sleep_rv;
632 				goto func_ret;
633 			}
634 		}
635 		rv += n;
636 		datap += n;
637 	} /* while still have more data */
638 
639 func_ret:
640 
641 	if (rv < 0) {
642 		io_fail_stat.fmds_value.ui64++;
643 		fmd_hdl_error(hdl, "error: %s: errno %d\n",
644 					err_substr, (int)(-rv));
645 	}
646 	if (etm_debug_lvl >= 3) {
647 		fmd_hdl_debug(hdl, "info: io op %d ret %d of %d\n",
648 					io_op, (int)rv, (int)byte_cnt);
649 	}
650 	return (rv);
651 
652 } /* etm_io_op() */
653 
654 /*
655  * etm_magic_read - read the magic number of an ETM message header
656  *		from the given connection into the given buffer,
657  *		return 0 or -errno value
658  *
659  * Design_Note:	This routine is intended to help protect ETM from protocol
660  *		framing errors as might be caused by an SP reset / crash in
661  *		the middle of an ETM message send; the connection will be
662  *		read from for as many bytes as needed until the magic number
663  *		is found using a sliding buffer for comparisons.
664  */
665 
static int
etm_magic_read(fmd_hdl_t *hdl, etm_xport_conn_t conn, uint32_t *magic_ptr)
{
	int		rv;		/* ret val */
	uint32_t	magic_num;	/* magic number */
	int		byte_cnt;	/* count of bytes read */
	uint8_t		buf5[4+1];	/* sliding input buffer */
	int		i, j;		/* indices into buf5 */
	ssize_t		n;		/* gen use */
	uint8_t		drop_buf[1024];	/* dropped bytes buffer */

	rv = 0;		/* assume success */
	magic_num = 0;
	byte_cnt = 0;
	j = 0;

	/* magic number bytes are sent in network (big endian) order */

	while (magic_num != ETM_PROTO_MAGIC_NUM) {
		/* pull one more byte into the tail of the sliding buffer */
		if ((n = etm_io_op(hdl, "bad io read on magic",
				conn, &buf5[j], 1, ETM_IO_OP_RD)) < 0) {
			rv = n;
			goto func_ret;
		}
		byte_cnt++;
		/* j advances until it pins at sizeof (magic_num) == 4 */
		j = MIN((j + 1), sizeof (magic_num));
		/* need at least 4 bytes before any comparison can be made */
		if (byte_cnt < sizeof (magic_num)) {
			continue;
		}

		if (byte_cnt > sizeof (magic_num)) {
			etm_stats.etm_magic_drop_bytes.fmds_value.ui64++;
			/*
			 * save the byte about to slide out of buf5 into
			 * drop_buf for later debug display; once drop_buf
			 * fills up its last slot is simply overwritten
			 */
			i = MIN(byte_cnt - j - 1, sizeof (drop_buf) - 1);
			drop_buf[i] = buf5[0];
			for (i = 0; i < j; i++) {
				buf5[i] = buf5[i+1];
			} /* for sliding the buffer contents */
		}
		/* reassemble and host-decode the candidate magic number */
		(void) memcpy(&magic_num, &buf5[0], sizeof (magic_num));
		magic_num = ntohl(magic_num);
	} /* for reading bytes until find magic number */

func_ret:

	/* any extra bytes read imply a framing error upstream */
	if (byte_cnt != sizeof (magic_num)) {
		fmd_hdl_error(hdl, "warning: bad proto frame "
				"implies corrupt/lost msg(s)\n");
	}
	if ((byte_cnt > sizeof (magic_num)) && (etm_debug_lvl >= 2)) {
		i = MIN(byte_cnt - sizeof (magic_num), sizeof (drop_buf));
		fmd_hdl_debug(hdl, "info: magic drop hexdump "
				"first %d of %d bytes:\n",
				i, byte_cnt - sizeof (magic_num));
		etm_hexdump(hdl, drop_buf, i);
	}

	if (rv == 0) {
		*magic_ptr = magic_num;
	}
	return (rv);

} /* etm_magic_read() */
728 
729 /*
730  * etm_hdr_read - allocate, read, and validate a [variable sized]
731  *		ETM message header from the given connection,
732  *		return the allocated ETM message header
733  *		(which is guaranteed to be large enough to reuse as a
734  *		RESPONSE msg hdr) and its size
735  *		or NULL and set errno on failure
736  */
737 
static void *
etm_hdr_read(fmd_hdl_t *hdl, etm_xport_conn_t conn, size_t *szp)
{
	uint8_t			*hdrp;		/* ptr to header to return */
	size_t			hdr_sz;		/* sizeof *hdrp */
	etm_proto_v1_pp_t	pp; 		/* protocol preamble */
	etm_proto_v1_ev_hdr_t	*ev_hdrp;	/* for FMA_EVENT msg */
	etm_proto_v1_ctl_hdr_t	*ctl_hdrp;	/* for CONTROL msg */
	etm_proto_v1_resp_hdr_t *resp_hdrp;	/* for RESPONSE msg */
	uint32_t		*lenp;		/* ptr to FMA event length */
	ssize_t			i, n;		/* gen use */
	uint8_t	misc_buf[ETM_MISC_BUF_SZ];	/* for var sized hdrs */
	int			dummy_int;	/* dummy var to appease lint */

	hdrp = NULL; hdr_sz = 0;

	/* read the magic number which starts the protocol preamble */

	if ((n = etm_magic_read(hdl, conn, &pp.pp_magic_num)) < 0) {
		errno = (-n);
		etm_stats.etm_magic_bad.fmds_value.ui64++;
		return (NULL);
	}

	/* read the rest of the protocol preamble all at once */

	if ((n = etm_io_op(hdl, "bad io read on preamble",
				conn, &pp.pp_proto_ver,
				sizeof (pp) - sizeof (pp.pp_magic_num),
				ETM_IO_OP_RD)) < 0) {
		errno = (-n);
		return (NULL);
	}

	/*
	 * Design_Note:	The magic number was already network decoded; but
	 *		some other preamble fields also need to be decoded,
	 *		specifically pp_xid and pp_timeout. The rest of the
	 *		preamble fields are byte sized and hence need no
	 *		decoding.
	 */

	pp.pp_xid = ntohl(pp.pp_xid);
	pp.pp_timeout = ntohl(pp.pp_timeout);

	/* sanity check the header as best we can */

	if ((pp.pp_proto_ver < ETM_PROTO_V1) ||
	    (pp.pp_proto_ver > ETM_PROTO_V2)) {
		fmd_hdl_error(hdl, "error: bad proto ver %d\n",
					(int)pp.pp_proto_ver);
		errno = EPROTO;
		etm_stats.etm_ver_bad.fmds_value.ui64++;
		return (NULL);
	}

	dummy_int = pp.pp_msg_type;
	if ((dummy_int <= ETM_MSG_TYPE_TOO_LOW) ||
	    (dummy_int >= ETM_MSG_TYPE_TOO_BIG)) {
		fmd_hdl_error(hdl, "error: bad msg type %d", dummy_int);
		errno = EBADMSG;
		etm_stats.etm_msgtype_bad.fmds_value.ui64++;
		return (NULL);
	}

	/* handle [var sized] hdrs for FMA_EVENT, CONTROL, RESPONSE msgs */

	if (pp.pp_msg_type == ETM_MSG_TYPE_FMA_EVENT) {

		/* build the FMA_EVENT header in misc_buf around the pp */
		ev_hdrp = (void*)&misc_buf[0];
		hdr_sz = sizeof (*ev_hdrp);
		(void) memcpy(&ev_hdrp->ev_pp, &pp, sizeof (pp));

		/* sanity check the header's timeout */

		if ((ev_hdrp->ev_pp.pp_proto_ver == ETM_PROTO_V1) &&
		    (ev_hdrp->ev_pp.pp_timeout != ETM_PROTO_V1_TIMEOUT_NONE)) {
			errno = ETIME;
			etm_stats.etm_timeout_bad.fmds_value.ui64++;
			return (NULL);
		}

		/* get all FMA event lengths from the header */

		/*
		 * read network-encoded uint32 lengths one at a time until
		 * the 0 terminator, growing the header within misc_buf;
		 * bail out if the lengths would overflow misc_buf
		 */
		lenp = (uint32_t *)&ev_hdrp->ev_lens[0]; lenp--;
		i = -1;	/* cnt of length entries preceding 0 */
		do {
			i++; lenp++;
			if ((sizeof (*ev_hdrp) + (i * sizeof (*lenp))) >=
							ETM_MISC_BUF_SZ) {
				errno = E2BIG;	/* ridiculous size */
				etm_stats.etm_evlens_bad.fmds_value.ui64++;
				return (NULL);
			}
			if ((n = etm_io_op(hdl, "bad io read on event len",
						conn, lenp, sizeof (*lenp),
						ETM_IO_OP_RD)) < 0) {
				errno = (-n);
				return (NULL);
			}
			*lenp = ntohl(*lenp);

		} while (*lenp != 0);
		i += 0; /* first len already counted by sizeof(ev_hdr) */
		hdr_sz += (i * sizeof (*lenp));

		etm_stats.etm_rd_hdr_fmaevent.fmds_value.ui64++;

	} else if (pp.pp_msg_type == ETM_MSG_TYPE_CONTROL) {

		/* build the CONTROL header in misc_buf around the pp */
		ctl_hdrp = (void*)&misc_buf[0];
		hdr_sz = sizeof (*ctl_hdrp);
		(void) memcpy(&ctl_hdrp->ctl_pp, &pp, sizeof (pp));

		/* sanity check the header's sub type (control selector) */

		if ((ctl_hdrp->ctl_pp.pp_sub_type <= ETM_CTL_SEL_TOO_LOW) ||
		    (ctl_hdrp->ctl_pp.pp_sub_type >= ETM_CTL_SEL_TOO_BIG)) {
			fmd_hdl_error(hdl, "error: bad ctl sub type %d\n",
					(int)ctl_hdrp->ctl_pp.pp_sub_type);
			errno = EBADMSG;
			etm_stats.etm_subtype_bad.fmds_value.ui64++;
			return (NULL);
		}

		/* get the control length */

		if ((n = etm_io_op(hdl, "bad io read on ctl len",
					conn, &ctl_hdrp->ctl_len,
					sizeof (ctl_hdrp->ctl_len),
					ETM_IO_OP_RD)) < 0) {
			errno = (-n);
			return (NULL);
		}

		ctl_hdrp->ctl_len = ntohl(ctl_hdrp->ctl_len);

		etm_stats.etm_rd_hdr_control.fmds_value.ui64++;

	} else if (pp.pp_msg_type == ETM_MSG_TYPE_RESPONSE) {

		/* build the RESPONSE header in misc_buf around the pp */
		resp_hdrp = (void*)&misc_buf[0];
		hdr_sz = sizeof (*resp_hdrp);
		(void) memcpy(&resp_hdrp->resp_pp, &pp, sizeof (pp));

		/* sanity check the header's timeout */

		if (resp_hdrp->resp_pp.pp_timeout !=
						ETM_PROTO_V1_TIMEOUT_NONE) {
			errno = ETIME;
			etm_stats.etm_timeout_bad.fmds_value.ui64++;
			return (NULL);
		}

		/* get the response code and length */

		if ((n = etm_io_op(hdl, "bad io read on resp code+len",
					conn, &resp_hdrp->resp_code,
					sizeof (resp_hdrp->resp_code) +
					sizeof (resp_hdrp->resp_len),
					ETM_IO_OP_RD)) < 0) {
			errno = (-n);
			return (NULL);
		}

		resp_hdrp->resp_code = ntohl(resp_hdrp->resp_code);
		resp_hdrp->resp_len = ntohl(resp_hdrp->resp_len);

		etm_stats.etm_rd_hdr_response.fmds_value.ui64++;

	} /* whether we have FMA_EVENT, CONTROL, RESPONSE msg */

	/*
	 * choose a header size that allows hdr reuse for RESPONSE msgs,
	 * allocate and populate the message header, and
	 * return alloc size to caller for later free of hdrp
	 */

	hdr_sz = MAX(hdr_sz, sizeof (*resp_hdrp));
	hdrp = fmd_hdl_zalloc(hdl, hdr_sz, FMD_SLEEP);
	(void) memcpy(hdrp, misc_buf, hdr_sz);

	if (etm_debug_lvl >= 3) {
		fmd_hdl_debug(hdl, "info: msg hdr hexdump %d bytes:\n",
								hdr_sz);
		etm_hexdump(hdl, hdrp, hdr_sz);
	}
	*szp = hdr_sz;
	return (hdrp);

} /* etm_hdr_read() */
929 
930 /*
931  * etm_hdr_write - create and write a [variable sized] ETM message header
932  *		to the given connection appropriate for the given FMA event
933  *		and type of nvlist encoding,
934  *		return the allocated ETM message header and its size
935  *		or NULL and set errno on failure
936  */
937 
938 static void*
939 etm_hdr_write(fmd_hdl_t *hdl, etm_xport_conn_t conn, nvlist_t *evp,
940 						int encoding, size_t *szp)
941 {
942 	etm_proto_v1_ev_hdr_t	*hdrp;		/* for FMA_EVENT msg */
943 	size_t			hdr_sz;		/* sizeof *hdrp */
944 	uint32_t		*lenp;		/* ptr to FMA event length */
945 	size_t			evsz;		/* packed FMA event size */
946 	ssize_t			n;		/* gen use */
947 
948 	/* allocate and populate the message header for 1 FMA event */
949 
950 	hdr_sz = sizeof (*hdrp) + (1 * sizeof (hdrp->ev_lens[0]));
951 
952 	hdrp = fmd_hdl_zalloc(hdl, hdr_sz, FMD_SLEEP);
953 
954 	/*
955 	 * Design_Note: Although the ETM protocol supports it, we do not (yet)
956 	 *		want responses/ACKs on FMA events that we send. All
957 	 *		such messages are sent with ETM_PROTO_V1_TIMEOUT_NONE.
958 	 */
959 
960 	hdrp->ev_pp.pp_magic_num = ETM_PROTO_MAGIC_NUM;
961 	hdrp->ev_pp.pp_magic_num = htonl(hdrp->ev_pp.pp_magic_num);
962 	hdrp->ev_pp.pp_proto_ver = ETM_PROTO_V1;
963 	hdrp->ev_pp.pp_msg_type = ETM_MSG_TYPE_FMA_EVENT;
964 	hdrp->ev_pp.pp_sub_type = 0;
965 	hdrp->ev_pp.pp_rsvd_pad = 0;
966 	hdrp->ev_pp.pp_xid = etm_xid_cur;
967 	hdrp->ev_pp.pp_xid = htonl(hdrp->ev_pp.pp_xid);
968 	etm_xid_cur += ETM_XID_INC;
969 	hdrp->ev_pp.pp_timeout = ETM_PROTO_V1_TIMEOUT_NONE;
970 	hdrp->ev_pp.pp_timeout = htonl(hdrp->ev_pp.pp_timeout);
971 
972 	lenp = &hdrp->ev_lens[0];
973 
974 	if ((n = nvlist_size(evp, &evsz, encoding)) != 0) {
975 		errno = n;
976 		fmd_hdl_free(hdl, hdrp, hdr_sz);
977 		etm_stats.etm_os_nvlist_size_fail.fmds_value.ui64++;
978 		return (NULL);
979 	}
980 
981 	/* indicate 1 FMA event, network encode its length, and 0-terminate */
982 
983 	*lenp = evsz; *lenp = htonl(*lenp); lenp++;
984 	*lenp = 0; *lenp = htonl(*lenp); lenp++;
985 
986 	/*
987 	 * write the network encoded header to the transport, and
988 	 * return alloc size to caller for later free
989 	 */
990 
991 	if ((n = etm_io_op(hdl, "bad io write on event hdr",
992 				conn, hdrp, hdr_sz, ETM_IO_OP_WR)) < 0) {
993 		errno = (-n);
994 		fmd_hdl_free(hdl, hdrp, hdr_sz);
995 		return (NULL);
996 	}
997 
998 	*szp = hdr_sz;
999 	return (hdrp);
1000 
1001 } /* etm_hdr_write() */
1002 
1003 /*
1004  * etm_post_to_fmd - post the given FMA event to FMD
1005  *			[via sysevent or via a FMD transport API call ],
1006  *			return 0 or -errno value
1007  *
1008  * Design_Note:	This routine exists to ease future porting to both
1009  *		FMA Phase 2 FMD as well as porting to Linux which lacks
1010  *		a native sysevent.
1011  */
1012 
1013 static int
1014 etm_post_to_fmd(fmd_hdl_t *hdl, nvlist_t *evp)
1015 {
1016 	int			rv;		/* ret val */
1017 	evchan_t		*scp;		/* sysevent channel ptr */
1018 	ssize_t			n;		/* gen use */
1019 
1020 	rv = 0; /* default success */
1021 
1022 	scp = NULL;
1023 
1024 	if (etm_debug_lvl >= 2) {
1025 		etm_show_time(hdl, "ante ev post");
1026 	}
1027 	if ((n = sysevent_evc_bind(FM_ERROR_CHAN, &scp,
1028 				EVCH_CREAT | EVCH_HOLD_PEND)) != 0) {
1029 		rv = (-n);
1030 		fmd_hdl_error(hdl, "error: FMA event dropped: "
1031 				"sysevent bind errno %d\n", n);
1032 		etm_stats.etm_os_sysevent_bind_fail.fmds_value.ui64++;
1033 		etm_stats.etm_rd_drop_fmaevent.fmds_value.ui64++;
1034 		goto func_ret;
1035 	}
1036 
1037 	if ((n = sysevent_evc_publish(scp, EC_FM, ESC_FM_ERROR, "com.sun",
1038 				getexecname(), evp, EVCH_SLEEP)) != 0) {
1039 		rv = (-n);
1040 		fmd_hdl_error(hdl, "error: FMA event dropped: "
1041 				"sysevent publish errno %d\n", n);
1042 		etm_stats.etm_os_sysevent_publish_fail.fmds_value.ui64++;
1043 		etm_stats.etm_rd_drop_fmaevent.fmds_value.ui64++;
1044 		goto func_ret;
1045 	}
1046 
1047 func_ret:
1048 
1049 	if (scp != NULL) {
1050 		sysevent_evc_unbind(scp);
1051 	}
1052 	if (rv == 0) {
1053 		etm_stats.etm_wr_fmd_fmaevent.fmds_value.ui64++;
1054 		(void) nvlist_size(evp, (size_t *)&n, NV_ENCODE_XDR);
1055 		etm_stats.etm_wr_fmd_bytes.fmds_value.ui64 += n;
1056 		if (etm_debug_lvl >= 1) {
1057 			fmd_hdl_debug(hdl, "info: event %p post ok to FMD\n",
1058 								evp);
1059 		}
1060 	}
1061 	if (etm_debug_lvl >= 2) {
1062 		etm_show_time(hdl, "post ev post");
1063 	}
1064 	return (rv);
1065 
1066 } /* etm_post_to_fmd() */
1067 
1068 /*
1069  * etm_req_ver_negot - send an ETM control message to the other end requesting
1070  *			that the ETM protocol version be negotiated/set
1071  */
1072 
1073 static void
1074 etm_req_ver_negot(fmd_hdl_t *hdl)
1075 {
1076 	etm_xport_addr_t	*addrv;		/* default dst addr(s) */
1077 	etm_xport_conn_t	conn;		/* connection to other end */
1078 	etm_proto_v1_ctl_hdr_t	*ctl_hdrp;	/* for CONTROL msg */
1079 	size_t			hdr_sz;		/* sizeof header */
1080 	uint8_t			*body_buf;	/* msg body buffer */
1081 	uint32_t		body_sz;	/* sizeof *body_buf */
1082 	ssize_t			i;		/* gen use */
1083 
1084 	/* populate an ETM control msg to send */
1085 
1086 	hdr_sz = sizeof (*ctl_hdrp);
1087 	body_sz = (2 + 1);		/* version bytes plus null byte */
1088 
1089 	ctl_hdrp = fmd_hdl_zalloc(hdl, hdr_sz + body_sz, FMD_SLEEP);
1090 
1091 	ctl_hdrp->ctl_pp.pp_magic_num = htonl(ETM_PROTO_MAGIC_NUM);
1092 	ctl_hdrp->ctl_pp.pp_proto_ver = ETM_PROTO_V1;
1093 	ctl_hdrp->ctl_pp.pp_msg_type = ETM_MSG_TYPE_CONTROL;
1094 	ctl_hdrp->ctl_pp.pp_sub_type = ETM_CTL_SEL_VER_NEGOT_REQ;
1095 	ctl_hdrp->ctl_pp.pp_rsvd_pad = 0;
1096 	etm_xid_ver_negot = etm_xid_cur;
1097 	etm_xid_cur += ETM_XID_INC;
1098 	ctl_hdrp->ctl_pp.pp_xid = htonl(etm_xid_ver_negot);
1099 	ctl_hdrp->ctl_pp.pp_timeout = htonl(ETM_PROTO_V1_TIMEOUT_FOREVER);
1100 	ctl_hdrp->ctl_len = htonl(body_sz);
1101 
1102 	body_buf = (void*)&ctl_hdrp->ctl_len;
1103 	body_buf += sizeof (ctl_hdrp->ctl_len);
1104 	*body_buf++ = ETM_PROTO_V2;
1105 	*body_buf++ = ETM_PROTO_V1;
1106 	*body_buf++ = '\0';
1107 
1108 	/*
1109 	 * open and close a connection to send the ETM control msg
1110 	 * to any/all of the default dst addrs
1111 	 */
1112 
1113 	if ((addrv = etm_xport_get_ev_addrv(hdl, NULL)) == NULL) {
1114 		fmd_hdl_error(hdl,
1115 			"error: bad ctl dst addrs errno %d\n", errno);
1116 		etm_stats.etm_xport_get_ev_addrv_fail.fmds_value.ui64++;
1117 		goto func_ret;
1118 	}
1119 
1120 	for (i = 0; addrv[i] != NULL; i++) {
1121 
1122 		if (etm_conn_open(hdl, "bad conn open during ver negot",
1123 					addrv[i], &conn) < 0) {
1124 			continue;
1125 		}
1126 		if (etm_io_op(hdl, "bad io write on ctl hdr+body",
1127 					conn, ctl_hdrp, hdr_sz + body_sz,
1128 					ETM_IO_OP_WR) >= 0) {
1129 			etm_stats.etm_wr_hdr_control.fmds_value.ui64++;
1130 			etm_stats.etm_wr_body_control.fmds_value.ui64++;
1131 		}
1132 		(void) etm_conn_close(hdl, "bad conn close during ver negot",
1133 									conn);
1134 
1135 	} /* foreach dst addr */
1136 
1137 func_ret:
1138 
1139 	if (addrv != NULL) {
1140 		etm_xport_free_addrv(hdl, addrv);
1141 	}
1142 	fmd_hdl_free(hdl, ctl_hdrp, hdr_sz + body_sz);
1143 
1144 } /* etm_req_ver_negot() */
1145 
1146 /*
1147  * Design_Note:	We rely on the fact that all message types have
1148  *		a common protocol preamble; if this fact should
1149  *		ever change it may break the code below. We also
1150  *		rely on the fact that FMA_EVENT and CONTROL headers
1151  *		returned will be sized large enough to reuse them
1152  *		as RESPONSE headers if the remote endpt asked
1153  *		for a response via the pp_timeout field.
1154  */
1155 
1156 /*
1157  * etm_maybe_send_response - check the given message header to see
1158  *				whether a response has been requested,
1159  *				if so then send an appropriate response
1160  *				back on the given connection using the
1161  *				given response code,
1162  *				return 0 or -errno value
1163  */
1164 
static ssize_t
etm_maybe_send_response(fmd_hdl_t *hdl, etm_xport_conn_t conn,
    void *hdrp, int32_t resp_code)
{
	ssize_t			rv;		/* ret val */
	etm_proto_v1_pp_t	*ppp;		/* protocol preamble ptr */
	etm_proto_v1_resp_hdr_t *resp_hdrp;	/* for RESPONSE msg */
	uint8_t			resp_body[4];	/* response body if needed */
	uint8_t			*resp_msg;	/* response hdr+body */
	size_t			hdr_sz;		/* sizeof response hdr */
	uint8_t			orig_msg_type;	/* orig hdr's message type */
	uint32_t		orig_timeout;	/* orig hdr's timeout */
	ssize_t			n;		/* gen use */

	rv = 0;		/* default is success */
	ppp = hdrp;
	orig_msg_type = ppp->pp_msg_type;
	orig_timeout = ppp->pp_timeout;

	/* bail out now if no response is to be sent */

	if (orig_timeout == ETM_PROTO_V1_TIMEOUT_NONE) {
		return (0);
	} /* if a nop */

	/* only FMA_EVENT and CONTROL msgs may be ACKed with a RESPONSE */

	if ((orig_msg_type != ETM_MSG_TYPE_FMA_EVENT) &&
	    (orig_msg_type != ETM_MSG_TYPE_CONTROL)) {
		return (-EINVAL);
	} /* if inappropriate hdr for a response msg */

	/*
	 * reuse the given header as a response header
	 *
	 * NOTE(review): this rewrites the caller's hdrp in place; callers
	 * must not rely on the original header contents afterward
	 */

	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "ante resp send");
	}

	resp_hdrp = hdrp;
	resp_hdrp->resp_code = resp_code;
	resp_hdrp->resp_len = 0;		/* default is empty body */

	if ((orig_msg_type == ETM_MSG_TYPE_CONTROL) &&
	    (ppp->pp_sub_type == ETM_CTL_SEL_VER_NEGOT_REQ)) {
		resp_body[0] = ETM_PROTO_V2;
		resp_hdrp->resp_len = 1;
	} /* if should send our/negotiated proto ver in resp body */

	/* respond with the proto ver that was negotiated */

	resp_hdrp->resp_pp.pp_proto_ver = etm_resp_ver;
	resp_hdrp->resp_pp.pp_msg_type = ETM_MSG_TYPE_RESPONSE;
	resp_hdrp->resp_pp.pp_timeout = ETM_PROTO_V1_TIMEOUT_NONE;

	/*
	 * send the whole response msg in one write, header and body;
	 * avoid the alloc-and-copy if we can reuse the hdr as the msg,
	 * ie, if the body is empty
	 *
	 * update stats and note the xid associated with last ACKed FMA_EVENT
	 * known to be successfully posted to FMD to aid duplicate filtering
	 *
	 * NOTE(review): resp_code, resp_len, and the reused preamble fields
	 * appear to be written in host byte order here with no hton*()
	 * conversion before the raw write — confirm the transport or the
	 * peer expects that, or that etm_hdr_read() left them pre-encoded
	 */

	hdr_sz = sizeof (etm_proto_v1_resp_hdr_t);

	resp_msg = hdrp;
	if (resp_hdrp->resp_len > 0) {
		/* non-empty body: build a contiguous hdr+body copy to send */
		resp_msg = fmd_hdl_zalloc(hdl, hdr_sz + resp_hdrp->resp_len,
		    FMD_SLEEP);
		(void) memcpy(resp_msg, resp_hdrp, hdr_sz);
		(void) memcpy(resp_msg + hdr_sz, resp_body,
		    resp_hdrp->resp_len);
	}

	if ((n = etm_io_op(hdl, "bad io write on resp msg", conn,
	    resp_msg, hdr_sz + resp_hdrp->resp_len, ETM_IO_OP_WR)) < 0) {
		rv = n;
		goto func_ret;
	}

	etm_stats.etm_wr_hdr_response.fmds_value.ui64++;
	etm_stats.etm_wr_body_response.fmds_value.ui64++;

	/* a non-negative resp_code ACKs a successfully posted FMA event */

	if ((orig_msg_type == ETM_MSG_TYPE_FMA_EVENT) &&
	    (resp_code >= 0)) {
		etm_xid_posted_ev = resp_hdrp->resp_pp.pp_xid;
	}

	fmd_hdl_debug(hdl, "info: sent V%u RESPONSE msg to xport "
	    "xid 0x%x code %d len %u\n",
	    (unsigned int)resp_hdrp->resp_pp.pp_proto_ver,
	    resp_hdrp->resp_pp.pp_xid, resp_hdrp->resp_code,
	    resp_hdrp->resp_len);
func_ret:

	/* free the hdr+body copy iff one was allocated above */

	if (resp_hdrp->resp_len > 0) {
		fmd_hdl_free(hdl, resp_msg, hdr_sz + resp_hdrp->resp_len);
	}
	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "post resp send");
	}
	return (rv);

} /* etm_maybe_send_response() */
1267 
1268 /*
1269  * etm_handle_new_conn - receive an ETM message sent from the other end via
1270  *			the given open connection, pull out any FMA events
1271  *			and post them to the local FMD (or handle any ETM
1272  *			control or response msg); when done, close the
1273  *			connection
1274  */
1275 
1276 static void
1277 etm_handle_new_conn(fmd_hdl_t *hdl, etm_xport_conn_t conn)
1278 {
1279 	etm_proto_v1_ev_hdr_t	*ev_hdrp;	/* for FMA_EVENT msg */
1280 	etm_proto_v1_ctl_hdr_t	*ctl_hdrp;	/* for CONTROL msg */
1281 	etm_proto_v1_resp_hdr_t *resp_hdrp;	/* for RESPONSE msg */
1282 	int32_t			resp_code;	/* response code */
1283 	size_t			hdr_sz;		/* sizeof header */
1284 	uint8_t			*body_buf;	/* msg body buffer */
1285 	uint32_t		body_sz;	/* sizeof body_buf */
1286 	uint32_t		ev_cnt;		/* count of FMA events */
1287 	uint8_t			*bp;		/* byte ptr within body_buf */
1288 	nvlist_t		*evp;		/* ptr to unpacked FMA event */
1289 	char			*class;		/* FMA event class */
1290 	ssize_t			i, n;		/* gen use */
1291 
1292 	if (etm_debug_lvl >= 2) {
1293 		etm_show_time(hdl, "ante conn handle");
1294 	}
1295 	fmd_hdl_debug(hdl, "info: handling new conn %p\n", conn);
1296 
1297 	ev_hdrp = NULL;
1298 	ctl_hdrp = NULL;
1299 	resp_hdrp = NULL;
1300 	body_buf = NULL;
1301 	class = NULL;
1302 	evp = NULL;
1303 	resp_code = 0;	/* default is success */
1304 
1305 	/* read a network decoded message header from the connection */
1306 
1307 	if ((ev_hdrp = etm_hdr_read(hdl, conn, &hdr_sz)) == NULL) {
1308 		/* errno assumed set by above call */
1309 		fmd_hdl_error(hdl, "error: FMA event dropped: "
1310 					"bad hdr read errno %d\n", errno);
1311 		etm_stats.etm_rd_drop_fmaevent.fmds_value.ui64++;
1312 		goto func_ret;
1313 	}
1314 
1315 	/*
1316 	 * handle the message based on its preamble pp_msg_type
1317 	 * which is known to be valid from etm_hdr_read() checks
1318 	 */
1319 
1320 	if (ev_hdrp->ev_pp.pp_msg_type == ETM_MSG_TYPE_FMA_EVENT) {
1321 
1322 		fmd_hdl_debug(hdl, "info: rcvd FMA_EVENT msg from xport\n");
1323 
1324 		/* allocate buf large enough for whole body / all FMA events */
1325 
1326 		body_sz = 0;
1327 		for (i = 0; ev_hdrp->ev_lens[i] != 0; i++) {
1328 			body_sz += ev_hdrp->ev_lens[i];
1329 		} /* for summing sizes of all FMA events */
1330 		ev_cnt = i;
1331 
1332 		if (etm_debug_lvl >= 1) {
1333 			fmd_hdl_debug(hdl, "info: event lengths %u sum %u\n",
1334 			    ev_cnt, body_sz);
1335 		}
1336 
1337 		body_buf = fmd_hdl_zalloc(hdl, body_sz, FMD_SLEEP);
1338 
1339 		/* read all the FMA events at once */
1340 
1341 		if ((n = etm_io_op(hdl, "FMA event dropped: "
1342 					"bad io read on event bodies",
1343 					conn, body_buf, body_sz,
1344 					ETM_IO_OP_RD)) < 0) {
1345 			etm_stats.etm_rd_drop_fmaevent.fmds_value.ui64++;
1346 			goto func_ret;
1347 		}
1348 
1349 		etm_stats.etm_rd_xport_bytes.fmds_value.ui64 += body_sz;
1350 		etm_stats.etm_rd_body_fmaevent.fmds_value.ui64 += ev_cnt;
1351 
1352 		/*
1353 		 * check for dup msg/xid against last good response sent,
1354 		 * if a dup then resend response but skip repost to FMD
1355 		 */
1356 
1357 		if (ev_hdrp->ev_pp.pp_xid == etm_xid_posted_ev) {
1358 			(void) etm_maybe_send_response(hdl, conn, ev_hdrp, 0);
1359 			fmd_hdl_debug(hdl, "info: skipping dup FMA event post "
1360 			    "xid 0x%x\n", etm_xid_posted_ev);
1361 			etm_stats.etm_rd_dup_fmaevent.fmds_value.ui64++;
1362 			goto func_ret;
1363 		}
1364 
1365 		/* unpack each FMA event and post it to FMD */
1366 
1367 		bp = body_buf;
1368 		for (i = 0; ev_hdrp->ev_lens[i] != 0; i++) {
1369 			if ((n = nvlist_unpack((char *)bp,
1370 					ev_hdrp->ev_lens[i], &evp, 0)) != 0) {
1371 				resp_code = (-n);
1372 				(void) etm_maybe_send_response(hdl, conn,
1373 				    ev_hdrp, resp_code);
1374 				fmd_hdl_error(hdl, "error: FMA event dropped: "
1375 						"bad event body unpack "
1376 						"errno %d\n", n);
1377 				if (etm_debug_lvl >= 2) {
1378 					fmd_hdl_debug(hdl, "info: FMA event "
1379 						"hexdump %d bytes:\n",
1380 						ev_hdrp->ev_lens[i]);
1381 					etm_hexdump(hdl, bp,
1382 						ev_hdrp->ev_lens[i]);
1383 				}
1384 				etm_stats.etm_os_nvlist_unpack_fail.fmds_value.
1385 					ui64++;
1386 				etm_stats.etm_rd_drop_fmaevent.fmds_value.
1387 					ui64++;
1388 				bp += ev_hdrp->ev_lens[i];
1389 				continue;
1390 			}
1391 			if (etm_debug_lvl >= 1) {
1392 				(void) nvlist_lookup_string(evp, FM_CLASS,
1393 								&class);
1394 				if (class == NULL) {
1395 					class = "NULL";
1396 				}
1397 				fmd_hdl_debug(hdl, "info: FMA event %p "
1398 						"class %s\n", evp, class);
1399 			}
1400 			resp_code = etm_post_to_fmd(hdl, evp);
1401 			(void) etm_maybe_send_response(hdl, conn,
1402 							ev_hdrp, resp_code);
1403 			nvlist_free(evp);
1404 			bp += ev_hdrp->ev_lens[i];
1405 		} /* foreach FMA event in the body buffer */
1406 
1407 	} else if (ev_hdrp->ev_pp.pp_msg_type == ETM_MSG_TYPE_CONTROL) {
1408 
1409 		ctl_hdrp = (void*)ev_hdrp;
1410 
1411 		fmd_hdl_debug(hdl, "info: rcvd CONTROL msg from xport\n");
1412 		if (etm_debug_lvl >= 1) {
1413 			fmd_hdl_debug(hdl, "info: ctl sel %d xid 0x%x\n",
1414 					(int)ctl_hdrp->ctl_pp.pp_sub_type,
1415 					ctl_hdrp->ctl_pp.pp_xid);
1416 		}
1417 
1418 		/*
1419 		 * if we have a VER_NEGOT_REQ read the body and validate
1420 		 * the protocol version set contained therein,
1421 		 * otherwise we have a PING_REQ (which has no body)
1422 		 * and we [also] fall thru to the code which sends a
1423 		 * response msg if the pp_timeout field requested one
1424 		 */
1425 
1426 		if (ctl_hdrp->ctl_pp.pp_sub_type == ETM_CTL_SEL_VER_NEGOT_REQ) {
1427 
1428 			body_sz = ctl_hdrp->ctl_len;
1429 			body_buf = fmd_hdl_zalloc(hdl, body_sz, FMD_SLEEP);
1430 
1431 			if ((n = etm_io_op(hdl, "bad io read on ctl body",
1432 						conn, body_buf, body_sz,
1433 						ETM_IO_OP_RD)) < 0) {
1434 				goto func_ret;
1435 			}
1436 
1437 			/* complain if version set completely incompatible */
1438 
1439 			for (i = 0; i < body_sz; i++) {
1440 				if ((body_buf[i] == ETM_PROTO_V1) ||
1441 				    (body_buf[i] == ETM_PROTO_V2)) {
1442 					break;
1443 				}
1444 			}
1445 			if (i >= body_sz) {
1446 				etm_stats.etm_ver_bad.fmds_value.ui64++;
1447 				resp_code = (-EPROTO);
1448 			}
1449 
1450 		} /* if got version set request */
1451 
1452 		etm_stats.etm_rd_body_control.fmds_value.ui64++;
1453 
1454 		(void) etm_maybe_send_response(hdl, conn, ctl_hdrp, resp_code);
1455 
1456 	} else if (ev_hdrp->ev_pp.pp_msg_type == ETM_MSG_TYPE_RESPONSE) {
1457 
1458 		resp_hdrp = (void*)ev_hdrp;
1459 
1460 		fmd_hdl_debug(hdl, "info: rcvd RESPONSE msg from xport\n");
1461 		if (etm_debug_lvl >= 1) {
1462 			fmd_hdl_debug(hdl, "info: resp xid 0x%x\n",
1463 					(int)resp_hdrp->resp_pp.pp_xid);
1464 		}
1465 
1466 		body_sz = resp_hdrp->resp_len;
1467 		body_buf = fmd_hdl_zalloc(hdl, body_sz, FMD_SLEEP);
1468 
1469 		if ((n = etm_io_op(hdl, "bad io read on resp len",
1470 				conn, body_buf, body_sz, ETM_IO_OP_RD)) < 0) {
1471 			goto func_ret;
1472 		}
1473 
1474 		etm_stats.etm_rd_body_response.fmds_value.ui64++;
1475 
1476 		/*
1477 		 * look up the xid to interpret the response body
1478 		 *
1479 		 * ping is a nop; for ver negot confirm that a supported
1480 		 * protocol version was negotiated and remember which one
1481 		 */
1482 
1483 		if ((resp_hdrp->resp_pp.pp_xid != etm_xid_ping) &&
1484 		    (resp_hdrp->resp_pp.pp_xid != etm_xid_ver_negot)) {
1485 			etm_stats.etm_xid_bad.fmds_value.ui64++;
1486 			goto func_ret;
1487 		}
1488 
1489 		if (resp_hdrp->resp_pp.pp_xid == etm_xid_ver_negot) {
1490 			if ((body_buf[0] < ETM_PROTO_V1) ||
1491 			    (body_buf[0] > ETM_PROTO_V2)) {
1492 				etm_stats.etm_ver_bad.fmds_value.ui64++;
1493 				goto func_ret;
1494 			}
1495 			etm_resp_ver = body_buf[0];
1496 		} /* if have resp to last req to negotiate proto ver */
1497 
1498 	} /* whether we have a FMA_EVENT, CONTROL, or RESPONSE msg */
1499 
1500 func_ret:
1501 
1502 	(void) etm_conn_close(hdl, "bad conn close after msg recv", conn);
1503 
1504 	if (etm_debug_lvl >= 2) {
1505 		etm_show_time(hdl, "post conn handle");
1506 	}
1507 	if (ev_hdrp != NULL) {
1508 		fmd_hdl_free(hdl, ev_hdrp, hdr_sz);
1509 	}
1510 	if (body_buf != NULL) {
1511 		fmd_hdl_free(hdl, body_buf, body_sz);
1512 	}
1513 } /* etm_handle_new_conn() */
1514 
1515 /*
1516  * etm_server - loop forever accepting new connections
1517  *		using the given FMD handle,
1518  *		handling any ETM msgs sent from the other side
1519  *		via each such connection
1520  */
1521 
1522 static void
1523 etm_server(void *arg)
1524 {
1525 	etm_xport_conn_t	conn;		/* connection handle */
1526 	ssize_t			n;		/* gen use */
1527 	fmd_hdl_t		*hdl;		/* FMD handle */
1528 
1529 	hdl = arg;
1530 
1531 	fmd_hdl_debug(hdl, "info: connection server starting\n");
1532 
1533 	while (!etm_is_dying) {
1534 		if ((conn = etm_xport_accept(hdl, NULL)) == NULL) {
1535 			/* errno assumed set by above call */
1536 			n = errno;
1537 			if (etm_is_dying) {
1538 				break;
1539 			}
1540 			fmd_hdl_debug(hdl,
1541 				"error: bad conn accept errno %d\n", n);
1542 			etm_stats.etm_xport_accept_fail.fmds_value.ui64++;
1543 			/* avoid spinning CPU */
1544 			(void) etm_sleep(ETM_SLEEP_SLOW);
1545 			continue;
1546 		}
1547 
1548 		/*
1549 		 * Design_Note: etm_handle_new_conn() will close the
1550 		 *		accepted connection when done. In early designs
1551 		 *		etm_handle_new_conn() was spawned as a
1552 		 *		separate thread via pthread_create();
1553 		 *		however fmd_thr_create() constrains thread
1554 		 *		creation to prevent spawned threads from
1555 		 *		spawning others (ie, no grandchildren).
1556 		 *		Hence etm_handle_new_conn() is now called
1557 		 *		as a simple function [w/ multiple args].
1558 		 */
1559 
1560 		etm_handle_new_conn(hdl, conn);
1561 
1562 	} /* while accepting new connections until ETM dies */
1563 
1564 	/* ETM is dying (probably due to "fmadm unload etm") */
1565 
1566 	if (etm_debug_lvl >= 1) {
1567 		fmd_hdl_debug(hdl, "info: connection server is dying\n");
1568 	}
1569 } /* etm_server() */
1570 
1571 /*
1572  * -------------------------- FMD entry points -------------------------------
1573  */
1574 
1575 /*
1576  * _fmd_init - initialize the transport for use by ETM and start the
1577  *		server daemon to accept new connections to us
1578  *
1579  *		FMD will read our *.conf and subscribe us to FMA events
1580  */
1581 
1582 void
1583 _fmd_init(fmd_hdl_t *hdl)
1584 {
1585 	struct timeval		tmv;		/* timeval */
1586 	ssize_t			n;		/* gen use */
1587 
1588 	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
1589 		return; /* invalid data in configuration file */
1590 	}
1591 
1592 	fmd_hdl_debug(hdl, "info: module initializing\n");
1593 
1594 	/* setup statistics and properties from FMD */
1595 
1596 	(void) fmd_stat_create(hdl, FMD_STAT_NOALLOC,
1597 				sizeof (etm_stats) / sizeof (fmd_stat_t),
1598 				(fmd_stat_t *)&etm_stats);
1599 
1600 	etm_debug_lvl = fmd_prop_get_int32(hdl, ETM_PROP_NM_DEBUG_LVL);
1601 	etm_debug_max_ev_cnt = fmd_prop_get_int32(hdl,
1602 						ETM_PROP_NM_DEBUG_MAX_EV_CNT);
1603 	fmd_hdl_debug(hdl, "info: etm_debug_lvl %d "
1604 			"etm_debug_max_ev_cnt %d\n",
1605 			etm_debug_lvl, etm_debug_max_ev_cnt);
1606 
1607 	/* encourage protocol transaction id to be unique per module load */
1608 
1609 	(void) gettimeofday(&tmv, NULL);
1610 	etm_xid_cur = (uint32_t)((tmv.tv_sec << 10) |
1611 	    ((unsigned long)tmv.tv_usec >> 10));
1612 
1613 	/*
1614 	 * init the transport,
1615 	 * start the connection acceptance server, and
1616 	 * request protocol version be negotiated
1617 	 */
1618 
1619 	if ((n = etm_xport_init(hdl)) != 0) {
1620 		fmd_hdl_error(hdl, "error: bad xport init errno %d\n", (-n));
1621 		fmd_hdl_unregister(hdl);
1622 		return;
1623 	}
1624 
1625 	etm_svr_tid = fmd_thr_create(hdl, etm_server, hdl);
1626 	etm_req_ver_negot(hdl);
1627 
1628 	fmd_hdl_debug(hdl, "info: module initialized ok\n");
1629 
1630 } /* _fmd_init() */
1631 
1632 /*
1633  * etm_recv - receive an FMA event from FMD and transport it
1634  *		to the remote endpoint
1635  */
1636 
1637 /*ARGSUSED*/
1638 void
1639 etm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *evp, const char *class)
1640 {
1641 	etm_xport_addr_t	*addrv;	/* vector of transport addresses */
1642 	etm_xport_conn_t	conn;	/* connection handle */
1643 	etm_proto_v1_ev_hdr_t	*hdrp;	/* for FMA_EVENT msg */
1644 	ssize_t			i, n;	/* gen use */
1645 	size_t			sz;	/* header size */
1646 	size_t			buflen;	/* size of packed FMA event */
1647 	uint8_t			*buf;	/* tmp buffer for packed FMA event */
1648 
1649 	buflen = 0;
1650 	(void) nvlist_size(evp, &buflen, NV_ENCODE_XDR);
1651 	etm_stats.etm_rd_fmd_bytes.fmds_value.ui64 += buflen;
1652 	etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64++;
1653 
1654 	fmd_hdl_debug(hdl, "info: rcvd event %p from FMD\n", evp);
1655 	fmd_hdl_debug(hdl, "info: cnt %llu class %s\n",
1656 		etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64, class);
1657 
1658 	/*
1659 	 * if the debug limit has been set, avoid excessive traffic,
1660 	 * for example, an infinite cycle using loopback nodes
1661 	 */
1662 
1663 	if ((etm_debug_max_ev_cnt >= 0) &&
1664 		(etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64 >
1665 						etm_debug_max_ev_cnt)) {
1666 		fmd_hdl_debug(hdl, "warning: FMA event dropped: "
1667 			"event %p cnt %llu > debug max %d\n", evp,
1668 			etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64,
1669 			etm_debug_max_ev_cnt);
1670 		etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
1671 		return;
1672 	}
1673 
1674 	/* allocate a buffer for the FMA event and nvlist pack it */
1675 
1676 	buf = fmd_hdl_zalloc(hdl, buflen, FMD_SLEEP);
1677 
1678 	if ((n = nvlist_pack(evp, (char **)&buf, &buflen,
1679 					NV_ENCODE_XDR, 0)) != 0) {
1680 		fmd_hdl_error(hdl, "error: FMA event dropped: "
1681 				"event pack errno %d\n", n);
1682 		etm_stats.etm_os_nvlist_pack_fail.fmds_value.ui64++;
1683 		etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
1684 		fmd_hdl_free(hdl, buf, buflen);
1685 		return;
1686 	}
1687 
1688 	/* get vector of dst addrs and send the FMA event to each one */
1689 
1690 	if ((addrv = etm_xport_get_ev_addrv(hdl, evp)) == NULL) {
1691 		fmd_hdl_error(hdl, "error: FMA event dropped: "
1692 				"bad event dst addrs errno %d\n", errno);
1693 		etm_stats.etm_xport_get_ev_addrv_fail.fmds_value.ui64++;
1694 		etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
1695 		fmd_hdl_free(hdl, buf, buflen);
1696 		return;
1697 	}
1698 
1699 	for (i = 0; addrv[i] != NULL; i++) {
1700 
1701 		/* open a new connection to this dst addr */
1702 
1703 		if ((n = etm_conn_open(hdl, "FMA event dropped: "
1704 				"bad conn open on new ev",
1705 				addrv[i], &conn)) < 0) {
1706 			etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
1707 			continue;
1708 		}
1709 
1710 		/* write the ETM message header */
1711 
1712 		if ((hdrp = etm_hdr_write(hdl, conn, evp, NV_ENCODE_XDR,
1713 							&sz)) == NULL) {
1714 			fmd_hdl_error(hdl, "error: FMA event dropped: "
1715 					"bad hdr write errno %d\n", errno);
1716 			(void) etm_conn_close(hdl,
1717 				"bad conn close per bad hdr wr", conn);
1718 			etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
1719 			continue;
1720 		}
1721 
1722 		fmd_hdl_free(hdl, hdrp, sz);	/* header not needed */
1723 		etm_stats.etm_wr_hdr_fmaevent.fmds_value.ui64++;
1724 		fmd_hdl_debug(hdl, "info: hdr xport write ok for event %p\n",
1725 								evp);
1726 
1727 		/* write the ETM message body, ie, the packed nvlist */
1728 
1729 		if ((n = etm_io_op(hdl, "FMA event dropped: "
1730 					"bad io write on event", conn,
1731 					buf, buflen, ETM_IO_OP_WR)) < 0) {
1732 			(void) etm_conn_close(hdl,
1733 				"bad conn close per bad body wr", conn);
1734 			etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
1735 			continue;
1736 		}
1737 
1738 		etm_stats.etm_wr_body_fmaevent.fmds_value.ui64++;
1739 		etm_stats.etm_wr_xport_bytes.fmds_value.ui64 += buflen;
1740 		fmd_hdl_debug(hdl, "info: body xport write ok for event %p\n",
1741 								evp);
1742 
1743 		/* close the connection */
1744 
1745 		(void) etm_conn_close(hdl, "bad conn close after event send",
1746 									conn);
1747 	} /* foreach dst addr in the vector */
1748 
1749 	etm_xport_free_addrv(hdl, addrv);
1750 	fmd_hdl_free(hdl, buf, buflen);
1751 
1752 } /* etm_recv() */
1753 
1754 /*
1755  * _fmd_fini - stop the server daemon and teardown the transport
1756  */
1757 
1758 void
1759 _fmd_fini(fmd_hdl_t *hdl)
1760 {
1761 	ssize_t	n;	/* gen use */
1762 
1763 	fmd_hdl_debug(hdl, "info: module finializing\n");
1764 
1765 	/* kill the connection server ; wait for it to die */
1766 
1767 	etm_is_dying = 1;
1768 
1769 	if (etm_svr_tid != NULL) {
1770 		fmd_thr_signal(hdl, etm_svr_tid);
1771 		fmd_thr_destroy(hdl, etm_svr_tid);
1772 		etm_svr_tid = NULL;
1773 	} /* if server thread was successfully created */
1774 
1775 	/* teardown the transport */
1776 
1777 	if ((n = etm_xport_fini(hdl)) != 0) {
1778 		fmd_hdl_error(hdl, "warning: xport fini errno %d\n", (-n));
1779 	}
1780 
1781 	fmd_hdl_debug(hdl, "info: module finalized ok\n");
1782 
1783 } /* _fmd_fini() */
1784