/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * etm.c	FMA Event Transport Module implementation, a plugin of FMD
 *		for sun4v/Ontario
 *
 *		plugin for sending/receiving FMA events to/from the service
 *		processor
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * --------------------------------- includes --------------------------------
 */

#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <netinet/in.h>
#include <fm/fmd_api.h>
#include <sys/fm/ldom.h>

#include "etm_xport_api.h"
#include "etm_etm_proto.h"
#include "etm_impl.h"

#include <pthread.h>
#include <signal.h>
#include <stropts.h>
#include <locale.h>
#include <strings.h>
#include <stdlib.h>
#include <unistd.h>
#include <limits.h>
#include <values.h>
#include <alloca.h>
#include <errno.h>
#include <fcntl.h>
#include <time.h>


/*
 * ----------------------------- forward decls -------------------------------
 */

static void
etm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class);

/*
 * ------------------------- data structs for FMD ----------------------------
 */

static const fmd_hdl_ops_t fmd_ops = {
	etm_recv,	/* fmdo_recv */
	NULL,		/* fmdo_timeout */
	NULL,		/* fmdo_close */
	NULL,		/* fmdo_stats */
	NULL,		/* fmdo_gc */
	NULL,		/* fmdo_send */
};

static const fmd_prop_t fmd_props[] = {
	{ ETM_PROP_NM_XPORT_ADDRS, FMD_TYPE_STRING, "" },
	{ ETM_PROP_NM_DEBUG_LVL, FMD_TYPE_INT32, "0" },
	{ ETM_PROP_NM_DEBUG_MAX_EV_CNT, FMD_TYPE_INT32, "-1" },
	{ NULL, 0, NULL }
};

static const fmd_hdl_info_t fmd_info = {
	"FMA Event Transport Module", "1.0", &fmd_ops, fmd_props
};

/*
 * ----------------------- private consts and defns --------------------------
 */

/* misc buffer for variable sized protocol header fields */

#define	ETM_MISC_BUF_SZ	(4 * 1024)

/* try limit for IO operations w/ capped exp backoff sleep on retry */

/*
 * Design_Note:	ETM will potentially retry forever IO operations that the
 *		transport fails with EAGAIN (aka EWOULDBLOCK) rather than
 *		giving up after some number of seconds. This avoids
 *		dropping FMA events while the service processor is down,
 *		but at the risk of blocking in fmdo_recv() forever and
 *		overflowing FMD's event queue for ETM.
 *		A future TBD enhancement would be to always recv
 *		and send each ETM msg in a single read/write() to reduce
 *		the risk of failure between ETM msg hdr and body,
 *		assuming the MTU_SZ is large enough.
 */
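
/*
 * For example, with ETM_TRY_BACKOFF_RATE 4 and ETM_TRY_BACKOFF_CAP 60,
 * the per-retry sleep sequence computed in etm_io_op() below is
 * 0, 1, 4, 16, 60, 60, ... seconds, ie, exponential growth capped at
 * one minute per retry.
 */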

#define	ETM_TRY_MAX_CNT		(MAXINT - 1)
#define	ETM_TRY_BACKOFF_RATE	(4)
#define	ETM_TRY_BACKOFF_CAP	(60)

/* amount to increment protocol transaction id on each new send */

#define	ETM_XID_INC	(2)

/*
 * ---------------------------- global data ----------------------------------
 */

static fmd_hdl_t
*init_hdl = NULL;	/* used in mem allocator at init time */

static int
etm_debug_lvl = 0;	/* debug level: 0 is off, 1 is on, 2 is more, etc */

static int
etm_debug_max_ev_cnt = -1; /* max allowed event count for debugging */

static fmd_xprt_t
*etm_fmd_xprt = NULL;	/* FMD transport layer handle */

static pthread_t
etm_svr_tid = NULL;	/* thread id of connection acceptance server */

static volatile int
etm_is_dying = 0;	/* bool for dying (killing self) */

static uint32_t
etm_xid_cur = 0;	/* current transaction id for sends */

static uint32_t
etm_xid_ping = 0;	/* xid of last CONTROL msg sent requesting ping */

static uint32_t
etm_xid_ver_negot = 0;	/* xid of last CONTROL msg sent requesting ver negot */

static uint32_t
etm_xid_posted_ev = 0;	/* xid of last FMA_EVENT msg/event posted OK to FMD */

static uint8_t
etm_resp_ver = ETM_PROTO_V1;	/* proto ver [negotiated] for msg sends */

static struct stats {

	/* ETM msg counters */

	fmd_stat_t etm_rd_hdr_fmaevent;
	fmd_stat_t etm_rd_hdr_control;
	fmd_stat_t etm_rd_hdr_response;
	fmd_stat_t etm_rd_body_fmaevent;
	fmd_stat_t etm_rd_body_control;
	fmd_stat_t etm_rd_body_response;
	fmd_stat_t etm_wr_hdr_fmaevent;
	fmd_stat_t etm_wr_hdr_control;
	fmd_stat_t etm_wr_hdr_response;
	fmd_stat_t etm_wr_body_fmaevent;
	fmd_stat_t etm_wr_body_control;
	fmd_stat_t etm_wr_body_response;

	/* ETM byte counters */

	fmd_stat_t etm_wr_fmd_bytes;
	fmd_stat_t etm_rd_fmd_bytes;
	fmd_stat_t etm_wr_xport_bytes;
	fmd_stat_t etm_rd_xport_bytes;

	fmd_stat_t etm_magic_drop_bytes;

	/* ETM [dropped] FMA event counters */

	fmd_stat_t etm_rd_fmd_fmaevent;
	fmd_stat_t etm_wr_fmd_fmaevent;

	fmd_stat_t etm_rd_drop_fmaevent;
	fmd_stat_t etm_wr_drop_fmaevent;

	fmd_stat_t etm_rd_dup_fmaevent;
	fmd_stat_t etm_wr_dup_fmaevent;

	/* ETM protocol failures */

	fmd_stat_t etm_magic_bad;
	fmd_stat_t etm_ver_bad;
	fmd_stat_t etm_msgtype_bad;
	fmd_stat_t etm_subtype_bad;
	fmd_stat_t etm_xid_bad;
	fmd_stat_t etm_fmaeventlen_bad;
	fmd_stat_t etm_respcode_bad;
	fmd_stat_t etm_timeout_bad;
	fmd_stat_t etm_evlens_bad;

	/* IO operation failures */

	fmd_stat_t etm_xport_wr_fail;
	fmd_stat_t etm_xport_rd_fail;
	fmd_stat_t etm_xport_pk_fail;

	/* IO operation retries */

	fmd_stat_t etm_xport_wr_retry;
	fmd_stat_t etm_xport_rd_retry;
	fmd_stat_t etm_xport_pk_retry;

	/* system and library failures */

	fmd_stat_t etm_os_nvlist_pack_fail;
	fmd_stat_t etm_os_nvlist_unpack_fail;
	fmd_stat_t etm_os_nvlist_size_fail;
	fmd_stat_t etm_os_pthread_create_fail;

	/* xport API failures */

	fmd_stat_t etm_xport_get_ev_addrv_fail;
	fmd_stat_t etm_xport_open_fail;
	fmd_stat_t etm_xport_close_fail;
	fmd_stat_t etm_xport_accept_fail;
	fmd_stat_t etm_xport_open_retry;

	/* FMD entry point bad arguments */

	fmd_stat_t etm_fmd_recv_badargs;
	fmd_stat_t etm_fmd_init_badargs;
	fmd_stat_t etm_fmd_fini_badargs;

} etm_stats = {

	/* ETM msg counters */

	{ "etm_rd_hdr_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg headers rcvd from xport" },
	{ "etm_rd_hdr_control", FMD_TYPE_UINT64,
		"ETM control msg headers rcvd from xport" },
	{ "etm_rd_hdr_response", FMD_TYPE_UINT64,
		"ETM response msg headers rcvd from xport" },
	{ "etm_rd_body_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg bodies rcvd from xport" },
	{ "etm_rd_body_control", FMD_TYPE_UINT64,
		"ETM control msg bodies rcvd from xport" },
	{ "etm_rd_body_response", FMD_TYPE_UINT64,
		"ETM response msg bodies rcvd from xport" },
	{ "etm_wr_hdr_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg headers sent to xport" },
	{ "etm_wr_hdr_control", FMD_TYPE_UINT64,
		"ETM control msg headers sent to xport" },
	{ "etm_wr_hdr_response", FMD_TYPE_UINT64,
		"ETM response msg headers sent to xport" },
	{ "etm_wr_body_fmaevent", FMD_TYPE_UINT64,
		"ETM fmaevent msg bodies sent to xport" },
	{ "etm_wr_body_control", FMD_TYPE_UINT64,
		"ETM control msg bodies sent to xport" },
	{ "etm_wr_body_response", FMD_TYPE_UINT64,
		"ETM response msg bodies sent to xport" },

	/* ETM byte counters */

	{ "etm_wr_fmd_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events sent to FMD" },
	{ "etm_rd_fmd_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events rcvd from FMD" },
	{ "etm_wr_xport_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events sent to xport" },
	{ "etm_rd_xport_bytes", FMD_TYPE_UINT64,
		"bytes of FMA events rcvd from xport" },

	{ "etm_magic_drop_bytes", FMD_TYPE_UINT64,
		"bytes dropped from xport pre magic num" },

	/* ETM [dropped] FMA event counters */

	{ "etm_rd_fmd_fmaevent", FMD_TYPE_UINT64,
		"FMA events rcvd from FMD" },
	{ "etm_wr_fmd_fmaevent", FMD_TYPE_UINT64,
		"FMA events sent to FMD" },

	{ "etm_rd_drop_fmaevent", FMD_TYPE_UINT64,
		"dropped FMA events from xport" },
	{ "etm_wr_drop_fmaevent", FMD_TYPE_UINT64,
		"dropped FMA events to xport" },

	{ "etm_rd_dup_fmaevent", FMD_TYPE_UINT64,
		"duplicate FMA events from xport" },
	{ "etm_wr_dup_fmaevent", FMD_TYPE_UINT64,
		"duplicate FMA events to xport" },

	/* ETM protocol failures */

	{ "etm_magic_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid magic num" },
	{ "etm_ver_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid protocol version" },
	{ "etm_msgtype_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid message type" },
	{ "etm_subtype_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid sub type" },
	{ "etm_xid_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ unmatched xid" },
	{ "etm_fmaeventlen_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid FMA event length" },
	{ "etm_respcode_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid response code" },
	{ "etm_timeout_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ invalid timeout value" },
	{ "etm_evlens_bad", FMD_TYPE_UINT64,
		"ETM msgs w/ too many event lengths" },

	/* IO operation failures */

	{ "etm_xport_wr_fail", FMD_TYPE_UINT64,
		"xport write failures" },
	{ "etm_xport_rd_fail", FMD_TYPE_UINT64,
		"xport read failures" },
	{ "etm_xport_pk_fail", FMD_TYPE_UINT64,
		"xport peek failures" },

	/* IO operation retries */

	{ "etm_xport_wr_retry", FMD_TYPE_UINT64,
		"xport write retries" },
	{ "etm_xport_rd_retry", FMD_TYPE_UINT64,
		"xport read retries" },
	{ "etm_xport_pk_retry", FMD_TYPE_UINT64,
		"xport peek retries" },
retries" }, 344 345 /* system and library failures */ 346 347 { "etm_os_nvlist_pack_fail", FMD_TYPE_UINT64, 348 "nvlist_pack failures" }, 349 { "etm_os_nvlist_unpack_fail", FMD_TYPE_UINT64, 350 "nvlist_unpack failures" }, 351 { "etm_os_nvlist_size_fail", FMD_TYPE_UINT64, 352 "nvlist_size failures" }, 353 { "etm_os_pthread_create_fail", FMD_TYPE_UINT64, 354 "pthread_create failures" }, 355 356 /* transport API failures */ 357 358 { "etm_xport_get_ev_addrv_fail", FMD_TYPE_UINT64, 359 "xport get event addrv API failures" }, 360 { "etm_xport_open_fail", FMD_TYPE_UINT64, 361 "xport open API failures" }, 362 { "etm_xport_close_fail", FMD_TYPE_UINT64, 363 "xport close API failures" }, 364 { "etm_xport_accept_fail", FMD_TYPE_UINT64, 365 "xport accept API failures" }, 366 { "etm_xport_open_retry", FMD_TYPE_UINT64, 367 "xport open API retries" }, 368 369 /* FMD entry point bad arguments */ 370 371 { "etm_fmd_recv_badargs", FMD_TYPE_UINT64, 372 "bad arguments from fmd_recv entry point" }, 373 { "etm_fmd_init_badargs", FMD_TYPE_UINT64, 374 "bad arguments from fmd_init entry point" }, 375 { "etm_fmd_fini_badargs", FMD_TYPE_UINT64, 376 "bad arguments from fmd_fini entry point" } 377 }; 378 379 /* 380 * -------------------------- support functions ------------------------------ 381 */ 382 383 /* 384 * Design_Note: Each failure worth reporting to FMD should be done using 385 * a single call to fmd_hdl_error() as it logs an FMA event 386 * for each call. Also be aware that all the fmd_hdl_*() 387 * format strings currently use platform specific *printf() 388 * routines; so "%p" under Solaris does not prepend "0x" to 389 * the outputted hex digits, while Linux and VxWorks do. 390 */ 391 392 /* 393 * etm_show_time - display the current time of day (for debugging) using 394 * the given FMD module handle and annotation string 395 */ 396 397 static void 398 etm_show_time(fmd_hdl_t *hdl, char *note_str) 399 { 400 struct timeval tmv; /* timeval */ 401 402 (void) gettimeofday(&tmv, NULL); 403 fmd_hdl_debug(hdl, "info: %s: cur Unix Epoch time %d.%06d\n", 404 note_str, tmv.tv_sec, tmv.tv_usec); 405 406 } /* etm_show_time() */ 407 408 /* 409 * etm_hexdump - hexdump the given buffer (for debugging) using 410 * the given FMD module handle 411 */ 412 413 static void 414 etm_hexdump(fmd_hdl_t *hdl, void *buf, size_t byte_cnt) 415 { 416 uint8_t *bp; /* byte ptr */ 417 int i, j; /* index */ 418 char cb[80]; /* char buf */ 419 unsigned int n; /* a byte of data for sprintf() */ 420 421 bp = buf; 422 j = 0; 423 424 /* 425 * Design_Note: fmd_hdl_debug() auto adds a newline if missing; 426 * hence cb exists to accumulate a longer string. 427 */ 428 429 for (i = 1; i <= byte_cnt; i++) { 430 n = *bp++; 431 (void) sprintf(&cb[j], "%2.2x ", n); 432 j += 3; 433 /* add a newline every 16 bytes or at the buffer's end */ 434 if (((i % 16) == 0) || (i >= byte_cnt)) { 435 cb[j-1] = '\0'; 436 fmd_hdl_debug(hdl, "%s\n", cb); 437 j = 0; 438 } 439 } /* for each byte in the buffer */ 440 441 } /* etm_hexdump() */ 442 443 /* 444 * etm_sleep - sleep the caller for the given number of seconds, 445 * return 0 or -errno value 446 * 447 * Design_Note: To avoid interfering with FMD's signal mask (SIGALRM) 448 * do not use [Solaris] sleep(3C) and instead use 449 * pthread_cond_wait() or nanosleep(), both of which 450 * are POSIX spec-ed to leave signal masks alone. 451 * This is needed for Solaris and Linux (domain and SP). 

/*
 * etm_sleep - sleep the caller for the given number of seconds,
 *		return 0 or -errno value
 *
 * Design_Note:	To avoid interfering with FMD's signal mask (SIGALRM)
 *		do not use [Solaris] sleep(3C) and instead use
 *		pthread_cond_wait() or nanosleep(), both of which
 *		are POSIX spec-ed to leave signal masks alone.
 *		This is needed for Solaris and Linux (domain and SP).
 */

static int
etm_sleep(unsigned sleep_sec)
{
	struct timespec tms;	/* for nanosleep() */

	tms.tv_sec = sleep_sec;
	tms.tv_nsec = 0;

	if (nanosleep(&tms, NULL) < 0) {
		/* errno assumed set by above call */
		return (-errno);
	}
	return (0);

} /* etm_sleep() */

/*
 * etm_conn_open - open a connection to the given transport address,
 *		return 0 and the opened connection handle
 *		or -errno value
 *
 * caveats:	the err_substr is used in failure cases for calling
 *		fmd_hdl_error()
 */

static int
etm_conn_open(fmd_hdl_t *hdl, char *err_substr,
    etm_xport_addr_t addr, etm_xport_conn_t *connp)
{
	etm_xport_conn_t conn;	/* connection to return */
	int nev;		/* -errno value */

	if ((conn = etm_xport_open(hdl, addr)) == NULL) {
		nev = (-errno);
		fmd_hdl_error(hdl, "error: %s: errno %d\n",
		    err_substr, errno);
		etm_stats.etm_xport_open_fail.fmds_value.ui64++;
		return (nev);
	} else {
		*connp = conn;
		return (0);
	}
} /* etm_conn_open() */

/*
 * etm_conn_close - close the given connection,
 *		return 0 or -errno value
 *
 * caveats:	the err_substr is used in failure cases for calling
 *		fmd_hdl_error()
 */

static int
etm_conn_close(fmd_hdl_t *hdl, char *err_substr, etm_xport_conn_t conn)
{
	int nev;		/* -errno value */

	if (etm_xport_close(hdl, conn) == NULL) {
		nev = (-errno);
		fmd_hdl_error(hdl, "warning: %s: errno %d\n",
		    err_substr, errno);
		etm_stats.etm_xport_close_fail.fmds_value.ui64++;
		return (nev);
	} else {
		return (0);
	}
} /* etm_conn_close() */

/*
 * etm_io_op - perform an IO operation on the given connection
 *		with the given buffer,
 *		accommodating MTU size and retrying op if needed,
 *		return how many bytes actually done by the op
 *		or -errno value
 *
 * caveats:	the err_substr is used in failure cases for calling
 *		fmd_hdl_error()
 */

static ssize_t
etm_io_op(fmd_hdl_t *hdl, char *err_substr, etm_xport_conn_t conn,
    void *buf, size_t byte_cnt, int io_op)
{
	ssize_t rv;		/* ret val / byte count */
	ssize_t n;		/* gen use */
	uint8_t *datap;		/* ptr to data */
	size_t mtu_sz;		/* MTU size in bytes */
	int (*io_func_ptr)(fmd_hdl_t *, etm_xport_conn_t,
	    void *, size_t);
	size_t io_sz;		/* byte count for io_func_ptr */
	int try_cnt;		/* number of tries done */
	int sleep_sec;		/* exp backoff sleep period in sec */
	int sleep_rv;		/* ret val from sleeping */
	fmd_stat_t *io_retry_stat; /* IO retry stat to update */
	fmd_stat_t *io_fail_stat; /* IO failure stat to update */

	if ((conn == NULL) || (buf == NULL)) {
		return (-EINVAL);
	}
	switch (io_op) {
	case ETM_IO_OP_RD:
		io_func_ptr = etm_xport_read;
		io_retry_stat = &etm_stats.etm_xport_rd_retry;
		io_fail_stat = &etm_stats.etm_xport_rd_fail;
		break;
	case ETM_IO_OP_WR:
		io_func_ptr = etm_xport_write;
		io_retry_stat = &etm_stats.etm_xport_wr_retry;
		io_fail_stat = &etm_stats.etm_xport_wr_fail;
		break;
	default:
		return (-EINVAL);
	}
	if (byte_cnt == 0) {
		return (byte_cnt);	/* nop */
	}

	/* obtain [current] MTU size */

	if ((n = etm_xport_get_opt(hdl, conn, ETM_XPORT_OPT_MTU_SZ)) < 0) {
		mtu_sz = ETM_XPORT_MTU_SZ_DEF;
	} else {
		mtu_sz = n;
	}

	/* loop until all IO done, try limit exceeded, or real failure */

	rv = 0;
	datap = buf;
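	/*
	 * Each pass below moves at most mtu_sz bytes, retries EAGAIN with
	 * capped exponential backoff, and advances datap/rv by however
	 * many bytes the underlying op reports.
	 */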
	while (rv < byte_cnt) {
		io_sz = MIN((byte_cnt - rv), mtu_sz);
		try_cnt = 0;
		sleep_sec = 0;

		/* when give up, return -errno value even if partly done */

		while ((n = (*io_func_ptr)(hdl, conn, datap, io_sz)) ==
		    (-EAGAIN)) {
			try_cnt++;
			if (try_cnt > ETM_TRY_MAX_CNT) {
				rv = n;
				goto func_ret;
			}
			if (etm_is_dying) {
				rv = (-EINTR);
				goto func_ret;
			}
			if ((sleep_rv = etm_sleep(sleep_sec)) < 0) {
				rv = sleep_rv;
				goto func_ret;
			}
			sleep_sec = ((sleep_sec == 0) ? 1 :
			    (sleep_sec * ETM_TRY_BACKOFF_RATE));
			sleep_sec = MIN(sleep_sec, ETM_TRY_BACKOFF_CAP);
			io_retry_stat->fmds_value.ui64++;
			if (etm_debug_lvl >= 1) {
				fmd_hdl_debug(hdl, "info: retrying io op %d "
				    "due to EAGAIN\n", io_op);
			}
		} /* while trying the io operation */

		if (etm_is_dying) {
			rv = (-EINTR);
			goto func_ret;
		}
		if (n < 0) {
			rv = n;
			goto func_ret;
		}
		/* avoid spinning CPU when given 0 bytes but no error */
		if (n == 0) {
			if ((sleep_rv = etm_sleep(ETM_SLEEP_QUIK)) < 0) {
				rv = sleep_rv;
				goto func_ret;
			}
		}
		rv += n;
		datap += n;
	} /* while still have more data */

func_ret:

	if (rv < 0) {
		io_fail_stat->fmds_value.ui64++;
		fmd_hdl_error(hdl, "error: %s: errno %d\n",
		    err_substr, (int)(-rv));
	}
	if (etm_debug_lvl >= 3) {
		fmd_hdl_debug(hdl, "info: io op %d ret %d of %d\n",
		    io_op, (int)rv, (int)byte_cnt);
	}
	return (rv);

} /* etm_io_op() */

/*
 * etm_magic_read - read the magic number of an ETM message header
 *		from the given connection into the given buffer,
 *		return 0 or -errno value
 *
 * Design_Note:	This routine is intended to help protect ETM from protocol
 *		framing errors as might be caused by an SP reset / crash in
 *		the middle of an ETM message send; the connection will be
 *		read from for as many bytes as needed until the magic number
 *		is found using a sliding buffer for comparisons.
 */
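
/*
 * For example, if the wire carries two garbage bytes ahead of the magic
 * number, each read past the fourth byte slides buf5 left by one: the
 * displaced oldest byte is saved in drop_buf (and counted in
 * etm_magic_drop_bytes) for later debug hexdump, and the scan stops once
 * buf5[0..3] holds the magic number in network order.
 */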

static int
etm_magic_read(fmd_hdl_t *hdl, etm_xport_conn_t conn, uint32_t *magic_ptr)
{
	int rv;			/* ret val */
	uint32_t magic_num;	/* magic number */
	int byte_cnt;		/* count of bytes read */
	uint8_t buf5[4+1];	/* sliding input buffer */
	int i, j;		/* indices into buf5 */
	ssize_t n;		/* gen use */
	uint8_t drop_buf[1024];	/* dropped bytes buffer */

	rv = 0;			/* assume success */
	magic_num = 0;
	byte_cnt = 0;
	j = 0;

	/* magic number bytes are sent in network (big endian) order */

	while (magic_num != ETM_PROTO_MAGIC_NUM) {
		if ((n = etm_io_op(hdl, "bad io read on magic",
		    conn, &buf5[j], 1, ETM_IO_OP_RD)) < 0) {
			rv = n;
			goto func_ret;
		}
		byte_cnt++;
		j = MIN((j + 1), sizeof (magic_num));
		if (byte_cnt < sizeof (magic_num)) {
			continue;
		}

		if (byte_cnt > sizeof (magic_num)) {
			etm_stats.etm_magic_drop_bytes.fmds_value.ui64++;
			i = MIN(byte_cnt - j - 1, sizeof (drop_buf) - 1);
			drop_buf[i] = buf5[0];
			for (i = 0; i < j; i++) {
				buf5[i] = buf5[i+1];
			} /* for sliding the buffer contents */
		}
		(void) memcpy(&magic_num, &buf5[0], sizeof (magic_num));
		magic_num = ntohl(magic_num);
	} /* while reading bytes until the magic number is found */

func_ret:

	if (byte_cnt != sizeof (magic_num)) {
		fmd_hdl_error(hdl, "warning: bad proto frame "
		    "implies corrupt/lost msg(s)\n");
	}
	if ((byte_cnt > sizeof (magic_num)) && (etm_debug_lvl >= 2)) {
		i = MIN(byte_cnt - sizeof (magic_num), sizeof (drop_buf));
		fmd_hdl_debug(hdl, "info: magic drop hexdump "
		    "first %d of %d bytes:\n",
		    i, byte_cnt - sizeof (magic_num));
		etm_hexdump(hdl, drop_buf, i);
	}

	if (rv == 0) {
		*magic_ptr = magic_num;
	}
	return (rv);

} /* etm_magic_read() */

/*
 * etm_hdr_read - allocate, read, and validate a [variable sized]
 *		ETM message header from the given connection,
 *		return the allocated ETM message header
 *		(which is guaranteed to be large enough to reuse as a
 *		RESPONSE msg hdr) and its size
 *		or NULL and set errno on failure
 */

static void *
etm_hdr_read(fmd_hdl_t *hdl, etm_xport_conn_t conn, size_t *szp)
{
	uint8_t *hdrp;		/* ptr to header to return */
	size_t hdr_sz;		/* sizeof *hdrp */
	etm_proto_v1_pp_t pp;	/* protocol preamble */
	etm_proto_v1_ev_hdr_t *ev_hdrp;		/* for FMA_EVENT msg */
	etm_proto_v1_ctl_hdr_t *ctl_hdrp;	/* for CONTROL msg */
	etm_proto_v1_resp_hdr_t *resp_hdrp;	/* for RESPONSE msg */
	uint32_t *lenp;		/* ptr to FMA event length */
	ssize_t i, n;		/* gen use */
	uint8_t misc_buf[ETM_MISC_BUF_SZ];	/* for var sized hdrs */
	int dummy_int;		/* dummy var to appease lint */

	hdrp = NULL; hdr_sz = 0;

	/* read the magic number which starts the protocol preamble */

	if ((n = etm_magic_read(hdl, conn, &pp.pp_magic_num)) < 0) {
		errno = (-n);
		etm_stats.etm_magic_bad.fmds_value.ui64++;
		return (NULL);
	}

	/* read the rest of the protocol preamble all at once */

	if ((n = etm_io_op(hdl, "bad io read on preamble",
	    conn, &pp.pp_proto_ver,
	    sizeof (pp) - sizeof (pp.pp_magic_num),
	    ETM_IO_OP_RD)) < 0) {
		errno = (-n);
		return (NULL);
	}

	/*
	 * Design_Note: The magic number was already network decoded; but
	 *		some other preamble fields also need to be decoded,
	 *		specifically pp_xid and pp_timeout. The rest of the
	 *		preamble fields are byte sized and hence need no
	 *		decoding.
	 */
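
	/*
	 * For reference (layout inferred from field usage here; the type
	 * itself is presumably defined in etm_etm_proto.h): the preamble
	 * carries the 4 byte pp_magic_num, the byte sized pp_proto_ver,
	 * pp_msg_type, pp_sub_type, and pp_rsvd_pad, then the 4 byte
	 * pp_xid and pp_timeout, with multi-byte fields in network order
	 * on the wire.
	 */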

	pp.pp_xid = ntohl(pp.pp_xid);
	pp.pp_timeout = ntohl(pp.pp_timeout);

	/* sanity check the header as best we can */

	if ((pp.pp_proto_ver < ETM_PROTO_V1) ||
	    (pp.pp_proto_ver > ETM_PROTO_V2)) {
		fmd_hdl_error(hdl, "error: bad proto ver %d\n",
		    (int)pp.pp_proto_ver);
		errno = EPROTO;
		etm_stats.etm_ver_bad.fmds_value.ui64++;
		return (NULL);
	}

	dummy_int = pp.pp_msg_type;
	if ((dummy_int <= ETM_MSG_TYPE_TOO_LOW) ||
	    (dummy_int >= ETM_MSG_TYPE_TOO_BIG)) {
		fmd_hdl_error(hdl, "error: bad msg type %d", dummy_int);
		errno = EBADMSG;
		etm_stats.etm_msgtype_bad.fmds_value.ui64++;
		return (NULL);
	}

	/* handle [var sized] hdrs for FMA_EVENT, CONTROL, RESPONSE msgs */

	if (pp.pp_msg_type == ETM_MSG_TYPE_FMA_EVENT) {

		ev_hdrp = (void*)&misc_buf[0];
		hdr_sz = sizeof (*ev_hdrp);
		(void) memcpy(&ev_hdrp->ev_pp, &pp, sizeof (pp));

		/* sanity check the header's timeout */

		if ((ev_hdrp->ev_pp.pp_proto_ver == ETM_PROTO_V1) &&
		    (ev_hdrp->ev_pp.pp_timeout != ETM_PROTO_V1_TIMEOUT_NONE)) {
			errno = ETIME;
			etm_stats.etm_timeout_bad.fmds_value.ui64++;
			return (NULL);
		}

		/* get all FMA event lengths from the header */

		lenp = (uint32_t *)&ev_hdrp->ev_lens[0]; lenp--;
		i = -1;	/* cnt of length entries preceding 0 */
		do {
			i++; lenp++;
			if ((sizeof (*ev_hdrp) + (i * sizeof (*lenp))) >=
			    ETM_MISC_BUF_SZ) {
				errno = E2BIG;	/* ridiculous size */
				etm_stats.etm_evlens_bad.fmds_value.ui64++;
				return (NULL);
			}
			if ((n = etm_io_op(hdl, "bad io read on event len",
			    conn, lenp, sizeof (*lenp),
			    ETM_IO_OP_RD)) < 0) {
				errno = (-n);
				return (NULL);
			}
			*lenp = ntohl(*lenp);

		} while (*lenp != 0);
		i += 0; /* first len already counted by sizeof(ev_hdr) */
		hdr_sz += (i * sizeof (*lenp));

		etm_stats.etm_rd_hdr_fmaevent.fmds_value.ui64++;

	} else if (pp.pp_msg_type == ETM_MSG_TYPE_CONTROL) {

		ctl_hdrp = (void*)&misc_buf[0];
		hdr_sz = sizeof (*ctl_hdrp);
		(void) memcpy(&ctl_hdrp->ctl_pp, &pp, sizeof (pp));

		/* sanity check the header's sub type (control selector) */

		if ((ctl_hdrp->ctl_pp.pp_sub_type <= ETM_CTL_SEL_TOO_LOW) ||
		    (ctl_hdrp->ctl_pp.pp_sub_type >= ETM_CTL_SEL_TOO_BIG)) {
			fmd_hdl_error(hdl, "error: bad ctl sub type %d\n",
			    (int)ctl_hdrp->ctl_pp.pp_sub_type);
			errno = EBADMSG;
			etm_stats.etm_subtype_bad.fmds_value.ui64++;
			return (NULL);
		}

		/* get the control length */

		if ((n = etm_io_op(hdl, "bad io read on ctl len",
		    conn, &ctl_hdrp->ctl_len,
		    sizeof (ctl_hdrp->ctl_len),
		    ETM_IO_OP_RD)) < 0) {
			errno = (-n);
			return (NULL);
		}

		ctl_hdrp->ctl_len = ntohl(ctl_hdrp->ctl_len);

		etm_stats.etm_rd_hdr_control.fmds_value.ui64++;

	} else if (pp.pp_msg_type == ETM_MSG_TYPE_RESPONSE) {

		resp_hdrp = (void*)&misc_buf[0];
		hdr_sz = sizeof (*resp_hdrp);
		(void) memcpy(&resp_hdrp->resp_pp, &pp, sizeof (pp));

		/* sanity check the header's timeout */

		if (resp_hdrp->resp_pp.pp_timeout !=
		    ETM_PROTO_V1_TIMEOUT_NONE) {
			errno = ETIME;
			etm_stats.etm_timeout_bad.fmds_value.ui64++;
			return (NULL);
		}

		/* get the response code and length */

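		/*
		 * Note: reading both fields in one io op assumes resp_code
		 * and resp_len are adjacent in etm_proto_v1_resp_hdr_t, as
		 * the sizeof arithmetic below does.
		 */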
		if ((n = etm_io_op(hdl, "bad io read on resp code+len",
		    conn, &resp_hdrp->resp_code,
		    sizeof (resp_hdrp->resp_code) +
		    sizeof (resp_hdrp->resp_len),
		    ETM_IO_OP_RD)) < 0) {
			errno = (-n);
			return (NULL);
		}

		resp_hdrp->resp_code = ntohl(resp_hdrp->resp_code);
		resp_hdrp->resp_len = ntohl(resp_hdrp->resp_len);

		etm_stats.etm_rd_hdr_response.fmds_value.ui64++;

	} /* whether we have FMA_EVENT, CONTROL, RESPONSE msg */

	/*
	 * choose a header size that allows hdr reuse for RESPONSE msgs,
	 * allocate and populate the message header, and
	 * return alloc size to caller for later free of hdrp
	 */

	hdr_sz = MAX(hdr_sz, sizeof (*resp_hdrp));
	hdrp = fmd_hdl_zalloc(hdl, hdr_sz, FMD_SLEEP);
	(void) memcpy(hdrp, misc_buf, hdr_sz);

	if (etm_debug_lvl >= 3) {
		fmd_hdl_debug(hdl, "info: msg hdr hexdump %d bytes:\n",
		    hdr_sz);
		etm_hexdump(hdl, hdrp, hdr_sz);
	}
	*szp = hdr_sz;
	return (hdrp);

} /* etm_hdr_read() */

/*
 * etm_hdr_write - create and write a [variable sized] ETM message header
 *		to the given connection appropriate for the given FMA event
 *		and type of nvlist encoding,
 *		return the allocated ETM message header and its size
 *		or NULL and set errno on failure
 */

static void*
etm_hdr_write(fmd_hdl_t *hdl, etm_xport_conn_t conn, nvlist_t *evp,
    int encoding, size_t *szp)
{
	etm_proto_v1_ev_hdr_t *hdrp;	/* for FMA_EVENT msg */
	size_t hdr_sz;		/* sizeof *hdrp */
	uint32_t *lenp;		/* ptr to FMA event length */
	size_t evsz;		/* packed FMA event size */
	ssize_t n;		/* gen use */

	/* allocate and populate the message header for 1 FMA event */

	hdr_sz = sizeof (*hdrp) + (1 * sizeof (hdrp->ev_lens[0]));

	hdrp = fmd_hdl_zalloc(hdl, hdr_sz, FMD_SLEEP);

	/*
	 * Design_Note: Although the ETM protocol supports it, we do not (yet)
	 *		want responses/ACKs on FMA events that we send. All
	 *		such messages are sent with ETM_PROTO_V1_TIMEOUT_NONE.
	 */
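
	/*
	 * Resulting wire image for a single packed event of evsz bytes:
	 * magic num, ver V1, type FMA_EVENT, sub type 0, pad 0, xid,
	 * timeout NONE, then the length list { evsz, 0 }, with multi-byte
	 * fields in network order; the packed nvlist body itself is
	 * written separately by etm_recv().
	 */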

	hdrp->ev_pp.pp_magic_num = ETM_PROTO_MAGIC_NUM;
	hdrp->ev_pp.pp_magic_num = htonl(hdrp->ev_pp.pp_magic_num);
	hdrp->ev_pp.pp_proto_ver = ETM_PROTO_V1;
	hdrp->ev_pp.pp_msg_type = ETM_MSG_TYPE_FMA_EVENT;
	hdrp->ev_pp.pp_sub_type = 0;
	hdrp->ev_pp.pp_rsvd_pad = 0;
	hdrp->ev_pp.pp_xid = etm_xid_cur;
	hdrp->ev_pp.pp_xid = htonl(hdrp->ev_pp.pp_xid);
	etm_xid_cur += ETM_XID_INC;
	hdrp->ev_pp.pp_timeout = ETM_PROTO_V1_TIMEOUT_NONE;
	hdrp->ev_pp.pp_timeout = htonl(hdrp->ev_pp.pp_timeout);

	lenp = &hdrp->ev_lens[0];

	if ((n = nvlist_size(evp, &evsz, encoding)) != 0) {
		errno = n;
		fmd_hdl_free(hdl, hdrp, hdr_sz);
		etm_stats.etm_os_nvlist_size_fail.fmds_value.ui64++;
		return (NULL);
	}

	/* indicate 1 FMA event, network encode its length, and 0-terminate */

	*lenp = evsz; *lenp = htonl(*lenp); lenp++;
	*lenp = 0; *lenp = htonl(*lenp); lenp++;

	/*
	 * write the network encoded header to the transport, and
	 * return alloc size to caller for later free
	 */

	if ((n = etm_io_op(hdl, "bad io write on event hdr",
	    conn, hdrp, hdr_sz, ETM_IO_OP_WR)) < 0) {
		errno = (-n);
		fmd_hdl_free(hdl, hdrp, hdr_sz);
		return (NULL);
	}

	*szp = hdr_sz;
	return (hdrp);

} /* etm_hdr_write() */

/*
 * etm_post_to_fmd - post the given FMA event to FMD
 *		via a FMD transport API call,
 *		return 0 or -errno value
 *
 * caveats:	the FMA event (evp) is freed by FMD,
 *		thus callers of this function should
 *		immediately discard any ptr they have to the
 *		nvlist without freeing or dereferencing it
 */

static int
etm_post_to_fmd(fmd_hdl_t *hdl, nvlist_t *evp)
{
	ssize_t ev_sz;		/* sizeof *evp */

	(void) nvlist_size(evp, (size_t *)&ev_sz, NV_ENCODE_XDR);

	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "ante ev post");
	}
	fmd_xprt_post(hdl, etm_fmd_xprt, evp, 0);
	etm_stats.etm_wr_fmd_fmaevent.fmds_value.ui64++;
	etm_stats.etm_wr_fmd_bytes.fmds_value.ui64 += ev_sz;
	if (etm_debug_lvl >= 1) {
		fmd_hdl_debug(hdl, "info: event %p post ok to FMD\n", evp);
	}
	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "post ev post");
	}
	return (0);

} /* etm_post_to_fmd() */

/*
 * etm_req_ver_negot - send an ETM control message to the other end requesting
 *		that the ETM protocol version be negotiated/set
 */

static void
etm_req_ver_negot(fmd_hdl_t *hdl)
{
	etm_xport_addr_t *addrv;	/* default dst addr(s) */
	etm_xport_conn_t conn;		/* connection to other end */
	etm_proto_v1_ctl_hdr_t *ctl_hdrp;	/* for CONTROL msg */
	size_t hdr_sz;		/* sizeof header */
	uint8_t *body_buf;	/* msg body buffer */
	uint32_t body_sz;	/* sizeof *body_buf */
	ssize_t i;		/* gen use */

	/* populate an ETM control msg to send */

	hdr_sz = sizeof (*ctl_hdrp);
	body_sz = (2 + 1);	/* version bytes plus null byte */

	ctl_hdrp = fmd_hdl_zalloc(hdl, hdr_sz + body_sz, FMD_SLEEP);

	ctl_hdrp->ctl_pp.pp_magic_num = htonl(ETM_PROTO_MAGIC_NUM);
	ctl_hdrp->ctl_pp.pp_proto_ver = ETM_PROTO_V1;
	ctl_hdrp->ctl_pp.pp_msg_type = ETM_MSG_TYPE_CONTROL;
	ctl_hdrp->ctl_pp.pp_sub_type = ETM_CTL_SEL_VER_NEGOT_REQ;
	ctl_hdrp->ctl_pp.pp_rsvd_pad = 0;
	etm_xid_ver_negot = etm_xid_cur;
	etm_xid_cur += ETM_XID_INC;
	ctl_hdrp->ctl_pp.pp_xid = htonl(etm_xid_ver_negot);
	ctl_hdrp->ctl_pp.pp_timeout = htonl(ETM_PROTO_V1_TIMEOUT_FOREVER);
	ctl_hdrp->ctl_len = htonl(body_sz);
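
	/*
	 * The body is the offered version list, newest first and NUL
	 * terminated: { ETM_PROTO_V2, ETM_PROTO_V1, '\0' }; the responder
	 * echoes its pick in the RESPONSE body (see the ver negot handling
	 * in etm_handle_new_conn()).
	 */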

	body_buf = (void*)&ctl_hdrp->ctl_len;
	body_buf += sizeof (ctl_hdrp->ctl_len);
	*body_buf++ = ETM_PROTO_V2;
	*body_buf++ = ETM_PROTO_V1;
	*body_buf++ = '\0';

	/*
	 * open and close a connection to send the ETM control msg
	 * to any/all of the default dst addrs
	 */

	if ((addrv = etm_xport_get_ev_addrv(hdl, NULL)) == NULL) {
		fmd_hdl_error(hdl,
		    "error: bad ctl dst addrs errno %d\n", errno);
		etm_stats.etm_xport_get_ev_addrv_fail.fmds_value.ui64++;
		goto func_ret;
	}

	for (i = 0; addrv[i] != NULL; i++) {

		if (etm_conn_open(hdl, "bad conn open during ver negot",
		    addrv[i], &conn) < 0) {
			continue;
		}
		if (etm_io_op(hdl, "bad io write on ctl hdr+body",
		    conn, ctl_hdrp, hdr_sz + body_sz,
		    ETM_IO_OP_WR) >= 0) {
			etm_stats.etm_wr_hdr_control.fmds_value.ui64++;
			etm_stats.etm_wr_body_control.fmds_value.ui64++;
		}
		(void) etm_conn_close(hdl, "bad conn close during ver negot",
		    conn);

	} /* foreach dst addr */

func_ret:

	if (addrv != NULL) {
		etm_xport_free_addrv(hdl, addrv);
	}
	fmd_hdl_free(hdl, ctl_hdrp, hdr_sz + body_sz);

} /* etm_req_ver_negot() */

/*
 * Design_Note: We rely on the fact that all message types have
 *		a common protocol preamble; if this fact should
 *		ever change it may break the code below. We also
 *		rely on the fact that FMA_EVENT and CONTROL headers
 *		returned will be sized large enough to reuse them
 *		as RESPONSE headers if the remote endpt asked
 *		for a response via the pp_timeout field.
 */
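
/*
 * Response code convention used below: 0 means the original msg was
 * handled/posted OK, a negative value is a -errno style failure;
 * etm_maybe_send_response() treats resp_code >= 0 on an FMA_EVENT as an
 * ACK worth remembering in etm_xid_posted_ev for duplicate filtering.
 */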

/*
 * etm_maybe_send_response - check the given message header to see
 *		whether a response has been requested,
 *		if so then send an appropriate response
 *		back on the given connection using the
 *		given response code,
 *		return 0 or -errno value
 */

static ssize_t
etm_maybe_send_response(fmd_hdl_t *hdl, etm_xport_conn_t conn,
    void *hdrp, int32_t resp_code)
{
	ssize_t rv;		/* ret val */
	etm_proto_v1_pp_t *ppp;	/* protocol preamble ptr */
	etm_proto_v1_resp_hdr_t *resp_hdrp;	/* for RESPONSE msg */
	uint8_t resp_body[4];	/* response body if needed */
	uint8_t *resp_msg;	/* response hdr+body */
	size_t hdr_sz;		/* sizeof response hdr */
	uint8_t orig_msg_type;	/* orig hdr's message type */
	uint32_t orig_timeout;	/* orig hdr's timeout */
	ssize_t n;		/* gen use */

	rv = 0;			/* default is success */
	ppp = hdrp;
	orig_msg_type = ppp->pp_msg_type;
	orig_timeout = ppp->pp_timeout;

	/* bail out now if no response is to be sent */

	if (orig_timeout == ETM_PROTO_V1_TIMEOUT_NONE) {
		return (0);
	} /* if a nop */

	if ((orig_msg_type != ETM_MSG_TYPE_FMA_EVENT) &&
	    (orig_msg_type != ETM_MSG_TYPE_CONTROL)) {
		return (-EINVAL);
	} /* if inappropriate hdr for a response msg */

	/* reuse the given header as a response header */

	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "ante resp send");
	}

	resp_hdrp = hdrp;
	resp_hdrp->resp_code = resp_code;
	resp_hdrp->resp_len = 0;	/* default is empty body */

	if ((orig_msg_type == ETM_MSG_TYPE_CONTROL) &&
	    (ppp->pp_sub_type == ETM_CTL_SEL_VER_NEGOT_REQ)) {
		resp_body[0] = ETM_PROTO_V2;
		resp_hdrp->resp_len = 1;
	} /* if should send our/negotiated proto ver in resp body */

	/* respond with the proto ver that was negotiated */

	resp_hdrp->resp_pp.pp_proto_ver = etm_resp_ver;
	resp_hdrp->resp_pp.pp_msg_type = ETM_MSG_TYPE_RESPONSE;
	resp_hdrp->resp_pp.pp_timeout = ETM_PROTO_V1_TIMEOUT_NONE;

	/*
	 * send the whole response msg in one write, header and body;
	 * avoid the alloc-and-copy if we can reuse the hdr as the msg,
	 * ie, if the body is empty
	 *
	 * update stats and note the xid associated with last ACKed FMA_EVENT
	 * known to be successfully posted to FMD to aid duplicate filtering
	 */

	hdr_sz = sizeof (etm_proto_v1_resp_hdr_t);

	resp_msg = hdrp;
	if (resp_hdrp->resp_len > 0) {
		resp_msg = fmd_hdl_zalloc(hdl, hdr_sz + resp_hdrp->resp_len,
		    FMD_SLEEP);
		(void) memcpy(resp_msg, resp_hdrp, hdr_sz);
		(void) memcpy(resp_msg + hdr_sz, resp_body,
		    resp_hdrp->resp_len);
	}

	if ((n = etm_io_op(hdl, "bad io write on resp msg", conn,
	    resp_msg, hdr_sz + resp_hdrp->resp_len, ETM_IO_OP_WR)) < 0) {
		rv = n;
		goto func_ret;
	}

	etm_stats.etm_wr_hdr_response.fmds_value.ui64++;
	etm_stats.etm_wr_body_response.fmds_value.ui64++;

	if ((orig_msg_type == ETM_MSG_TYPE_FMA_EVENT) &&
	    (resp_code >= 0)) {
		etm_xid_posted_ev = resp_hdrp->resp_pp.pp_xid;
	}

	fmd_hdl_debug(hdl, "info: sent V%u RESPONSE msg to xport "
	    "xid 0x%x code %d len %u\n",
	    (unsigned int)resp_hdrp->resp_pp.pp_proto_ver,
	    resp_hdrp->resp_pp.pp_xid, resp_hdrp->resp_code,
	    resp_hdrp->resp_len);
func_ret:

	if (resp_hdrp->resp_len > 0) {
		fmd_hdl_free(hdl, resp_msg, hdr_sz + resp_hdrp->resp_len);
	}
	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "post resp send");
	}
	return (rv);

} /* etm_maybe_send_response() */

/*
 * etm_handle_new_conn - receive an ETM message sent from the other end via
 *		the given open connection, pull out any FMA events
 *		and post them to the local FMD (or handle any ETM
 *		control or response msg); when done, close the
 *		connection
 */

static void
etm_handle_new_conn(fmd_hdl_t *hdl, etm_xport_conn_t conn)
{
	etm_proto_v1_ev_hdr_t *ev_hdrp;		/* for FMA_EVENT msg */
	etm_proto_v1_ctl_hdr_t *ctl_hdrp;	/* for CONTROL msg */
	etm_proto_v1_resp_hdr_t *resp_hdrp;	/* for RESPONSE msg */
	int32_t resp_code;	/* response code */
	size_t hdr_sz;		/* sizeof header */
	uint8_t *body_buf;	/* msg body buffer */
	uint32_t body_sz;	/* sizeof body_buf */
	uint32_t ev_cnt;	/* count of FMA events */
	uint8_t *bp;		/* byte ptr within body_buf */
	nvlist_t *evp;		/* ptr to unpacked FMA event */
	char *class;		/* FMA event class */
	ssize_t i, n;		/* gen use */

	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "ante conn handle");
	}
	fmd_hdl_debug(hdl, "info: handling new conn %p\n", conn);

	ev_hdrp = NULL;
	ctl_hdrp = NULL;
	resp_hdrp = NULL;
	body_buf = NULL;
	class = NULL;
	evp = NULL;
	resp_code = 0;	/* default is success */

	/* read a network decoded message header from the connection */

	if ((ev_hdrp = etm_hdr_read(hdl, conn, &hdr_sz)) == NULL) {
		/* errno assumed set by above call */
		fmd_hdl_error(hdl, "error: FMA event dropped: "
		    "bad hdr read errno %d\n", errno);
		etm_stats.etm_rd_drop_fmaevent.fmds_value.ui64++;
		goto func_ret;
	}

	/*
	 * handle the message based on its preamble pp_msg_type
	 * which is known to be valid from etm_hdr_read() checks
	 */

	if (ev_hdrp->ev_pp.pp_msg_type == ETM_MSG_TYPE_FMA_EVENT) {

		fmd_hdl_debug(hdl, "info: rcvd FMA_EVENT msg from xport\n");

		/* allocate buf large enough for whole body / all FMA events */

		body_sz = 0;
		for (i = 0; ev_hdrp->ev_lens[i] != 0; i++) {
			body_sz += ev_hdrp->ev_lens[i];
		} /* for summing sizes of all FMA events */
		ev_cnt = i;

		if (etm_debug_lvl >= 1) {
			fmd_hdl_debug(hdl, "info: event lengths %u sum %u\n",
			    ev_cnt, body_sz);
		}

		body_buf = fmd_hdl_zalloc(hdl, body_sz, FMD_SLEEP);

		/* read all the FMA events at once */

		if ((n = etm_io_op(hdl, "FMA event dropped: "
		    "bad io read on event bodies",
		    conn, body_buf, body_sz,
		    ETM_IO_OP_RD)) < 0) {
			etm_stats.etm_rd_drop_fmaevent.fmds_value.ui64++;
			goto func_ret;
		}

		etm_stats.etm_rd_xport_bytes.fmds_value.ui64 += body_sz;
		etm_stats.etm_rd_body_fmaevent.fmds_value.ui64 += ev_cnt;

		/*
		 * check for dup msg/xid against last good response sent,
		 * if a dup then resend response but skip repost to FMD
		 */

		if (ev_hdrp->ev_pp.pp_xid == etm_xid_posted_ev) {
			(void) etm_maybe_send_response(hdl, conn, ev_hdrp, 0);
			fmd_hdl_debug(hdl, "info: skipping dup FMA event post "
			    "xid 0x%x\n", etm_xid_posted_ev);
			etm_stats.etm_rd_dup_fmaevent.fmds_value.ui64++;
			goto func_ret;
		}

		/* unpack each FMA event and post it to FMD */

		bp = body_buf;
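		/*
		 * Each event in the batch is unpacked, posted, and ACKed
		 * individually: a failed unpack sends a -errno response and
		 * skips just that event, while a successful post is ACKed
		 * with resp_code 0.
		 */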
		for (i = 0; ev_hdrp->ev_lens[i] != 0; i++) {
			if ((n = nvlist_unpack((char *)bp,
			    ev_hdrp->ev_lens[i], &evp, 0)) != 0) {
				resp_code = (-n);
				(void) etm_maybe_send_response(hdl, conn,
				    ev_hdrp, resp_code);
				fmd_hdl_error(hdl, "error: FMA event dropped: "
				    "bad event body unpack "
				    "errno %d\n", n);
				if (etm_debug_lvl >= 2) {
					fmd_hdl_debug(hdl, "info: FMA event "
					    "hexdump %d bytes:\n",
					    ev_hdrp->ev_lens[i]);
					etm_hexdump(hdl, bp,
					    ev_hdrp->ev_lens[i]);
				}
				etm_stats.etm_os_nvlist_unpack_fail.fmds_value.
				    ui64++;
				etm_stats.etm_rd_drop_fmaevent.fmds_value.
				    ui64++;
				bp += ev_hdrp->ev_lens[i];
				continue;
			}
			if (etm_debug_lvl >= 1) {
				(void) nvlist_lookup_string(evp, FM_CLASS,
				    &class);
				if (class == NULL) {
					class = "NULL";
				}
				fmd_hdl_debug(hdl, "info: FMA event %p "
				    "class %s\n", evp, class);
			}
			resp_code = etm_post_to_fmd(hdl, evp);
			evp = NULL;
			(void) etm_maybe_send_response(hdl, conn,
			    ev_hdrp, resp_code);
			bp += ev_hdrp->ev_lens[i];
		} /* foreach FMA event in the body buffer */

	} else if (ev_hdrp->ev_pp.pp_msg_type == ETM_MSG_TYPE_CONTROL) {

		ctl_hdrp = (void*)ev_hdrp;

		fmd_hdl_debug(hdl, "info: rcvd CONTROL msg from xport\n");
		if (etm_debug_lvl >= 1) {
			fmd_hdl_debug(hdl, "info: ctl sel %d xid 0x%x\n",
			    (int)ctl_hdrp->ctl_pp.pp_sub_type,
			    ctl_hdrp->ctl_pp.pp_xid);
		}

		/*
		 * if we have a VER_NEGOT_REQ read the body and validate
		 * the protocol version set contained therein,
		 * otherwise we have a PING_REQ (which has no body)
		 * and we [also] fall thru to the code which sends a
		 * response msg if the pp_timeout field requested one
		 */

		if (ctl_hdrp->ctl_pp.pp_sub_type == ETM_CTL_SEL_VER_NEGOT_REQ) {

			body_sz = ctl_hdrp->ctl_len;
			body_buf = fmd_hdl_zalloc(hdl, body_sz, FMD_SLEEP);

			if ((n = etm_io_op(hdl, "bad io read on ctl body",
			    conn, body_buf, body_sz,
			    ETM_IO_OP_RD)) < 0) {
				goto func_ret;
			}

			/* complain if version set completely incompatible */

			for (i = 0; i < body_sz; i++) {
				if ((body_buf[i] == ETM_PROTO_V1) ||
				    (body_buf[i] == ETM_PROTO_V2)) {
					break;
				}
			}
			if (i >= body_sz) {
				etm_stats.etm_ver_bad.fmds_value.ui64++;
				resp_code = (-EPROTO);
			}

		} /* if got version set request */

		etm_stats.etm_rd_body_control.fmds_value.ui64++;

		(void) etm_maybe_send_response(hdl, conn, ctl_hdrp, resp_code);

	} else if (ev_hdrp->ev_pp.pp_msg_type == ETM_MSG_TYPE_RESPONSE) {

		resp_hdrp = (void*)ev_hdrp;

		fmd_hdl_debug(hdl, "info: rcvd RESPONSE msg from xport\n");
		if (etm_debug_lvl >= 1) {
			fmd_hdl_debug(hdl, "info: resp xid 0x%x\n",
			    (int)resp_hdrp->resp_pp.pp_xid);
		}

		body_sz = resp_hdrp->resp_len;
		body_buf = fmd_hdl_zalloc(hdl, body_sz, FMD_SLEEP);

		if ((n = etm_io_op(hdl, "bad io read on resp len",
		    conn, body_buf, body_sz, ETM_IO_OP_RD)) < 0) {
			goto func_ret;
		}

		etm_stats.etm_rd_body_response.fmds_value.ui64++;

		/*
		 * look up the xid to interpret the response body
		 *
		 * ping is a nop; for ver negot confirm that a supported
		 * protocol version was negotiated and remember which one
		 */

		if ((resp_hdrp->resp_pp.pp_xid != etm_xid_ping) &&
		    (resp_hdrp->resp_pp.pp_xid != etm_xid_ver_negot)) {
			etm_stats.etm_xid_bad.fmds_value.ui64++;
			goto func_ret;
		}

		if (resp_hdrp->resp_pp.pp_xid == etm_xid_ver_negot) {
			if ((body_buf[0] < ETM_PROTO_V1) ||
			    (body_buf[0] > ETM_PROTO_V2)) {
				etm_stats.etm_ver_bad.fmds_value.ui64++;
				goto func_ret;
			}
			etm_resp_ver = body_buf[0];
		} /* if have resp to last req to negotiate proto ver */

	} /* whether we have a FMA_EVENT, CONTROL, or RESPONSE msg */

func_ret:

	(void) etm_conn_close(hdl, "bad conn close after msg recv", conn);

	if (etm_debug_lvl >= 2) {
		etm_show_time(hdl, "post conn handle");
	}
	if (ev_hdrp != NULL) {
		fmd_hdl_free(hdl, ev_hdrp, hdr_sz);
	}
	if (body_buf != NULL) {
		fmd_hdl_free(hdl, body_buf, body_sz);
	}
} /* etm_handle_new_conn() */

/*
 * etm_server - loop forever accepting new connections
 *		using the given FMD handle,
 *		handling any ETM msgs sent from the other side
 *		via each such connection
 */

static void
etm_server(void *arg)
{
	etm_xport_conn_t conn;	/* connection handle */
	ssize_t n;		/* gen use */
	fmd_hdl_t *hdl;		/* FMD handle */

	hdl = arg;

	fmd_hdl_debug(hdl, "info: connection server starting\n");

	while (!etm_is_dying) {

		if ((conn = etm_xport_accept(hdl, NULL)) == NULL) {
			/* errno assumed set by above call */
			n = errno;
			if (etm_is_dying) {
				break;
			}
			fmd_hdl_debug(hdl,
			    "error: bad conn accept errno %d\n", n);
			etm_stats.etm_xport_accept_fail.fmds_value.ui64++;
			/* avoid spinning CPU */
			(void) etm_sleep(ETM_SLEEP_SLOW);
			continue;
		}

		/*
		 * Design_Note: etm_handle_new_conn() will close the
		 *		accepted connection when done. In early designs
		 *		etm_handle_new_conn() was spawned as a
		 *		separate thread via pthread_create();
		 *		however fmd_thr_create() constrains thread
		 *		creation to prevent spawned threads from
		 *		spawning others (ie, no grandchildren).
		 *		Hence etm_handle_new_conn() is now called
		 *		as a simple function [w/ multiple args].
		 */

		etm_handle_new_conn(hdl, conn);

	} /* while accepting new connections until ETM dies */

	/* ETM is dying (probably due to "fmadm unload etm") */

	if (etm_debug_lvl >= 1) {
		fmd_hdl_debug(hdl, "info: connection server is dying\n");
	}
} /* etm_server() */

static void *
etm_init_alloc(size_t size)
{
	return (fmd_hdl_alloc(init_hdl, size, FMD_SLEEP));
}

static void
etm_init_free(void *addr, size_t size)
{
	fmd_hdl_free(init_hdl, addr, size);
}

/*
 * -------------------------- FMD entry points -------------------------------
 */

/*
 * _fmd_init - initialize the transport for use by ETM and start the
 *		server daemon to accept new connections to us
 *
 *		FMD will read our *.conf and subscribe us to FMA events
 */

void
_fmd_init(fmd_hdl_t *hdl)
{
	struct timeval tmv;	/* timeval */
	ssize_t n;		/* gen use */
	ldom_hdl_t *lhp;

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
		return;	/* invalid data in configuration file */
	}

	fmd_hdl_debug(hdl, "info: module initializing\n");

	init_hdl = hdl;
	lhp = ldom_init(etm_init_alloc, etm_init_free);

	/*
	 * Do not load this module if it is running on a guest ldom.
	 */
	if (ldom_major_version(lhp) == 1 && ldom_on_service(lhp) == 0) {
		fmd_hdl_debug(hdl, "info: module unregistering\n");
		ldom_fini(lhp);
		fmd_hdl_unregister(hdl);
		return;
	} else {
		ldom_fini(lhp);
	}

	/* setup statistics and properties from FMD */

	(void) fmd_stat_create(hdl, FMD_STAT_NOALLOC,
	    sizeof (etm_stats) / sizeof (fmd_stat_t),
	    (fmd_stat_t *)&etm_stats);

	etm_debug_lvl = fmd_prop_get_int32(hdl, ETM_PROP_NM_DEBUG_LVL);
	etm_debug_max_ev_cnt = fmd_prop_get_int32(hdl,
	    ETM_PROP_NM_DEBUG_MAX_EV_CNT);
	fmd_hdl_debug(hdl, "info: etm_debug_lvl %d "
	    "etm_debug_max_ev_cnt %d\n",
	    etm_debug_lvl, etm_debug_max_ev_cnt);

	/* obtain an FMD transport handle so we can post FMA events later */

	etm_fmd_xprt = fmd_xprt_open(hdl, FMD_XPRT_RDONLY, NULL, NULL);

	/* encourage protocol transaction id to be unique per module load */

	(void) gettimeofday(&tmv, NULL);
	etm_xid_cur = (uint32_t)((tmv.tv_sec << 10) |
	    ((unsigned long)tmv.tv_usec >> 10));

	/*
	 * init the transport,
	 * start the connection acceptance server, and
	 * request protocol version be negotiated
	 */

	if ((n = etm_xport_init(hdl)) != 0) {
		fmd_hdl_error(hdl, "error: bad xport init errno %d\n", (-n));
		fmd_hdl_unregister(hdl);
		return;
	}

	etm_svr_tid = fmd_thr_create(hdl, etm_server, hdl);

	/*
	 * Wait a second so the receiver is ready before we start
	 * handshaking with the SP.
	 */
	(void) etm_sleep(ETM_SLEEP_QUIK);

	etm_req_ver_negot(hdl);

	fmd_hdl_debug(hdl, "info: module initialized ok\n");

} /* _fmd_init() */

/*
 * etm_recv - receive an FMA event from FMD and transport it
 *		to the remote endpoint
 */

/*ARGSUSED*/
void
etm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *evp, const char *class)
{
	etm_xport_addr_t *addrv;	/* vector of transport addresses */
	etm_xport_conn_t conn;		/* connection handle */
	etm_proto_v1_ev_hdr_t *hdrp;	/* for FMA_EVENT msg */
	ssize_t i, n;		/* gen use */
	size_t sz;		/* header size */
	size_t buflen;		/* size of packed FMA event */
	uint8_t *buf;		/* tmp buffer for packed FMA event */

	buflen = 0;
	(void) nvlist_size(evp, &buflen, NV_ENCODE_XDR);
	etm_stats.etm_rd_fmd_bytes.fmds_value.ui64 += buflen;
	etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64++;

	fmd_hdl_debug(hdl, "info: rcvd event %p from FMD\n", evp);
	fmd_hdl_debug(hdl, "info: cnt %llu class %s\n",
	    etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64, class);

	/*
	 * if the debug limit has been set, avoid excessive traffic,
	 * for example, an infinite cycle using loopback nodes
	 */

	if ((etm_debug_max_ev_cnt >= 0) &&
	    (etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64 >
	    etm_debug_max_ev_cnt)) {
		fmd_hdl_debug(hdl, "warning: FMA event dropped: "
		    "event %p cnt %llu > debug max %d\n", evp,
		    etm_stats.etm_rd_fmd_fmaevent.fmds_value.ui64,
		    etm_debug_max_ev_cnt);
		etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
		return;
	}

	/* allocate a buffer for the FMA event and nvlist pack it */

	buf = fmd_hdl_zalloc(hdl, buflen, FMD_SLEEP);

	if ((n = nvlist_pack(evp, (char **)&buf, &buflen,
	    NV_ENCODE_XDR, 0)) != 0) {
		fmd_hdl_error(hdl, "error: FMA event dropped: "
		    "event pack errno %d\n", n);
		etm_stats.etm_os_nvlist_pack_fail.fmds_value.ui64++;
		etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
		fmd_hdl_free(hdl, buf, buflen);
		return;
	}

	/* get vector of dst addrs and send the FMA event to each one */

	if ((addrv = etm_xport_get_ev_addrv(hdl, evp)) == NULL) {
		fmd_hdl_error(hdl, "error: FMA event dropped: "
		    "bad event dst addrs errno %d\n", errno);
		etm_stats.etm_xport_get_ev_addrv_fail.fmds_value.ui64++;
		etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
		fmd_hdl_free(hdl, buf, buflen);
		return;
	}

	for (i = 0; addrv[i] != NULL; i++) {

		/* open a new connection to this dst addr */

		if ((n = etm_conn_open(hdl, "FMA event dropped: "
		    "bad conn open on new ev",
		    addrv[i], &conn)) < 0) {
			etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
			continue;
		}

		/* write the ETM message header */

		if ((hdrp = etm_hdr_write(hdl, conn, evp, NV_ENCODE_XDR,
		    &sz)) == NULL) {
			fmd_hdl_error(hdl, "error: FMA event dropped: "
			    "bad hdr write errno %d\n", errno);
			(void) etm_conn_close(hdl,
			    "bad conn close per bad hdr wr", conn);
			etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
			continue;
		}

		fmd_hdl_free(hdl, hdrp, sz);	/* header not needed */
		etm_stats.etm_wr_hdr_fmaevent.fmds_value.ui64++;
		fmd_hdl_debug(hdl, "info: hdr xport write ok for event %p\n",
		    evp);

		/* write the ETM message body, ie, the packed nvlist */

		if ((n = etm_io_op(hdl, "FMA event dropped: "
		    "bad io write on event", conn,
		    buf, buflen, ETM_IO_OP_WR)) < 0) {
			(void) etm_conn_close(hdl,
			    "bad conn close per bad body wr", conn);
			etm_stats.etm_wr_drop_fmaevent.fmds_value.ui64++;
			continue;
		}

		etm_stats.etm_wr_body_fmaevent.fmds_value.ui64++;
		etm_stats.etm_wr_xport_bytes.fmds_value.ui64 += buflen;
		fmd_hdl_debug(hdl, "info: body xport write ok for event %p\n",
		    evp);

		/* close the connection */

		(void) etm_conn_close(hdl, "bad conn close after event send",
		    conn);
	} /* foreach dst addr in the vector */

	etm_xport_free_addrv(hdl, addrv);
	fmd_hdl_free(hdl, buf, buflen);

} /* etm_recv() */

/*
 * _fmd_fini - stop the server daemon and tear down the transport
 */

void
_fmd_fini(fmd_hdl_t *hdl)
{
	ssize_t n;	/* gen use */

	fmd_hdl_debug(hdl, "info: module finalizing\n");

	/* kill the connection server; wait for it to die */

	etm_is_dying = 1;

	if (etm_svr_tid != NULL) {
		fmd_thr_signal(hdl, etm_svr_tid);
		fmd_thr_destroy(hdl, etm_svr_tid);
		etm_svr_tid = NULL;
	} /* if server thread was successfully created */

	/* tear down the transport */

	if ((n = etm_xport_fini(hdl)) != 0) {
		fmd_hdl_error(hdl, "warning: xport fini errno %d\n", (-n));
	}
	if (etm_fmd_xprt != NULL) {
		fmd_xprt_close(hdl, etm_fmd_xprt);
	}

	fmd_hdl_debug(hdl, "info: module finalized ok\n");

} /* _fmd_fini() */