/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Fault Management Architecture (FMA) Resource and Protocol Support
 *
 * The routines contained herein provide services to support kernel subsystems
 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
 *
 * Name-Value Pair Lists
 *
 * The embodiment of an FMA protocol element (event, fmri or authority) is a
 * name-value pair list (nvlist_t).  FMA-specific nvlist constructor and
 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
 * to create an nvpair list using custom allocators.  Callers may choose to
 * allocate either from the kernel memory allocator, or from a preallocated
 * buffer, useful in constrained contexts like high-level interrupt routines.
 *
 * Protocol Event and FMRI Construction
 *
 * Convenience routines are provided to construct nvlist events according to
 * the FMA Event Protocol and Naming Schema specification for ereports and
 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
 *
 * ENA Manipulation
 *
 * Routines to generate ENA formats 0, 1 and 2 are available as well as
 * routines to increment formats 1 and 2.  Individual fields within the
 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
 * fm_ena_format_get() and fm_ena_generation_get().
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/ddifm.h>
#include <sys/ddifm_impl.h>
#include <sys/spl.h>
#include <sys/dumphdr.h>
#include <sys/compress.h>
#include <sys/console.h>
#include <sys/panic.h>
#include <sys/kobj.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>

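/*
 * Example (illustrative only): a minimal sketch of how a kernel subsystem
 * might assemble and publish an ereport using the routines below.  The
 * detector device path, event class and payload member are hypothetical,
 * and error handling is elided for brevity.
 *
 *	nvlist_t *ereport = fm_nvlist_create(NULL);
 *	nvlist_t *detector = fm_nvlist_create(NULL);
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *
 *	fm_fmri_dev_set(detector, DEV_SCHEME_VERSION0, NULL,
 *	    "/pci@0,0/pci1000,3060@3", NULL, NULL);
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example.xfer-err",
 *	    ena, detector, NULL);
 *	fm_payload_set(ereport, "detail", DATA_TYPE_UINT32, 0xdeadbeef, NULL);
 *	fm_ereport_post(ereport, EVCH_SLEEP);
 *
 *	fm_nvlist_destroy(detector, FM_NVA_FREE);
 *	fm_nvlist_destroy(ereport, FM_NVA_FREE);
 */
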
/*
 * URL and SUNW-MSG-ID value to display for fm_panic(), defined below.  These
 * values must be kept in sync with the FMA source code in usr/src/cmd/fm.
 */
static const char *fm_url = "http://illumos.org/msg";
static const char *fm_msgid = "SUNOS-8000-0G";
static char *volatile fm_panicstr = NULL;

errorq_t *ereport_errorq;
void *ereport_dumpbuf;
size_t ereport_dumplen;

static uint_t ereport_chanlen = ERPT_EVCH_MAX;
static evchan_t *ereport_chan = NULL;
static ulong_t ereport_qlen = 0;
static size_t ereport_size = 0;
static int ereport_cols = 80;

extern void fastreboot_disable_highpil(void);

/*
 * Common fault management kstats to record ereport generation
 * failures
 */

struct erpt_kstat {
	kstat_named_t	erpt_dropped;		/* num erpts dropped on post */
	kstat_named_t	erpt_set_failed;	/* num erpt set failures */
	kstat_named_t	fmri_set_failed;	/* num fmri set failures */
	kstat_named_t	payload_set_failed;	/* num payload set failures */
};

static struct erpt_kstat erpt_kstat_data = {
	{ "erpt-dropped", KSTAT_DATA_UINT64 },
	{ "erpt-set-failed", KSTAT_DATA_UINT64 },
	{ "fmri-set-failed", KSTAT_DATA_UINT64 },
	{ "payload-set-failed", KSTAT_DATA_UINT64 }
};

/*ARGSUSED*/
static void
fm_drain(void *private, void *data, errorq_elem_t *eep)
{
	nvlist_t *nvl = errorq_elem_nvl(ereport_errorq, eep);

	if (!panicstr)
		(void) fm_ereport_post(nvl, EVCH_TRYHARD);
	else
		fm_nvprint(nvl);
}

void
fm_init(void)
{
	kstat_t *ksp;

	(void) sysevent_evc_bind(FM_ERROR_CHAN,
	    &ereport_chan, EVCH_CREAT | EVCH_HOLD_PEND);

	(void) sysevent_evc_control(ereport_chan,
	    EVCH_SET_CHAN_LEN, &ereport_chanlen);

	if (ereport_qlen == 0)
		ereport_qlen = ERPT_MAX_ERRS * MAX(max_ncpus, 4);

	if (ereport_size == 0)
		ereport_size = ERPT_DATA_SZ;

	ereport_errorq = errorq_nvcreate("fm_ereport_queue",
	    (errorq_func_t)fm_drain, NULL, ereport_qlen, ereport_size,
	    FM_ERR_PIL, ERRORQ_VITAL);
	if (ereport_errorq == NULL)
		panic("failed to create required ereport error queue");

	ereport_dumpbuf = kmem_alloc(ereport_size, KM_SLEEP);
	ereport_dumplen = ereport_size;

	/* Initialize ereport allocation and generation kstats */
	ksp = kstat_create("unix", 0, "fm", "misc", KSTAT_TYPE_NAMED,
	    sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp != NULL) {
		ksp->ks_data = &erpt_kstat_data;
		kstat_install(ksp);
	} else {
		cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
	}
}

/*
 * Formatting utility function for fm_nvprintr.  We attempt to wrap chunks of
 * output so they aren't split across console lines, and return the end column.
 */
/*PRINTFLIKE4*/
static int
fm_printf(int depth, int c, int cols, const char *format, ...)
{
	va_list ap;
	int width;
	char c1;

	va_start(ap, format);
	width = vsnprintf(&c1, sizeof (c1), format, ap);
	va_end(ap);

	if (c + width >= cols) {
		console_printf("\n\r");
		c = 0;
		if (format[0] != ' ' && depth > 0) {
			console_printf(" ");
			c++;
		}
	}

	va_start(ap, format);
	console_vprintf(format, ap);
	va_end(ap);

	return ((c + width) % cols);
}

/*
 * Recursively print a nvlist in the specified column width and return the
 * column we end up in.  This function is called recursively by fm_nvprint(),
 * below.  We generically format the entire nvpair using hexadecimal
 * integers and strings, and elide any integer arrays.  Arrays are basically
 * used for cache dumps right now, so we suppress them so as not to overwhelm
 * the amount of console output we produce at panic time.  This can be further
 * enhanced as FMA technology grows based upon the needs of consumers.  All
 * FMA telemetry is logged using the dump device transport, so the console
 * output serves only as a fallback in case this procedure is unsuccessful.
 */
static int
fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
{
	nvpair_t *nvp;

	for (nvp = nvlist_next_nvpair(nvl, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

		data_type_t type = nvpair_type(nvp);
		const char *name = nvpair_name(nvp);

		boolean_t b;
		uint8_t i8;
		uint16_t i16;
		uint32_t i32;
		uint64_t i64;
		char *str;
		nvlist_t *cnv;

		if (strcmp(name, FM_CLASS) == 0)
			continue; /* already printed by caller */

		c = fm_printf(d, c, cols, " %s=", name);

		switch (type) {
		case DATA_TYPE_BOOLEAN:
			c = fm_printf(d + 1, c, cols, " 1");
			break;

		case DATA_TYPE_BOOLEAN_VALUE:
			(void) nvpair_value_boolean_value(nvp, &b);
			c = fm_printf(d + 1, c, cols, b ? "1" : "0");
			break;

		case DATA_TYPE_BYTE:
			(void) nvpair_value_byte(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT8:
			(void) nvpair_value_int8(nvp, (void *)&i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_UINT8:
			(void) nvpair_value_uint8(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT16:
			(void) nvpair_value_int16(nvp, (void *)&i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_UINT16:
			(void) nvpair_value_uint16(nvp, &i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_INT32:
			(void) nvpair_value_int32(nvp, (void *)&i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_UINT32:
			(void) nvpair_value_uint32(nvp, &i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_INT64:
			(void) nvpair_value_int64(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_UINT64:
			(void) nvpair_value_uint64(nvp, &i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_HRTIME:
			(void) nvpair_value_hrtime(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_STRING:
			(void) nvpair_value_string(nvp, &str);
			c = fm_printf(d + 1, c, cols, "\"%s\"",
			    str ? str : "<NULL>");
			break;

		case DATA_TYPE_NVLIST:
			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist(nvp, &cnv);
			c = fm_nvprintr(cnv, d + 1, c, cols);
			c = fm_printf(d + 1, c, cols, " ]");
			break;

		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++) {
				c = fm_nvprintr(val[i], d + 1, c, cols);
			}
			c = fm_printf(d + 1, c, cols, " ]");
		}
			break;

		case DATA_TYPE_BOOLEAN_ARRAY:
		case DATA_TYPE_BYTE_ARRAY:
		case DATA_TYPE_INT8_ARRAY:
		case DATA_TYPE_UINT8_ARRAY:
		case DATA_TYPE_INT16_ARRAY:
		case DATA_TYPE_UINT16_ARRAY:
		case DATA_TYPE_INT32_ARRAY:
		case DATA_TYPE_UINT32_ARRAY:
		case DATA_TYPE_INT64_ARRAY:
		case DATA_TYPE_UINT64_ARRAY:
		case DATA_TYPE_STRING_ARRAY:
			c = fm_printf(d + 1, c, cols, "[...]");
			break;
		case DATA_TYPE_UNKNOWN:
		case DATA_TYPE_DONTCARE:
			c = fm_printf(d + 1, c, cols, "<unknown>");
			break;
		}
	}

	return (c);
}

void
fm_nvprint(nvlist_t *nvl)
{
	char *class;
	int c = 0;

	console_printf("\r");

	if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
		c = fm_printf(0, c, ereport_cols, "%s", class);

	if (fm_nvprintr(nvl, 0, c, ereport_cols) != 0)
		console_printf("\n");

	console_printf("\n");
}

/*
 * Wrapper for panic() that first produces an FMA-style message for admins.
 * Normally such messages are generated by fmd(1M)'s syslog-msgs agent: this
 * is the one exception to that rule and the only error that gets messaged.
 * This function is intended for use by subsystems that have detected a fatal
 * error and enqueued appropriate ereports and wish to then force a panic.
 */
/*PRINTFLIKE1*/
void
fm_panic(const char *format, ...)
{
	va_list ap;

	(void) atomic_cas_ptr((void *)&fm_panicstr, NULL, (void *)format);
#if defined(__x86)
	fastreboot_disable_highpil();
#endif /* __x86 */
	va_start(ap, format);
	vpanic(format, ap);
	va_end(ap);
}

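/*
 * Example (illustrative only): the intended calling pattern for fm_panic()
 * from a subsystem that has already queued its ereports.  The message text
 * is hypothetical.
 *
 *	fm_ereport_post(ereport, EVCH_TRYHARD);
 *	fm_panic("fatal error in %s", "io.example.controller");
 *
 * fm_panic() records the format string in fm_panicstr (so fm_banner() and
 * is_fm_panic() can tell the panic was FMA-initiated) and then calls
 * vpanic(), so it does not return.
 */
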
/*
 * Simply tell the caller if fm_panicstr is set, i.e. an FMA event has
 * caused the panic.  If so, something other than the default panic
 * diagnosis method will diagnose the cause of the panic.
 */
int
is_fm_panic()
{
	if (fm_panicstr)
		return (1);
	else
		return (0);
}

/*
 * Print any appropriate FMA banner message before the panic message.  This
 * function is called by panicsys() and prints the message for fm_panic().
 * We print the message here so that it comes after the system is quiesced.
 * A one-line summary is recorded in the log only (cmn_err(9F) with "!" prefix).
 * The rest of the message is for the console only and not needed in the log,
 * so it is printed using console_printf().  We break it up into multiple
 * chunks so as to avoid overflowing any small legacy prom_printf() buffers.
 */
void
fm_banner(void)
{
	timespec_t tod;
	hrtime_t now;

	if (!fm_panicstr)
		return; /* panic was not initiated by fm_panic(); do nothing */

	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	cmn_err(CE_NOTE, "!SUNW-MSG-ID: %s, "
	    "TYPE: Error, VER: 1, SEVERITY: Major\n", fm_msgid);

	console_printf(
"\n\rSUNW-MSG-ID: %s, TYPE: Error, VER: 1, SEVERITY: Major\n"
"EVENT-TIME: 0x%lx.0x%lx (0x%llx)\n",
	    fm_msgid, tod.tv_sec, tod.tv_nsec, (u_longlong_t)now);

	console_printf(
"PLATFORM: %s, CSN: -, HOSTNAME: %s\n"
"SOURCE: %s, REV: %s %s\n",
	    platform, utsname.nodename, utsname.sysname,
	    utsname.release, utsname.version);

	console_printf(
"DESC: Errors have been detected that require a reboot to ensure system\n"
"integrity. See %s/%s for more information.\n",
	    fm_url, fm_msgid);

	console_printf(
"AUTO-RESPONSE: Solaris will attempt to save and diagnose the error telemetry\n"
"IMPACT: The system will sync files, save a crash dump if needed, and reboot\n"
"REC-ACTION: Save the error summary below in case telemetry cannot be saved\n");

	console_printf("\n");
}

/*
 * Utility function to write all of the pending ereports to the dump device.
 * This function is called at either normal reboot or panic time, and simply
 * iterates over the in-transit messages in the ereport sysevent channel.
 */
void
fm_ereport_dump(void)
{
	evchanq_t *chq;
	sysevent_t *sep;
	erpt_dump_t ed;

	timespec_t tod;
	hrtime_t now;
	char *buf;
	size_t len;

	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		if (ereport_errorq != NULL)
			errorq_drain(ereport_errorq);
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	/*
	 * In the panic case, sysevent_evc_walk_init() will return NULL.
	 */
	if ((chq = sysevent_evc_walk_init(ereport_chan, NULL)) == NULL &&
	    !panicstr)
		return; /* event channel isn't initialized yet */

	while ((sep = sysevent_evc_walk_step(chq)) != NULL) {
		if ((buf = sysevent_evc_event_attr(sep, &len)) == NULL)
			break;

		ed.ed_magic = ERPT_MAGIC;
		ed.ed_chksum = checksum32(buf, len);
		ed.ed_size = (uint32_t)len;
		ed.ed_pad = 0;
		ed.ed_hrt_nsec = SE_TIME(sep);
		ed.ed_hrt_base = now;
		ed.ed_tod_base.sec = tod.tv_sec;
		ed.ed_tod_base.nsec = tod.tv_nsec;

		dumpvp_write(&ed, sizeof (ed));
		dumpvp_write(buf, len);
	}

	sysevent_evc_walk_fini(chq);
}

/*
 * Post an error report (ereport) to the sysevent error channel.  The error
 * channel must be established with a prior call to sysevent_evc_bind()
 * before publication may occur.
 */
void
fm_ereport_post(nvlist_t *ereport, int evc_flag)
{
	size_t nvl_size = 0;
	evchan_t *error_chan;

	(void) nvlist_size(ereport, &nvl_size, NV_ENCODE_NATIVE);
	if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		return;
	}

	if (sysevent_evc_bind(FM_ERROR_CHAN, &error_chan,
	    EVCH_CREAT|EVCH_HOLD_PEND) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		return;
	}

	if (sysevent_evc_publish(error_chan, EC_FM, ESC_FM_ERROR,
	    SUNW_VENDOR, FM_PUB, ereport, evc_flag) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		(void) sysevent_evc_unbind(error_chan);
		return;
	}
	(void) sysevent_evc_unbind(error_chan);
}

/*
 * Wrappers for FM nvlist allocators
 */
/* ARGSUSED */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
	return (kmem_zalloc(size, KM_SLEEP));
}

/* ARGSUSED */
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
	kmem_free(buf, size);
}

const nv_alloc_ops_t fm_mem_alloc_ops = {
	NULL,
	NULL,
	i_fm_alloc,
	i_fm_free,
	NULL
};

/*
 * Create and initialize a new nv_alloc_t for a fixed buffer, buf.  A pointer
 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
 * is returned to indicate that the nv_alloc structure could not be created.
 */
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
	nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

	if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
		kmem_free(nvhdl, sizeof (nv_alloc_t));
		return (NULL);
	}

	return (nvhdl);
}

/*
 * Destroy a previously allocated nv_alloc structure.  The fixed buffer
 * associated with nva must be freed by the caller.
 */
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
	nv_alloc_fini(nva);
	kmem_free(nva, sizeof (nv_alloc_t));
}

/*
 * Create a new nv list.  A pointer to a new nv list structure is returned
 * upon success or NULL is returned to indicate that the structure could
 * not be created.  The newly created nv list is allocated and managed by the
 * operations installed in nva.  If nva is NULL, the default FMA nva
 * operations are installed and used.
 *
 * When called from the kernel and nva == NULL, this function must be called
 * from passive kernel context with no locks held that can prevent a
 * sleeping memory allocation from occurring.  Otherwise, this function may
 * be called from other kernel contexts as long as a valid nva created via
 * fm_nva_xcreate() is supplied.
 */
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
	int hdl_alloced = 0;
	nvlist_t *nvl;
	nv_alloc_t *nvhdl;

	if (nva == NULL) {
		nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

		if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
			kmem_free(nvhdl, sizeof (nv_alloc_t));
			return (NULL);
		}
		hdl_alloced = 1;
	} else {
		nvhdl = nva;
	}

	if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
		if (hdl_alloced) {
			nv_alloc_fini(nvhdl);
			kmem_free(nvhdl, sizeof (nv_alloc_t));
		}
		return (NULL);
	}

	return (nvl);
}

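/*
 * Example (illustrative only): using a preallocated buffer for nvlist
 * construction in a constrained context.  The buffer name and lifetime are
 * hypothetical and error checking is elided; see fm_nvlist_destroy() below
 * for the FM_NVA_RETAIN/FM_NVA_FREE semantics.
 *
 *	static char errbuf[ERPT_DATA_SZ];
 *	nv_alloc_t *nva = fm_nva_xcreate(errbuf, sizeof (errbuf));
 *	nvlist_t *nvl = fm_nvlist_create(nva);
 *
 *	... build and post the ereport ...
 *
 *	fm_nvlist_destroy(nvl, FM_NVA_RETAIN);	// keep nva for re-use
 *	...
 *	fm_nva_xdestroy(nva);			// when finished with errbuf
 */
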
/*
 * Destroy a previously allocated nvlist structure.  flag indicates whether
 * the associated nva structure should be freed (FM_NVA_FREE) or
 * retained (FM_NVA_RETAIN).  Retaining the nv alloc structure allows
 * it to be re-used for future nvlist creation operations.
 */
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);

	nvlist_free(nvl);

	if (nva != NULL) {
		if (flag == FM_NVA_FREE)
			fm_nva_xdestroy(nva);
	}
}

int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
	int nelem, ret = 0;
	data_type_t type;

	while (ret == 0 && name != NULL) {
		type = va_arg(ap, data_type_t);
		switch (type) {
		case DATA_TYPE_BYTE:
			ret = nvlist_add_byte(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_BYTE_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_byte_array(payload, name,
			    va_arg(ap, uchar_t *), nelem);
			break;
		case DATA_TYPE_BOOLEAN_VALUE:
			ret = nvlist_add_boolean_value(payload, name,
			    va_arg(ap, boolean_t));
			break;
		case DATA_TYPE_BOOLEAN_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_boolean_array(payload, name,
			    va_arg(ap, boolean_t *), nelem);
			break;
		case DATA_TYPE_INT8:
			ret = nvlist_add_int8(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int8_array(payload, name,
			    va_arg(ap, int8_t *), nelem);
			break;
		case DATA_TYPE_UINT8:
			ret = nvlist_add_uint8(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint8_array(payload, name,
			    va_arg(ap, uint8_t *), nelem);
			break;
		case DATA_TYPE_INT16:
			ret = nvlist_add_int16(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int16_array(payload, name,
			    va_arg(ap, int16_t *), nelem);
			break;
		case DATA_TYPE_UINT16:
			ret = nvlist_add_uint16(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint16_array(payload, name,
			    va_arg(ap, uint16_t *), nelem);
			break;
		case DATA_TYPE_INT32:
			ret = nvlist_add_int32(payload, name,
			    va_arg(ap, int32_t));
			break;
		case DATA_TYPE_INT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int32_array(payload, name,
			    va_arg(ap, int32_t *), nelem);
			break;
		case DATA_TYPE_UINT32:
			ret = nvlist_add_uint32(payload, name,
			    va_arg(ap, uint32_t));
			break;
		case DATA_TYPE_UINT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint32_array(payload, name,
			    va_arg(ap, uint32_t *), nelem);
			break;
		case DATA_TYPE_INT64:
			ret = nvlist_add_int64(payload, name,
			    va_arg(ap, int64_t));
			break;
		case DATA_TYPE_INT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int64_array(payload, name,
			    va_arg(ap, int64_t *), nelem);
			break;
		case DATA_TYPE_UINT64:
			ret = nvlist_add_uint64(payload, name,
			    va_arg(ap, uint64_t));
			break;
		case DATA_TYPE_UINT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint64_array(payload, name,
			    va_arg(ap, uint64_t *), nelem);
			break;
		case DATA_TYPE_STRING:
			ret = nvlist_add_string(payload, name,
			    va_arg(ap, char *));
			break;
		case DATA_TYPE_STRING_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_string_array(payload, name,
			    va_arg(ap, char **), nelem);
			break;
		case DATA_TYPE_NVLIST:
			ret = nvlist_add_nvlist(payload, name,
			    va_arg(ap, nvlist_t *));
			break;
		case DATA_TYPE_NVLIST_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_nvlist_array(payload, name,
			    va_arg(ap, nvlist_t **), nelem);
			break;
		default:
			ret = EINVAL;
		}

		name = va_arg(ap, char *);
	}
	return (ret);
}

void
fm_payload_set(nvlist_t *payload, ...)
{
	int ret;
	const char *name;
	va_list ap;

	va_start(ap, payload);
	name = va_arg(ap, char *);
	ret = i_fm_payload_set(payload, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
}

/*
 * Set-up and validate the members of an ereport event according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	class			string		ereport
 *	version			uint8_t		0
 *	ena			uint64_t	<ena>
 *	detector		nvlist_t	<detector>
 *	ereport-payload		nvlist_t	<var args>
 *
 * We don't actually add a 'version' member to the payload.  Really,
 * the version quoted to us by our caller is that of the category 1
 * "ereport" event class (and we require FM_EREPORT_VERS0) but
 * the payload version of the actual leaf class event under construction
 * may be something else.  Callers should supply a version in the varargs,
 * or (better) we could take two version arguments - one for the
 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
 * for the leaf class.
 */
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
    uint64_t ena, const nvlist_t *detector, ...)
{
	char ereport_class[FM_MAX_CLASS];
	const char *name;
	va_list ap;
	int ret;

	if (version != FM_EREPORT_VERS0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
	    FM_EREPORT_CLASS, erpt_class);
	if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
	    (nvlist_t *)detector) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	va_start(ap, detector);
	name = va_arg(ap, const char *);
	ret = i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}

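/*
 * Example (illustrative only): the varargs accepted by fm_ereport_set() and
 * fm_payload_set() are (name, DATA_TYPE_*, value) tuples terminated by a
 * NULL name; array types pass the element count before the array pointer.
 * The member names below are hypothetical.
 *
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example.xfer-err",
 *	    ena, detector,
 *	    "op", DATA_TYPE_STRING, "read",
 *	    "len", DATA_TYPE_UINT64, (uint64_t)len,
 *	    NULL);
 *
 *	fm_payload_set(ereport, "retries", DATA_TYPE_UINT32, nretries, NULL);
 */
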
/*
 * Set-up and validate the members of an hc fmri according to:
 *
 *	Member name		Type		Value
 *	===================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	hc-name			string		<name>
 *	hc-id			string		<id>
 *
 * Note that auth and hc-id are optional members.
 */

#define	HC_MAXPAIRS	20
#define	HC_MAXNAMELEN	50

static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
	if (version != FM_HC_SCHEME_VERSION) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
	    nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	return (1);
}

void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	va_list ap;
	int i;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = 0; i < npairs; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];

		(void) snprintf(idstr, sizeof (idstr), "%u", id);

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
	va_end(ap);

	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);

	for (i = 0; i < npairs; i++)
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}

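/*
 * Example (illustrative only): building an hc-scheme FMRI from (name, id)
 * pairs.  The topology names shown are hypothetical.
 *
 *	nvlist_t *fmri = fm_nvlist_create(NULL);
 *
 *	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
 *	    "motherboard", 0, "chip", 1, "core", 2);
 *
 * This produces an hc-list of three nvlists, e.g. hc-name="chip", hc-id="1".
 */
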
/*
 * Set-up and validate the members of a dev fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	devpath			string		<devpath>
 *	[devid]			string		<devid>
 *	[target-port-l0id]	string		<target-port-lun0-id>
 *
 * Note that auth and devid are optional members.
 */
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
    const char *devpath, const char *devid, const char *tpl0)
{
	int err = 0;

	if (version != DEV_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
	err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);

	if (auth != NULL) {
		err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth);
	}

	err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);

	if (devid != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);

	if (tpl0 != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);

	if (err)
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);

}

/*
 * Set-up and validate the members of a cpu fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	cpuid			uint32_t	<cpu_id>
 *	cpumask			uint8_t		<cpu_mask>
 *	serial			uint64_t	<serial_id>
 *
 * Note that auth, cpumask, serial are optional members.
 *
 */
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
    uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
	uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;

	if (version < CPU_SCHEME_VERSION1) {
		atomic_inc_64(failedp);
		return;
	}

	if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
		atomic_inc_64(failedp);
		return;
	}

	if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
	    FM_FMRI_SCHEME_CPU) != 0) {
		atomic_inc_64(failedp);
		return;
	}

	if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0)
		atomic_inc_64(failedp);

	if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
		atomic_inc_64(failedp);

	if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
	    *cpu_maskp) != 0)
		atomic_inc_64(failedp);

	if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
	    FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
		atomic_inc_64(failedp);
}

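/*
 * Example (illustrative only): filling in a cpu-scheme detector FMRI for the
 * current CPU.  The surrounding ereport construction is elided.
 *
 *	nvlist_t *detector = fm_nvlist_create(NULL);
 *
 *	fm_fmri_cpu_set(detector, CPU_SCHEME_VERSION1, NULL,
 *	    CPU->cpu_id, NULL, NULL);
 *
 * Passing NULL for cpu_maskp omits the optional cpumask member; note that a
 * NULL serial_idp still bumps the fmri-set-failed kstat (see the final check
 * in fm_fmri_cpu_set() above).
 */
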
/*
 * Set-up and validate the members of a mem fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>		[optional]
 *	unum			string		<unum>
 *	serial			string		<serial>	[optional*]
 *	offset			uint64_t	<offset>	[optional]
 *
 *	* serial is required if offset is present
 */
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    const char *unum, const char *serial, uint64_t offset)
{
	if (version != MEM_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (!serial && (offset != (uint64_t)-1)) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (auth != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}

	if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	if (serial != NULL) {
		if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
		    (char **)&serial, 1) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
		if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
		    FM_FMRI_MEM_OFFSET, offset) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}

void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
    uint64_t vdev_guid)
{
	if (version != ZFS_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	if (vdev_guid != 0) {
		if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}

uint64_t
fm_ena_increment(uint64_t ena)
{
	uint64_t new_ena;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
		break;
	case FM_ENA_FMT2:
		new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
		break;
	default:
		new_ena = 0;
	}

	return (new_ena);
}

uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
	uint64_t ena = 0;

	switch (format) {
	case FM_ENA_FMT1:
		if (timestamp) {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((timestamp << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		} else {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((gethrtime_waitfree() << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		}
		break;
	case FM_ENA_FMT2:
		ena = (uint64_t)((format & ENA_FORMAT_MASK) |
		    ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
		break;
	default:
		break;
	}

	return (ena);
}

uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
	return (fm_ena_generate_cpu(timestamp, CPU->cpu_id, format));
}

uint64_t
fm_ena_generation_get(uint64_t ena)
{
	uint64_t gen;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
		break;
	case FM_ENA_FMT2:
		gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
		break;
	default:
		gen = 0;
		break;
	}

	return (gen);
}

uchar_t
fm_ena_format_get(uint64_t ena)
{

	return (ENA_FORMAT(ena));
}

uint64_t
fm_ena_id_get(uint64_t ena)
{
	uint64_t id;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
		break;
	case FM_ENA_FMT2:
		id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
		break;
	default:
		id = 0;
	}

	return (id);
}

uint64_t
fm_ena_time_get(uint64_t ena)
{
	uint64_t time;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
		break;
	case FM_ENA_FMT2:
		time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
		break;
	default:
		time = 0;
	}

	return (time);
}

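/*
 * Example (illustrative only): generating a format-1 ENA for the current CPU
 * and extracting its fields with the accessors above.
 *
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *
 *	ASSERT(fm_ena_format_get(ena) == FM_ENA_FMT1);
 *	uint64_t id = fm_ena_id_get(ena);
 *	uint64_t t = fm_ena_time_get(ena);
 *
 * Passing a timestamp of 0 makes fm_ena_generate_cpu() use
 * gethrtime_waitfree() for the time field.
 */
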
1290 { 1291 nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri); 1292 nvlist_t *pairs[HC_MAXPAIRS]; 1293 nvlist_t **hcl; 1294 uint_t n; 1295 int i, j; 1296 va_list ap; 1297 char *hcname, *hcid; 1298 1299 if (!fm_fmri_hc_set_common(fmri, version, auth)) 1300 return; 1301 1302 /* 1303 * copy the bboard nvpairs to the pairs array 1304 */ 1305 if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n) 1306 != 0) { 1307 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64); 1308 return; 1309 } 1310 1311 for (i = 0; i < n; i++) { 1312 if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME, 1313 &hcname) != 0) { 1314 atomic_inc_64( 1315 &erpt_kstat_data.fmri_set_failed.value.ui64); 1316 return; 1317 } 1318 if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) { 1319 atomic_inc_64( 1320 &erpt_kstat_data.fmri_set_failed.value.ui64); 1321 return; 1322 } 1323 1324 pairs[i] = fm_nvlist_create(nva); 1325 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 || 1326 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) { 1327 for (j = 0; j <= i; j++) { 1328 if (pairs[j] != NULL) 1329 fm_nvlist_destroy(pairs[j], 1330 FM_NVA_RETAIN); 1331 } 1332 atomic_inc_64( 1333 &erpt_kstat_data.fmri_set_failed.value.ui64); 1334 return; 1335 } 1336 } 1337 1338 /* 1339 * create the pairs from passed in pairs 1340 */ 1341 npairs = MIN(npairs, HC_MAXPAIRS); 1342 1343 va_start(ap, npairs); 1344 for (i = n; i < npairs + n; i++) { 1345 const char *name = va_arg(ap, const char *); 1346 uint32_t id = va_arg(ap, uint32_t); 1347 char idstr[11]; 1348 (void) snprintf(idstr, sizeof (idstr), "%u", id); 1349 pairs[i] = fm_nvlist_create(nva); 1350 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 || 1351 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) { 1352 for (j = 0; j <= i; j++) { 1353 if (pairs[j] != NULL) 1354 fm_nvlist_destroy(pairs[j], 1355 FM_NVA_RETAIN); 1356 } 1357 atomic_inc_64( 1358 &erpt_kstat_data.fmri_set_failed.value.ui64); 1359 return; 1360 } 1361 } 1362 va_end(ap); 1363 1364 /* 1365 * Create the fmri hc list 1366 */ 1367 if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, 1368 npairs + n) != 0) { 1369 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64); 1370 return; 1371 } 1372 1373 for (i = 0; i < npairs + n; i++) { 1374 fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN); 1375 } 1376 1377 if (snvl != NULL) { 1378 if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) { 1379 atomic_inc_64( 1380 &erpt_kstat_data.fmri_set_failed.value.ui64); 1381 return; 1382 } 1383 } 1384 } 1385