/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Fault Management Architecture (FMA) Resource and Protocol Support
 *
 * The routines contained herein provide services to support kernel subsystems
 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
 *
 * Name-Value Pair Lists
 *
 * The embodiment of an FMA protocol element (event, fmri or authority) is a
 * name-value pair list (nvlist_t). FMA-specific nvlist constructor and
 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
 * to create an nvpair list using custom allocators. Callers may choose to
 * allocate either from the kernel memory allocator, or from a preallocated
 * buffer, useful in constrained contexts like high-level interrupt routines.
 *
 * Protocol Event and FMRI Construction
 *
 * Convenience routines are provided to construct nvlist events according to
 * the FMA Event Protocol and Naming Schema specification for ereports and
 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
 *
 * ENA Manipulation
 *
 * Routines to generate ENA formats 0, 1 and 2 are available as well as
 * routines to increment formats 1 and 2. Individual fields within the
 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
 * fm_ena_format_get() and fm_ena_generation_get().
 */
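
/*
 * Example (illustrative only, not part of the original source): a typical
 * producer strings the routines described above together roughly as follows.
 * The device path, class string and variable names are hypothetical.
 *
 *	nvlist_t *detector, *ereport;
 *	uint64_t ena;
 *
 *	detector = fm_nvlist_create(NULL);
 *	fm_fmri_dev_set(detector, DEV_SCHEME_VERSION0, NULL,
 *	    "/pci@0,0/pci1000,30@10", NULL);
 *	ena = fm_ena_generate(0, FM_ENA_FMT1);
 *	ereport = fm_nvlist_create(NULL);
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example.err",
 *	    ena, detector, NULL);
 *	fm_ereport_post(ereport, EVCH_SLEEP);
 *	fm_nvlist_destroy(ereport, FM_NVA_FREE);
 *	fm_nvlist_destroy(detector, FM_NVA_FREE);
 */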

#include <sys/types.h>
#include <sys/time.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/ddifm.h>
#include <sys/ddifm_impl.h>
#include <sys/spl.h>
#include <sys/dumphdr.h>
#include <sys/compress.h>
#include <sys/console.h>
#include <sys/panic.h>
#include <sys/kobj.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>

/*
 * URL and SUNW-MSG-ID value to display for fm_panic(), defined below. These
 * values must be kept in sync with the FMA source code in usr/src/cmd/fm.
 */
static const char *fm_url = "http://www.sun.com/msg";
static const char *fm_msgid = "SUNOS-8000-0G";
static char *volatile fm_panicstr = NULL;

errorq_t *ereport_errorq;
void *ereport_dumpbuf;
size_t ereport_dumplen;

static uint_t ereport_chanlen = ERPT_EVCH_MAX;
static evchan_t *ereport_chan = NULL;
static ulong_t ereport_qlen = 0;
static size_t ereport_size = 0;
static int ereport_cols = 80;

extern void fastreboot_disable_highpil(void);

/*
 * Common fault management kstats to record ereport generation
 * failures
 */

struct erpt_kstat {
	kstat_named_t	erpt_dropped;		/* num erpts dropped on post */
	kstat_named_t	erpt_set_failed;	/* num erpt set failures */
	kstat_named_t	fmri_set_failed;	/* num fmri set failures */
	kstat_named_t	payload_set_failed;	/* num payload set failures */
};

static struct erpt_kstat erpt_kstat_data = {
	{ "erpt-dropped", KSTAT_DATA_UINT64 },
	{ "erpt-set-failed", KSTAT_DATA_UINT64 },
	{ "fmri-set-failed", KSTAT_DATA_UINT64 },
	{ "payload-set-failed", KSTAT_DATA_UINT64 }
};

/*ARGSUSED*/
static void
fm_drain(void *private, void *data, errorq_elem_t *eep)
{
	nvlist_t *nvl = errorq_elem_nvl(ereport_errorq, eep);

	if (!panicstr)
		(void) fm_ereport_post(nvl, EVCH_TRYHARD);
	else
		fm_nvprint(nvl);
}

void
fm_init(void)
{
	kstat_t *ksp;

	(void) sysevent_evc_bind(FM_ERROR_CHAN,
	    &ereport_chan, EVCH_CREAT | EVCH_HOLD_PEND);

	(void) sysevent_evc_control(ereport_chan,
	    EVCH_SET_CHAN_LEN, &ereport_chanlen);

	if (ereport_qlen == 0)
		ereport_qlen = ERPT_MAX_ERRS * MAX(max_ncpus, 4);

	if (ereport_size == 0)
		ereport_size = ERPT_DATA_SZ;

	ereport_errorq = errorq_nvcreate("fm_ereport_queue",
	    (errorq_func_t)fm_drain, NULL, ereport_qlen, ereport_size,
	    FM_ERR_PIL, ERRORQ_VITAL);
	if (ereport_errorq == NULL)
		panic("failed to create required ereport error queue");

	ereport_dumpbuf = kmem_alloc(ereport_size, KM_SLEEP);
	ereport_dumplen = ereport_size;

	/* Initialize ereport allocation and generation kstats */
	ksp = kstat_create("unix", 0, "fm", "misc", KSTAT_TYPE_NAMED,
	    sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp != NULL) {
		ksp->ks_data = &erpt_kstat_data;
		kstat_install(ksp);
	} else {
		cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
	}
}
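
/*
 * Example (illustrative sketch, not part of the original source): error
 * handlers that cannot post directly typically enqueue an ereport nvlist on
 * ereport_errorq and let fm_drain() deliver it later. The errorq_reserve()/
 * errorq_commit() producer interfaces assumed below come from the generic
 * errorq subsystem, not from this file, and "detector" is assumed to have
 * been constructed already.
 *
 *	errorq_elem_t *eqep;
 *	nvlist_t *nvl;
 *
 *	if ((eqep = errorq_reserve(ereport_errorq)) != NULL) {
 *		nvl = errorq_elem_nvl(ereport_errorq, eqep);
 *		fm_ereport_set(nvl, FM_EREPORT_VERS0, "cpu.example.err",
 *		    fm_ena_generate(0, FM_ENA_FMT1), detector, NULL);
 *		errorq_commit(ereport_errorq, eqep, ERRORQ_ASYNC);
 *	}
 */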

/*
 * Formatting utility function for fm_nvprintr. We attempt to wrap chunks of
 * output so they aren't split across console lines, and return the end column.
 */
/*PRINTFLIKE4*/
static int
fm_printf(int depth, int c, int cols, const char *format, ...)
{
	va_list ap;
	int width;
	char c1;

	va_start(ap, format);
	width = vsnprintf(&c1, sizeof (c1), format, ap);
	va_end(ap);

	if (c + width >= cols) {
		console_printf("\n\r");
		c = 0;
		if (format[0] != ' ' && depth > 0) {
			console_printf(" ");
			c++;
		}
	}

	va_start(ap, format);
	console_vprintf(format, ap);
	va_end(ap);

	return ((c + width) % cols);
}

/*
 * Recursively print an nvlist in the specified column width and return the
 * column we end up in. This function is called recursively by fm_nvprint(),
 * below. We generically format the entire nvpair using hexadecimal
 * integers and strings, and elide any integer arrays. Arrays are currently
 * used mainly for cache dumps, so we suppress them to avoid overwhelming the
 * console output we produce at panic time. This can be further enhanced as
 * FMA technology grows based upon the needs of consumers. All FMA telemetry
 * is logged using the dump device transport, so the console output serves
 * only as a fallback in case this procedure is unsuccessful.
 */
static int
fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
{
	nvpair_t *nvp;

	for (nvp = nvlist_next_nvpair(nvl, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

		data_type_t type = nvpair_type(nvp);
		const char *name = nvpair_name(nvp);

		boolean_t b;
		uint8_t i8;
		uint16_t i16;
		uint32_t i32;
		uint64_t i64;
		char *str;
		nvlist_t *cnv;

		if (strcmp(name, FM_CLASS) == 0)
			continue; /* already printed by caller */

		c = fm_printf(d, c, cols, " %s=", name);

		switch (type) {
		case DATA_TYPE_BOOLEAN:
			c = fm_printf(d + 1, c, cols, " 1");
			break;

		case DATA_TYPE_BOOLEAN_VALUE:
			(void) nvpair_value_boolean_value(nvp, &b);
			c = fm_printf(d + 1, c, cols, b ? "1" : "0");
			break;

		case DATA_TYPE_BYTE:
			(void) nvpair_value_byte(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT8:
			(void) nvpair_value_int8(nvp, (void *)&i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_UINT8:
			(void) nvpair_value_uint8(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT16:
			(void) nvpair_value_int16(nvp, (void *)&i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_UINT16:
			(void) nvpair_value_uint16(nvp, &i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_INT32:
			(void) nvpair_value_int32(nvp, (void *)&i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_UINT32:
			(void) nvpair_value_uint32(nvp, &i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_INT64:
			(void) nvpair_value_int64(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_UINT64:
			(void) nvpair_value_uint64(nvp, &i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_HRTIME:
			(void) nvpair_value_hrtime(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_STRING:
			(void) nvpair_value_string(nvp, &str);
			c = fm_printf(d + 1, c, cols, "\"%s\"",
			    str ? str : "<NULL>");
			break;

		case DATA_TYPE_NVLIST:
			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist(nvp, &cnv);
			c = fm_nvprintr(cnv, d + 1, c, cols);
			c = fm_printf(d + 1, c, cols, " ]");
			break;

		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++) {
				c = fm_nvprintr(val[i], d + 1, c, cols);
			}
			c = fm_printf(d + 1, c, cols, " ]");
			}
			break;

		case DATA_TYPE_BOOLEAN_ARRAY:
		case DATA_TYPE_BYTE_ARRAY:
		case DATA_TYPE_INT8_ARRAY:
		case DATA_TYPE_UINT8_ARRAY:
		case DATA_TYPE_INT16_ARRAY:
		case DATA_TYPE_UINT16_ARRAY:
		case DATA_TYPE_INT32_ARRAY:
		case DATA_TYPE_UINT32_ARRAY:
		case DATA_TYPE_INT64_ARRAY:
		case DATA_TYPE_UINT64_ARRAY:
		case DATA_TYPE_STRING_ARRAY:
			c = fm_printf(d + 1, c, cols, "[...]");
			break;
		case DATA_TYPE_UNKNOWN:
			c = fm_printf(d + 1, c, cols, "<unknown>");
			break;
		}
	}

	return (c);
}

void
fm_nvprint(nvlist_t *nvl)
{
	char *class;
	int c = 0;

	console_printf("\r");

	if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
		c = fm_printf(0, c, ereport_cols, "%s", class);

	if (fm_nvprintr(nvl, 0, c, ereport_cols) != 0)
		console_printf("\n");

	console_printf("\n");
}

/*
 * Wrapper for panic() that first produces an FMA-style message for admins.
 * Normally such messages are generated by fmd(1M)'s syslog-msgs agent: this
 * is the one exception to that rule and the only error that gets messaged.
 * This function is intended for use by subsystems that have detected a fatal
 * error and enqueued appropriate ereports and wish to then force a panic.
 */
/*PRINTFLIKE1*/
void
fm_panic(const char *format, ...)
{
	va_list ap;

	(void) casptr((void *)&fm_panicstr, NULL, (void *)format);
#if defined(__i386) || defined(__amd64)
	fastreboot_disable_highpil();
#endif	/* __i386 || __amd64 */
	va_start(ap, format);
	vpanic(format, ap);
	va_end(ap);
}
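
/*
 * Example (illustrative only, not part of the original source): a subsystem
 * that has already queued its ereports for a fatal error might then force a
 * panic as follows. The message text and "cpuid" variable are hypothetical.
 *
 *	fm_ereport_post(ereport, EVCH_TRYHARD);
 *	fm_panic("unrecoverable hardware error on CPU %d", cpuid);
 */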

/*
 * Print any appropriate FMA banner message before the panic message. This
 * function is called by panicsys() and prints the message for fm_panic().
 * We print the message here so that it comes after the system is quiesced.
 * A one-line summary is recorded in the log only (cmn_err(9F) with "!" prefix).
 * The rest of the message is for the console only and not needed in the log,
 * so it is printed using console_printf(). We break it up into multiple
 * chunks so as to avoid overflowing any small legacy prom_printf() buffers.
 */
void
fm_banner(void)
{
	timespec_t tod;
	hrtime_t now;

	if (!fm_panicstr)
		return; /* panic was not initiated by fm_panic(); do nothing */

	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	cmn_err(CE_NOTE, "!SUNW-MSG-ID: %s, "
	    "TYPE: Error, VER: 1, SEVERITY: Major\n", fm_msgid);

	console_printf(
"\n\rSUNW-MSG-ID: %s, TYPE: Error, VER: 1, SEVERITY: Major\n"
"EVENT-TIME: 0x%lx.0x%lx (0x%llx)\n",
	    fm_msgid, tod.tv_sec, tod.tv_nsec, (u_longlong_t)now);

	console_printf(
"PLATFORM: %s, CSN: -, HOSTNAME: %s\n"
"SOURCE: %s, REV: %s %s\n",
	    platform, utsname.nodename, utsname.sysname,
	    utsname.release, utsname.version);

	console_printf(
"DESC: Errors have been detected that require a reboot to ensure system\n"
"integrity. See %s/%s for more information.\n",
	    fm_url, fm_msgid);

	console_printf(
"AUTO-RESPONSE: Solaris will attempt to save and diagnose the error telemetry\n"
"IMPACT: The system will sync files, save a crash dump if needed, and reboot\n"
"REC-ACTION: Save the error summary below in case telemetry cannot be saved\n");

	console_printf("\n");
}

/*
 * Utility function to write all of the pending ereports to the dump device.
 * This function is called at either normal reboot or panic time, and simply
 * iterates over the in-transit messages in the ereport sysevent channel.
 */
void
fm_ereport_dump(void)
{
	evchanq_t *chq;
	sysevent_t *sep;
	erpt_dump_t ed;

	timespec_t tod;
	hrtime_t now;
	char *buf;
	size_t len;

	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		if (ereport_errorq != NULL)
			errorq_drain(ereport_errorq);
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	/*
	 * In the panic case, sysevent_evc_walk_init() will return NULL.
	 */
	if ((chq = sysevent_evc_walk_init(ereport_chan, NULL)) == NULL &&
	    !panicstr)
		return; /* event channel isn't initialized yet */

	while ((sep = sysevent_evc_walk_step(chq)) != NULL) {
		if ((buf = sysevent_evc_event_attr(sep, &len)) == NULL)
			break;

		ed.ed_magic = ERPT_MAGIC;
		ed.ed_chksum = checksum32(buf, len);
		ed.ed_size = (uint32_t)len;
		ed.ed_pad = 0;
		ed.ed_hrt_nsec = SE_TIME(sep);
		ed.ed_hrt_base = now;
		ed.ed_tod_base.sec = tod.tv_sec;
		ed.ed_tod_base.nsec = tod.tv_nsec;

		dumpvp_write(&ed, sizeof (ed));
		dumpvp_write(buf, len);
	}

	sysevent_evc_walk_fini(chq);
}

/*
 * Post an error report (ereport) to the sysevent error channel. The error
 * channel must be established with a prior call to sysevent_evc_bind()
 * before publication may occur.
 */
void
fm_ereport_post(nvlist_t *ereport, int evc_flag)
{
	size_t nvl_size = 0;
	evchan_t *error_chan;

	(void) nvlist_size(ereport, &nvl_size, NV_ENCODE_NATIVE);
	if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
		atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
		return;
	}

	if (sysevent_evc_bind(FM_ERROR_CHAN, &error_chan,
	    EVCH_CREAT|EVCH_HOLD_PEND) != 0) {
		atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
		return;
	}

	if (sysevent_evc_publish(error_chan, EC_FM, ESC_FM_ERROR,
	    SUNW_VENDOR, FM_PUB, ereport, evc_flag) != 0) {
		atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
		(void) sysevent_evc_unbind(error_chan);
		return;
	}
	(void) sysevent_evc_unbind(error_chan);
}

/*
 * Wrappers for FM nvlist allocators
 */
/* ARGSUSED */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
	return (kmem_zalloc(size, KM_SLEEP));
}

/* ARGSUSED */
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
	kmem_free(buf, size);
}

const nv_alloc_ops_t fm_mem_alloc_ops = {
	NULL,
	NULL,
	i_fm_alloc,
	i_fm_free,
	NULL
};

/*
 * Create and initialize a new nv_alloc_t for a fixed buffer, buf. A pointer
 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
 * is returned to indicate that the nv_alloc structure could not be created.
 */
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
	nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

	if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
		kmem_free(nvhdl, sizeof (nv_alloc_t));
		return (NULL);
	}

	return (nvhdl);
}

/*
 * Destroy a previously allocated nv_alloc structure. The fixed buffer
 * associated with nva must be freed by the caller.
 */
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
	nv_alloc_fini(nva);
	kmem_free(nva, sizeof (nv_alloc_t));
}

/*
 * Create a new nv list. A pointer to a new nv list structure is returned
 * upon success or NULL is returned to indicate that the structure could
 * not be created. The newly created nv list is created and managed by the
 * operations installed in nva. If nva is NULL, the default FMA nva
 * operations are installed and used.
 *
 * When called from the kernel and nva == NULL, this function must be called
 * from passive kernel context with no locks held that can prevent a
 * sleeping memory allocation from occurring. Otherwise, this function may
 * be called from other kernel contexts as long as a valid nva created via
 * fm_nva_xcreate() is supplied.
 */
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
	int hdl_alloced = 0;
	nvlist_t *nvl;
	nv_alloc_t *nvhdl;

	if (nva == NULL) {
		nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

		if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
			kmem_free(nvhdl, sizeof (nv_alloc_t));
			return (NULL);
		}
		hdl_alloced = 1;
	} else {
		nvhdl = nva;
	}

	if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
		if (hdl_alloced) {
			nv_alloc_fini(nvhdl);
			kmem_free(nvhdl, sizeof (nv_alloc_t));
		}
		return (NULL);
	}

	return (nvl);
}
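
/*
 * Example (illustrative sketch, not part of the original source): a subsystem
 * that must build nvlists in a constrained context can set up a fixed-buffer
 * allocator ahead of time and reuse it. The buffer name and size shown here
 * are hypothetical.
 *
 *	static char errbuf[ERPT_DATA_SZ];
 *	nv_alloc_t *nva;
 *	nvlist_t *nvl;
 *
 *	nva = fm_nva_xcreate(errbuf, sizeof (errbuf));
 *	nvl = fm_nvlist_create(nva);
 *	...
 *	fm_nvlist_destroy(nvl, FM_NVA_RETAIN);	(keep nva for re-use)
 *	fm_nva_xdestroy(nva);			(when finally done)
 */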

/*
 * Destroy a previously allocated nvlist structure. The flag argument indicates
 * whether the associated nva structure should be freed (FM_NVA_FREE) or
 * retained (FM_NVA_RETAIN). Retaining the nv alloc structure allows
 * it to be re-used for future nvlist creation operations.
 */
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);

	nvlist_free(nvl);

	if (nva != NULL) {
		if (flag == FM_NVA_FREE)
			fm_nva_xdestroy(nva);
	}
}

int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
	int nelem, ret = 0;
	data_type_t type;

	while (ret == 0 && name != NULL) {
		type = va_arg(ap, data_type_t);
		switch (type) {
		case DATA_TYPE_BYTE:
			ret = nvlist_add_byte(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_BYTE_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_byte_array(payload, name,
			    va_arg(ap, uchar_t *), nelem);
			break;
		case DATA_TYPE_BOOLEAN_VALUE:
			ret = nvlist_add_boolean_value(payload, name,
			    va_arg(ap, boolean_t));
			break;
		case DATA_TYPE_BOOLEAN_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_boolean_array(payload, name,
			    va_arg(ap, boolean_t *), nelem);
			break;
		case DATA_TYPE_INT8:
			ret = nvlist_add_int8(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int8_array(payload, name,
			    va_arg(ap, int8_t *), nelem);
			break;
		case DATA_TYPE_UINT8:
			ret = nvlist_add_uint8(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint8_array(payload, name,
			    va_arg(ap, uint8_t *), nelem);
			break;
		case DATA_TYPE_INT16:
			ret = nvlist_add_int16(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int16_array(payload, name,
			    va_arg(ap, int16_t *), nelem);
			break;
		case DATA_TYPE_UINT16:
			ret = nvlist_add_uint16(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint16_array(payload, name,
			    va_arg(ap, uint16_t *), nelem);
			break;
		case DATA_TYPE_INT32:
			ret = nvlist_add_int32(payload, name,
			    va_arg(ap, int32_t));
			break;
		case DATA_TYPE_INT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int32_array(payload, name,
			    va_arg(ap, int32_t *), nelem);
			break;
		case DATA_TYPE_UINT32:
			ret = nvlist_add_uint32(payload, name,
			    va_arg(ap, uint32_t));
			break;
		case DATA_TYPE_UINT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint32_array(payload, name,
			    va_arg(ap, uint32_t *), nelem);
			break;
		case DATA_TYPE_INT64:
			ret = nvlist_add_int64(payload, name,
			    va_arg(ap, int64_t));
			break;
		case DATA_TYPE_INT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int64_array(payload, name,
			    va_arg(ap, int64_t *), nelem);
			break;
		case DATA_TYPE_UINT64:
			ret = nvlist_add_uint64(payload, name,
			    va_arg(ap, uint64_t));
			break;
		case DATA_TYPE_UINT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint64_array(payload, name,
			    va_arg(ap, uint64_t *), nelem);
			break;
		case DATA_TYPE_STRING:
			ret = nvlist_add_string(payload, name,
			    va_arg(ap, char *));
			break;
		case DATA_TYPE_STRING_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_string_array(payload, name,
			    va_arg(ap, char **), nelem);
			break;
		case DATA_TYPE_NVLIST:
			ret = nvlist_add_nvlist(payload, name,
			    va_arg(ap, nvlist_t *));
			break;
		case DATA_TYPE_NVLIST_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_nvlist_array(payload, name,
			    va_arg(ap, nvlist_t **), nelem);
			break;
		default:
			ret = EINVAL;
		}

		name = va_arg(ap, char *);
	}
	return (ret);
}

void
fm_payload_set(nvlist_t *payload, ...)
{
	int ret;
	const char *name;
	va_list ap;

	va_start(ap, payload);
	name = va_arg(ap, char *);
	ret = i_fm_payload_set(payload, name, ap);
	va_end(ap);

	if (ret)
		atomic_add_64(
		    &erpt_kstat_data.payload_set_failed.value.ui64, 1);
}

/*
 * Set up and validate the members of an ereport event according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	class			string		ereport
 *	version			uint8_t		0
 *	ena			uint64_t	<ena>
 *	detector		nvlist_t	<detector>
 *	ereport-payload		nvlist_t	<var args>
 *
 */
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
    uint64_t ena, const nvlist_t *detector, ...)
{
	char ereport_class[FM_MAX_CLASS];
	const char *name;
	va_list ap;
	int ret;

	if (version != FM_EREPORT_VERS0) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
		return;
	}

	(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
	    FM_EREPORT_CLASS, erpt_class);
	if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
	}

	if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
	    (nvlist_t *)detector) != 0) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
	}

	va_start(ap, detector);
	name = va_arg(ap, const char *);
	ret = i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (ret)
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
}
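
/*
 * Example (illustrative only, not part of the original source): the variable
 * argument list consumed by i_fm_payload_set() is a NULL-terminated sequence
 * of (name, DATA_TYPE_*, [nelem,] value) tuples. The member names and values
 * below are hypothetical.
 *
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example.dev.rxerr",
 *	    ena, detector,
 *	    "rx-count", DATA_TYPE_UINT32, rxcount,
 *	    "last-addr", DATA_TYPE_UINT64, addr,
 *	    NULL);
 */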

/*
 * Set up and validate the members of an hc fmri according to:
 *
 *	Member name		Type		Value
 *	===================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	hc-name			string		<name>
 *	hc-id			string		<id>
 *
 * Note that auth and hc-id are optional members.
 */

#define	HC_MAXPAIRS	20
#define	HC_MAXNAMELEN	50

static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
	if (version != FM_HC_SCHEME_VERSION) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return (0);
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
	    nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return (0);
	}

	if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return (0);
	}

	return (1);
}

void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	va_list ap;
	int i;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = 0; i < npairs; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];

		(void) snprintf(idstr, sizeof (idstr), "%u", id);

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}
	va_end(ap);

	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);

	for (i = 0; i < npairs; i++)
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}
}
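
/*
 * Example (illustrative only, not part of the original source): each varargs
 * pair supplies one hc-list node as an (hc-name, hc-id) tuple, listed from the
 * top of the topology down. The names and instance numbers below are
 * hypothetical.
 *
 *	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, auth, NULL, 3,
 *	    "motherboard", 0,
 *	    "chip", 1,
 *	    "cpu", 4);
 */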

/*
 * Set up and validate the members of a dev fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	devpath			string		<devpath>
 *	devid			string		<devid>
 *
 * Note that auth and devid are optional members.
 */
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
    const char *devpath, const char *devid)
{
	if (version != DEV_SCHEME_VERSION0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint8(fmri_dev, FM_VERSION, version) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_string(fmri_dev, FM_FMRI_SCHEME,
	    FM_FMRI_SCHEME_DEV) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (auth != NULL) {
		if (nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}

	if (nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
	}

	if (devid != NULL)
		if (nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid) != 0)
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
}

/*
 * Set up and validate the members of a cpu fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	cpuid			uint32_t	<cpu_id>
 *	cpumask			uint8_t		<cpu_mask>
 *	serial			uint64_t	<serial_id>
 *
 * Note that auth, cpumask, serial are optional members.
 *
 */
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
    uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
	uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;

	if (version < CPU_SCHEME_VERSION1) {
		atomic_add_64(failedp, 1);
		return;
	}

	if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
		atomic_add_64(failedp, 1);
		return;
	}

	if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
	    FM_FMRI_SCHEME_CPU) != 0) {
		atomic_add_64(failedp, 1);
		return;
	}

	if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0)
		atomic_add_64(failedp, 1);

	if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
		atomic_add_64(failedp, 1);

	if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
	    *cpu_maskp) != 0)
		atomic_add_64(failedp, 1);

	if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
	    FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
		atomic_add_64(failedp, 1);
}

/*
 * Set up and validate the members of a mem fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>		[optional]
 *	unum			string		<unum>
 *	serial			string		<serial>	[optional*]
 *	offset			uint64_t	<offset>	[optional]
 *
 *	* serial is required if offset is present
 */
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    const char *unum, const char *serial, uint64_t offset)
{
	if (version != MEM_SCHEME_VERSION0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (!serial && (offset != (uint64_t)-1)) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (auth != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}

	if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
	}

	if (serial != NULL) {
		if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
		    (char **)&serial, 1) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
		if (offset != (uint64_t)-1) {
			if (nvlist_add_uint64(fmri, FM_FMRI_MEM_OFFSET,
			    offset) != 0) {
				atomic_add_64(&erpt_kstat_data.
				    fmri_set_failed.value.ui64, 1);
			}
		}
	}
}
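
/*
 * Example (illustrative only, not part of the original source): callers that
 * have no offset pass (uint64_t)-1, which fm_fmri_mem_set() treats as "offset
 * absent"; a real offset additionally requires a serial number. The unum and
 * serial strings below are hypothetical.
 *
 *	fm_fmri_mem_set(fmri, MEM_SCHEME_VERSION0, NULL,
 *	    "MB/CMP0/BR0/CH0/D0", "12345678", (uint64_t)-1);
 */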

void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
    uint64_t vdev_guid)
{
	if (version != ZFS_SCHEME_VERSION0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
	}

	if (vdev_guid != 0) {
		if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}
}

uint64_t
fm_ena_increment(uint64_t ena)
{
	uint64_t new_ena;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
		break;
	case FM_ENA_FMT2:
		new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
		break;
	default:
		new_ena = 0;
	}

	return (new_ena);
}

uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
	uint64_t ena = 0;

	switch (format) {
	case FM_ENA_FMT1:
		if (timestamp) {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((timestamp << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		} else {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((gethrtime_waitfree() << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		}
		break;
	case FM_ENA_FMT2:
		ena = (uint64_t)((format & ENA_FORMAT_MASK) |
		    ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
		break;
	default:
		break;
	}

	return (ena);
}

uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
	return (fm_ena_generate_cpu(timestamp, CPU->cpu_id, format));
}
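
/*
 * Example (illustrative only, not part of the original source): an error
 * handler typically generates one format-1 ENA when an error is first
 * detected and bumps the generation field for follow-on ereports describing
 * the same error flow.
 *
 *	uint64_t ena;
 *
 *	ena = fm_ena_generate(0, FM_ENA_FMT1);	(timestamp 0: use current time)
 *	...
 *	ena = fm_ena_increment(ena);		(related, later ereport)
 */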

uint64_t
fm_ena_generation_get(uint64_t ena)
{
	uint64_t gen;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
		break;
	case FM_ENA_FMT2:
		gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
		break;
	default:
		gen = 0;
		break;
	}

	return (gen);
}

uchar_t
fm_ena_format_get(uint64_t ena)
{
	return (ENA_FORMAT(ena));
}

uint64_t
fm_ena_id_get(uint64_t ena)
{
	uint64_t id;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
		break;
	case FM_ENA_FMT2:
		id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
		break;
	default:
		id = 0;
	}

	return (id);
}

uint64_t
fm_ena_time_get(uint64_t ena)
{
	uint64_t time;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
		break;
	case FM_ENA_FMT2:
		time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
		break;
	default:
		time = 0;
	}

	return (time);
}

/*
 * Convert a getpcstack() trace to symbolic name+offset, and add the resulting
 * string array to a Fault Management ereport as FM_EREPORT_PAYLOAD_NAME_STACK.
 */
void
fm_payload_stack_add(nvlist_t *payload, const pc_t *stack, int depth)
{
	int i;
	char *sym;
	ulong_t off;
	char *stkpp[FM_STK_DEPTH];
	char buf[FM_STK_DEPTH * FM_SYM_SZ];
	char *stkp = buf;

	for (i = 0; i < depth && i != FM_STK_DEPTH; i++, stkp += FM_SYM_SZ) {
		if ((sym = kobj_getsymname(stack[i], &off)) != NULL)
			(void) snprintf(stkp, FM_SYM_SZ, "%s+%lx", sym, off);
		else
			(void) snprintf(stkp, FM_SYM_SZ, "%lx", (long)stack[i]);
		stkpp[i] = stkp;
	}

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_STACK,
	    DATA_TYPE_STRING_ARRAY, depth, stkpp, NULL);
}
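
/*
 * Example (illustrative only, not part of the original source): a caller can
 * capture its own stack with getpcstack() and attach it to an ereport payload.
 *
 *	pc_t stack[FM_STK_DEPTH];
 *	int depth;
 *
 *	depth = getpcstack(stack, FM_STK_DEPTH);
 *	fm_payload_stack_add(ereport, stack, depth);
 */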

void
print_msg_hwerr(ctid_t ct_id, proc_t *p)
{
	uprintf("Killed process %d (%s) in contract id %d "
	    "due to hardware error\n", p->p_pid, p->p_user.u_comm, ct_id);
}

void
fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	nvlist_t **hcl;
	uint_t n;
	int i, j;
	va_list ap;
	char *hcname, *hcid;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	/*
	 * Copy the bboard nvpairs to the pairs array.
	 */
	if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
	    != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	for (i = 0; i < n; i++) {
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
		    &hcname) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
	}

	/*
	 * Create the additional pairs from the passed-in varargs.
	 */
	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = n; i < npairs + n; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];
		(void) snprintf(idstr, sizeof (idstr), "%u", id);
		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
	}
	va_end(ap);

	/*
	 * Create the fmri hc list.
	 */
	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
	    npairs + n) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	for (i = 0; i < npairs + n; i++) {
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
	}

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
	}
}
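
/*
 * Example (illustrative only, not part of the original source): given an
 * existing hc-scheme "bboard" FMRI for an enclosure, a caller can extend it
 * with additional (hc-name, hc-id) pairs for the component that was actually
 * implicated. The names and instance numbers below are hypothetical.
 *
 *	fm_fmri_hc_create(fmri, FM_HC_SCHEME_VERSION, auth, NULL, bboard, 2,
 *	    "pcibus", 0,
 *	    "pcidev", 2);
 */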