/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Fault Management Architecture (FMA) Resource and Protocol Support
 *
 * The routines contained herein provide services to support kernel subsystems
 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
 *
 * Name-Value Pair Lists
 *
 * The embodiment of an FMA protocol element (event, fmri or authority) is a
 * name-value pair list (nvlist_t).  FMA-specific nvlist constructor and
 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
 * to create an nvpair list using custom allocators.  Callers may choose to
 * allocate either from the kernel memory allocator, or from a preallocated
 * buffer, useful in constrained contexts like high-level interrupt routines.
 *
 * Protocol Event and FMRI Construction
 *
 * Convenience routines are provided to construct nvlist events according to
 * the FMA Event Protocol and Naming Schema specification for ereports and
 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
 *
 * ENA Manipulation
 *
 * Routines to generate ENA formats 0, 1 and 2 are available as well as
 * routines to increment formats 1 and 2.  Individual fields within the
 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
 * fm_ena_format_get() and fm_ena_generation_get().
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/ddifm.h>
#include <sys/ddifm_impl.h>
#include <sys/spl.h>
#include <sys/dumphdr.h>
#include <sys/compress.h>
#include <sys/console.h>
#include <sys/panic.h>
#include <sys/kobj.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
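
/*
 * Illustrative sketch (not part of the implementation): a typical kernel
 * consumer of the interfaces below builds and posts an ereport roughly as
 * follows.  The class string, device path and payload member name used here
 * are hypothetical placeholders.
 *
 *	nvlist_t *ereport = fm_nvlist_create(NULL);
 *	nvlist_t *detector = fm_nvlist_create(NULL);
 *
 *	fm_fmri_dev_set(detector, DEV_SCHEME_VERSION0, NULL,
 *	    "/pci@0,0/pci8086,1234@1", NULL, NULL);
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example.err",
 *	    fm_ena_generate(0, FM_ENA_FMT1), detector,
 *	    "error-count", DATA_TYPE_UINT32, 1, NULL);
 *	fm_ereport_post(ereport, EVCH_SLEEP);
 *
 *	fm_nvlist_destroy(detector, FM_NVA_FREE);
 *	fm_nvlist_destroy(ereport, FM_NVA_FREE);
 */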

/*
 * URL and SUNW-MSG-ID value to display for fm_panic(), defined below.  These
 * values must be kept in sync with the FMA source code in usr/src/cmd/fm.
 */
static const char *fm_url = "http://illumos.org/msg";
static const char *fm_msgid = "SUNOS-8000-0G";
static char *volatile fm_panicstr = NULL;

errorq_t *ereport_errorq;
void *ereport_dumpbuf;
size_t ereport_dumplen;

static uint_t ereport_chanlen = ERPT_EVCH_MAX;
static evchan_t *ereport_chan = NULL;
static ulong_t ereport_qlen = 0;
static size_t ereport_size = 0;
static int ereport_cols = 80;

extern void fastreboot_disable_highpil(void);

/*
 * Common fault management kstats to record ereport generation
 * failures
 */

struct erpt_kstat {
	kstat_named_t	erpt_dropped;		/* num erpts dropped on post */
	kstat_named_t	erpt_set_failed;	/* num erpt set failures */
	kstat_named_t	fmri_set_failed;	/* num fmri set failures */
	kstat_named_t	payload_set_failed;	/* num payload set failures */
};

static struct erpt_kstat erpt_kstat_data = {
	{ "erpt-dropped", KSTAT_DATA_UINT64 },
	{ "erpt-set-failed", KSTAT_DATA_UINT64 },
	{ "fmri-set-failed", KSTAT_DATA_UINT64 },
	{ "payload-set-failed", KSTAT_DATA_UINT64 }
};

/*ARGSUSED*/
static void
fm_drain(void *private, void *data, errorq_elem_t *eep)
{
	nvlist_t *nvl = errorq_elem_nvl(ereport_errorq, eep);

	if (!panicstr)
		(void) fm_ereport_post(nvl, EVCH_TRYHARD);
	else
		fm_nvprint(nvl);
}

void
fm_init(void)
{
	kstat_t *ksp;

	(void) sysevent_evc_bind(FM_ERROR_CHAN,
	    &ereport_chan, EVCH_CREAT | EVCH_HOLD_PEND);

	(void) sysevent_evc_control(ereport_chan,
	    EVCH_SET_CHAN_LEN, &ereport_chanlen);

	if (ereport_qlen == 0)
		ereport_qlen = ERPT_MAX_ERRS * MAX(max_ncpus, 4);

	if (ereport_size == 0)
		ereport_size = ERPT_DATA_SZ;

	ereport_errorq = errorq_nvcreate("fm_ereport_queue",
	    (errorq_func_t)fm_drain, NULL, ereport_qlen, ereport_size,
	    FM_ERR_PIL, ERRORQ_VITAL);
	if (ereport_errorq == NULL)
		panic("failed to create required ereport error queue");

	ereport_dumpbuf = kmem_alloc(ereport_size, KM_SLEEP);
	ereport_dumplen = ereport_size;

	/* Initialize ereport allocation and generation kstats */
	ksp = kstat_create("unix", 0, "fm", "misc", KSTAT_TYPE_NAMED,
	    sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp != NULL) {
		ksp->ks_data = &erpt_kstat_data;
		kstat_install(ksp);
	} else {
		cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
	}
}

/*
 * Formatting utility function for fm_nvprintr.  We attempt to wrap chunks of
 * output so they aren't split across console lines, and return the end column.
 */
/*PRINTFLIKE4*/
static int
fm_printf(int depth, int c, int cols, const char *format, ...)
{
	va_list ap;
	int width;
	char c1;

	va_start(ap, format);
	width = vsnprintf(&c1, sizeof (c1), format, ap);
	va_end(ap);

	if (c + width >= cols) {
		console_printf("\n\r");
		c = 0;
		if (format[0] != ' ' && depth > 0) {
			console_printf(" ");
			c++;
		}
	}

	va_start(ap, format);
	console_vprintf(format, ap);
	va_end(ap);

	return ((c + width) % cols);
}
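
/*
 * For illustration only: with the default 80-column width, a small ereport
 * printed by fm_nvprint() below might appear on the console roughly as
 *
 *	ereport.io.example.err ena=8a3f61c924e00c01 detector=[ version=0
 *	 scheme="dev" device-path="/pci@0,0/pci8086,1234@1" ] error-count=1
 *
 * with the class printed first, the remaining members as name=value in hex,
 * nested nvlists bracketed, and wrapping governed by ereport_cols.  The
 * class, path and member names are hypothetical.
 */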

/*
 * Recursively print an nvlist in the specified column width and return the
 * column we end up in.  This function is called recursively by fm_nvprint(),
 * below.  We generically format the entire nvpair using hexadecimal
 * integers and strings, and elide any integer arrays.  Arrays are basically
 * used for cache dumps right now, so we suppress them so as not to overwhelm
 * the amount of console output we produce at panic time.  This can be further
 * enhanced as FMA technology grows based upon the needs of consumers.  All
 * FMA telemetry is logged using the dump device transport, so the console
 * output serves only as a fallback in case this procedure is unsuccessful.
 */
static int
fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
{
	nvpair_t *nvp;

	for (nvp = nvlist_next_nvpair(nvl, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

		data_type_t type = nvpair_type(nvp);
		const char *name = nvpair_name(nvp);

		boolean_t b;
		uint8_t i8;
		uint16_t i16;
		uint32_t i32;
		uint64_t i64;
		char *str;
		nvlist_t *cnv;

		if (strcmp(name, FM_CLASS) == 0)
			continue; /* already printed by caller */

		c = fm_printf(d, c, cols, " %s=", name);

		switch (type) {
		case DATA_TYPE_BOOLEAN:
			c = fm_printf(d + 1, c, cols, " 1");
			break;

		case DATA_TYPE_BOOLEAN_VALUE:
			(void) nvpair_value_boolean_value(nvp, &b);
			c = fm_printf(d + 1, c, cols, b ? "1" : "0");
			break;

		case DATA_TYPE_BYTE:
			(void) nvpair_value_byte(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT8:
			(void) nvpair_value_int8(nvp, (void *)&i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_UINT8:
			(void) nvpair_value_uint8(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT16:
			(void) nvpair_value_int16(nvp, (void *)&i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_UINT16:
			(void) nvpair_value_uint16(nvp, &i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_INT32:
			(void) nvpair_value_int32(nvp, (void *)&i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_UINT32:
			(void) nvpair_value_uint32(nvp, &i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_INT64:
			(void) nvpair_value_int64(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_UINT64:
			(void) nvpair_value_uint64(nvp, &i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_HRTIME:
			(void) nvpair_value_hrtime(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;
str : "<NULL>"); 303 break; 304 305 case DATA_TYPE_NVLIST: 306 c = fm_printf(d + 1, c, cols, "["); 307 (void) nvpair_value_nvlist(nvp, &cnv); 308 c = fm_nvprintr(cnv, d + 1, c, cols); 309 c = fm_printf(d + 1, c, cols, " ]"); 310 break; 311 312 case DATA_TYPE_NVLIST_ARRAY: { 313 nvlist_t **val; 314 uint_t i, nelem; 315 316 c = fm_printf(d + 1, c, cols, "["); 317 (void) nvpair_value_nvlist_array(nvp, &val, &nelem); 318 for (i = 0; i < nelem; i++) { 319 c = fm_nvprintr(val[i], d + 1, c, cols); 320 } 321 c = fm_printf(d + 1, c, cols, " ]"); 322 } 323 break; 324 325 case DATA_TYPE_BOOLEAN_ARRAY: 326 case DATA_TYPE_BYTE_ARRAY: 327 case DATA_TYPE_INT8_ARRAY: 328 case DATA_TYPE_UINT8_ARRAY: 329 case DATA_TYPE_INT16_ARRAY: 330 case DATA_TYPE_UINT16_ARRAY: 331 case DATA_TYPE_INT32_ARRAY: 332 case DATA_TYPE_UINT32_ARRAY: 333 case DATA_TYPE_INT64_ARRAY: 334 case DATA_TYPE_UINT64_ARRAY: 335 case DATA_TYPE_STRING_ARRAY: 336 c = fm_printf(d + 1, c, cols, "[...]"); 337 break; 338 case DATA_TYPE_UNKNOWN: 339 c = fm_printf(d + 1, c, cols, "<unknown>"); 340 break; 341 } 342 } 343 344 return (c); 345 } 346 347 void 348 fm_nvprint(nvlist_t *nvl) 349 { 350 char *class; 351 int c = 0; 352 353 console_printf("\r"); 354 355 if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0) 356 c = fm_printf(0, c, ereport_cols, "%s", class); 357 358 if (fm_nvprintr(nvl, 0, c, ereport_cols) != 0) 359 console_printf("\n"); 360 361 console_printf("\n"); 362 } 363 364 /* 365 * Wrapper for panic() that first produces an FMA-style message for admins. 366 * Normally such messages are generated by fmd(1M)'s syslog-msgs agent: this 367 * is the one exception to that rule and the only error that gets messaged. 368 * This function is intended for use by subsystems that have detected a fatal 369 * error and enqueued appropriate ereports and wish to then force a panic. 370 */ 371 /*PRINTFLIKE1*/ 372 void 373 fm_panic(const char *format, ...) 374 { 375 va_list ap; 376 377 (void) atomic_cas_ptr((void *)&fm_panicstr, NULL, (void *)format); 378 #if defined(__x86) 379 fastreboot_disable_highpil(); 380 #endif /* __x86 */ 381 va_start(ap, format); 382 vpanic(format, ap); 383 va_end(ap); 384 } 385 386 /* 387 * Simply tell the caller if fm_panicstr is set, ie. an fma event has 388 * caused the panic. If so, something other than the default panic 389 * diagnosis method will diagnose the cause of the panic. 390 */ 391 int 392 is_fm_panic() 393 { 394 if (fm_panicstr) 395 return (1); 396 else 397 return (0); 398 } 399 400 /* 401 * Print any appropriate FMA banner message before the panic message. This 402 * function is called by panicsys() and prints the message for fm_panic(). 403 * We print the message here so that it comes after the system is quiesced. 404 * A one-line summary is recorded in the log only (cmn_err(9F) with "!" prefix). 405 * The rest of the message is for the console only and not needed in the log, 406 * so it is printed using console_printf(). We break it up into multiple 407 * chunks so as to avoid overflowing any small legacy prom_printf() buffers. 

/*
 * Print any appropriate FMA banner message before the panic message.  This
 * function is called by panicsys() and prints the message for fm_panic().
 * We print the message here so that it comes after the system is quiesced.
 * A one-line summary is recorded in the log only (cmn_err(9F) with "!" prefix).
 * The rest of the message is for the console only and not needed in the log,
 * so it is printed using console_printf().  We break it up into multiple
 * chunks so as to avoid overflowing any small legacy prom_printf() buffers.
 */
void
fm_banner(void)
{
	timespec_t tod;
	hrtime_t now;

	if (!fm_panicstr)
		return; /* panic was not initiated by fm_panic(); do nothing */

	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	cmn_err(CE_NOTE, "!SUNW-MSG-ID: %s, "
	    "TYPE: Error, VER: 1, SEVERITY: Major\n", fm_msgid);

	console_printf(
"\n\rSUNW-MSG-ID: %s, TYPE: Error, VER: 1, SEVERITY: Major\n"
"EVENT-TIME: 0x%lx.0x%lx (0x%llx)\n",
	    fm_msgid, tod.tv_sec, tod.tv_nsec, (u_longlong_t)now);

	console_printf(
"PLATFORM: %s, CSN: -, HOSTNAME: %s\n"
"SOURCE: %s, REV: %s %s\n",
	    platform, utsname.nodename, utsname.sysname,
	    utsname.release, utsname.version);

	console_printf(
"DESC: Errors have been detected that require a reboot to ensure system\n"
"integrity. See %s/%s for more information.\n",
	    fm_url, fm_msgid);

	console_printf(
"AUTO-RESPONSE: Solaris will attempt to save and diagnose the error telemetry\n"
"IMPACT: The system will sync files, save a crash dump if needed, and reboot\n"
"REC-ACTION: Save the error summary below in case telemetry cannot be saved\n");

	console_printf("\n");
}

/*
 * Utility function to write all of the pending ereports to the dump device.
 * This function is called at either normal reboot or panic time, and simply
 * iterates over the in-transit messages in the ereport sysevent channel.
 */
void
fm_ereport_dump(void)
{
	evchanq_t *chq;
	sysevent_t *sep;
	erpt_dump_t ed;

	timespec_t tod;
	hrtime_t now;
	char *buf;
	size_t len;

	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		if (ereport_errorq != NULL)
			errorq_drain(ereport_errorq);
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	/*
	 * In the panic case, sysevent_evc_walk_init() will return NULL.
	 */
	if ((chq = sysevent_evc_walk_init(ereport_chan, NULL)) == NULL &&
	    !panicstr)
		return; /* event channel isn't initialized yet */

	while ((sep = sysevent_evc_walk_step(chq)) != NULL) {
		if ((buf = sysevent_evc_event_attr(sep, &len)) == NULL)
			break;

		ed.ed_magic = ERPT_MAGIC;
		ed.ed_chksum = checksum32(buf, len);
		ed.ed_size = (uint32_t)len;
		ed.ed_pad = 0;
		ed.ed_hrt_nsec = SE_TIME(sep);
		ed.ed_hrt_base = now;
		ed.ed_tod_base.sec = tod.tv_sec;
		ed.ed_tod_base.nsec = tod.tv_nsec;

		dumpvp_write(&ed, sizeof (ed));
		dumpvp_write(buf, len);
	}

	sysevent_evc_walk_fini(chq);
}
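
/*
 * For reference, each pending ereport written above is framed in the dump as
 * an erpt_dump_t header followed immediately by the packed nvlist data:
 *
 *	+-----------------+----------------------------+
 *	| erpt_dump_t ed  | ed.ed_size bytes of nvlist |
 *	+-----------------+----------------------------+
 *
 * ed_magic and ed_chksum allow the post-reboot consumer to validate each
 * record when the dump is later processed.
 */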

/*
 * Post an error report (ereport) to the sysevent error channel.  The error
 * channel must be established with a prior call to sysevent_evc_create()
 * before publication may occur.
 */
void
fm_ereport_post(nvlist_t *ereport, int evc_flag)
{
	size_t nvl_size = 0;
	evchan_t *error_chan;

	(void) nvlist_size(ereport, &nvl_size, NV_ENCODE_NATIVE);
	if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		return;
	}

	if (sysevent_evc_bind(FM_ERROR_CHAN, &error_chan,
	    EVCH_CREAT|EVCH_HOLD_PEND) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		return;
	}

	if (sysevent_evc_publish(error_chan, EC_FM, ESC_FM_ERROR,
	    SUNW_VENDOR, FM_PUB, ereport, evc_flag) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		(void) sysevent_evc_unbind(error_chan);
		return;
	}
	(void) sysevent_evc_unbind(error_chan);
}

/*
 * Wrappers for FM nvlist allocators
 */
/* ARGSUSED */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
	return (kmem_zalloc(size, KM_SLEEP));
}

/* ARGSUSED */
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
	kmem_free(buf, size);
}

const nv_alloc_ops_t fm_mem_alloc_ops = {
	NULL,
	NULL,
	i_fm_alloc,
	i_fm_free,
	NULL
};

/*
 * Create and initialize a new nv_alloc_t for a fixed buffer, buf.  A pointer
 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
 * is returned to indicate that the nv_alloc structure could not be created.
 */
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
	nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

	if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
		kmem_free(nvhdl, sizeof (nv_alloc_t));
		return (NULL);
	}

	return (nvhdl);
}

/*
 * Destroy a previously allocated nv_alloc structure.  The fixed buffer
 * associated with nva must be freed by the caller.
 */
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
	nv_alloc_fini(nva);
	kmem_free(nva, sizeof (nv_alloc_t));
}

/*
 * Create a new nv list.  A pointer to a new nv list structure is returned
 * upon success or NULL is returned to indicate that the structure could
 * not be created.  The newly created nv list is created and managed by the
 * operations installed in nva.  If nva is NULL, the default FMA nva
 * operations are installed and used.
 *
 * When called from the kernel and nva == NULL, this function must be called
 * from passive kernel context with no locks held that can prevent a
 * sleeping memory allocation from occurring.  Otherwise, this function may
 * be called from other kernel contexts as long as a valid nva created via
 * fm_nva_xcreate() is supplied.
 */
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
	int hdl_alloced = 0;
	nvlist_t *nvl;
	nv_alloc_t *nvhdl;

	if (nva == NULL) {
		nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

		if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
			kmem_free(nvhdl, sizeof (nv_alloc_t));
			return (NULL);
		}
		hdl_alloced = 1;
	} else {
		nvhdl = nva;
	}

	if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
		if (hdl_alloced) {
			nv_alloc_fini(nvhdl);
			kmem_free(nvhdl, sizeof (nv_alloc_t));
		}
		return (NULL);
	}

	return (nvl);
}
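
/*
 * Illustrative sketch: a caller running in a constrained context (for
 * example at high PIL) can avoid sleeping allocations by pairing
 * fm_nva_xcreate() with a preallocated buffer; the buffer name and size
 * below are hypothetical.
 *
 *	static char fm_buf[ERPT_DATA_SZ];
 *
 *	nv_alloc_t *nva = fm_nva_xcreate(fm_buf, sizeof (fm_buf));
 *	nvlist_t *nvl = fm_nvlist_create(nva);
 *	...
 *	fm_nvlist_destroy(nvl, FM_NVA_RETAIN);
 *	fm_nva_xdestroy(nva);
 */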

/*
 * Destroy a previously allocated nvlist structure.  flag indicates whether
 * or not the associated nva structure should be freed (FM_NVA_FREE) or
 * retained (FM_NVA_RETAIN).  Retaining the nv alloc structure allows
 * it to be re-used for future nvlist creation operations.
 */
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);

	nvlist_free(nvl);

	if (nva != NULL) {
		if (flag == FM_NVA_FREE)
			fm_nva_xdestroy(nva);
	}
}

int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
	int nelem, ret = 0;
	data_type_t type;

	while (ret == 0 && name != NULL) {
		type = va_arg(ap, data_type_t);
		switch (type) {
		case DATA_TYPE_BYTE:
			ret = nvlist_add_byte(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_BYTE_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_byte_array(payload, name,
			    va_arg(ap, uchar_t *), nelem);
			break;
		case DATA_TYPE_BOOLEAN_VALUE:
			ret = nvlist_add_boolean_value(payload, name,
			    va_arg(ap, boolean_t));
			break;
		case DATA_TYPE_BOOLEAN_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_boolean_array(payload, name,
			    va_arg(ap, boolean_t *), nelem);
			break;
		case DATA_TYPE_INT8:
			ret = nvlist_add_int8(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int8_array(payload, name,
			    va_arg(ap, int8_t *), nelem);
			break;
		case DATA_TYPE_UINT8:
			ret = nvlist_add_uint8(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint8_array(payload, name,
			    va_arg(ap, uint8_t *), nelem);
			break;
		case DATA_TYPE_INT16:
			ret = nvlist_add_int16(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int16_array(payload, name,
			    va_arg(ap, int16_t *), nelem);
			break;
		case DATA_TYPE_UINT16:
			ret = nvlist_add_uint16(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint16_array(payload, name,
			    va_arg(ap, uint16_t *), nelem);
			break;
		case DATA_TYPE_INT32:
			ret = nvlist_add_int32(payload, name,
			    va_arg(ap, int32_t));
			break;
		case DATA_TYPE_INT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int32_array(payload, name,
			    va_arg(ap, int32_t *), nelem);
			break;
		case DATA_TYPE_UINT32:
			ret = nvlist_add_uint32(payload, name,
			    va_arg(ap, uint32_t));
			break;
		case DATA_TYPE_UINT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint32_array(payload, name,
			    va_arg(ap, uint32_t *), nelem);
			break;
		case DATA_TYPE_INT64:
			ret = nvlist_add_int64(payload, name,
			    va_arg(ap, int64_t));
			break;
		case DATA_TYPE_INT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int64_array(payload, name,
			    va_arg(ap, int64_t *), nelem);
			break;
		case DATA_TYPE_UINT64:
			ret = nvlist_add_uint64(payload, name,
			    va_arg(ap, uint64_t));
			break;
		case DATA_TYPE_UINT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint64_array(payload, name,
			    va_arg(ap, uint64_t *), nelem);
			break;
		case DATA_TYPE_STRING:
			ret = nvlist_add_string(payload, name,
			    va_arg(ap, char *));
			break;
		case DATA_TYPE_STRING_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_string_array(payload, name,
			    va_arg(ap, char **), nelem);
			break;
		case DATA_TYPE_NVLIST:
			ret = nvlist_add_nvlist(payload, name,
			    va_arg(ap, nvlist_t *));
			break;
		case DATA_TYPE_NVLIST_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_nvlist_array(payload, name,
			    va_arg(ap, nvlist_t **), nelem);
			break;
		default:
			ret = EINVAL;
		}

		name = va_arg(ap, char *);
	}
	return (ret);
}
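
/*
 * Illustrative sketch: the variable argument list consumed by
 * i_fm_payload_set() above is a NULL-terminated sequence of
 * (name, DATA_TYPE_*, [nelem,] value) tuples, for example (member names
 * here are hypothetical):
 *
 *	fm_payload_set(ereport,
 *	    "afsr", DATA_TYPE_UINT64, afsr,
 *	    "syndrome-list", DATA_TYPE_UINT16_ARRAY, 4, synd,
 *	    NULL);
 */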

void
fm_payload_set(nvlist_t *payload, ...)
{
	int ret;
	const char *name;
	va_list ap;

	va_start(ap, payload);
	name = va_arg(ap, char *);
	ret = i_fm_payload_set(payload, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
}

/*
 * Set-up and validate the members of an ereport event according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	class			string		ereport
 *	version			uint8_t		0
 *	ena			uint64_t	<ena>
 *	detector		nvlist_t	<detector>
 *	ereport-payload		nvlist_t	<var args>
 *
 * We don't actually add a 'version' member to the payload.  Really,
 * the version quoted to us by our caller is that of the category 1
 * "ereport" event class (and we require FM_EREPORT_VERS0) but
 * the payload version of the actual leaf class event under construction
 * may be something else.  Callers should supply a version in the varargs,
 * or (better) we could take two version arguments - one for the
 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
 * for the leaf class.
 */
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
    uint64_t ena, const nvlist_t *detector, ...)
{
	char ereport_class[FM_MAX_CLASS];
	const char *name;
	va_list ap;
	int ret;

	if (version != FM_EREPORT_VERS0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
	    FM_EREPORT_CLASS, erpt_class);
	if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
	    (nvlist_t *)detector) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	va_start(ap, detector);
	name = va_arg(ap, const char *);
	ret = i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}

/*
 * Set-up and validate the members of an hc fmri according to:
 *
 *	Member name		Type		Value
 *	===================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	hc-name			string		<name>
 *	hc-id			string		<id>
 *
 * Note that auth and hc-id are optional members.
 */

#define	HC_MAXPAIRS	20
#define	HC_MAXNAMELEN	50

static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
	if (version != FM_HC_SCHEME_VERSION) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
	    nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	return (1);
}

void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	va_list ap;
	int i;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = 0; i < npairs; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];

		(void) snprintf(idstr, sizeof (idstr), "%u", id);

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
	va_end(ap);

	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);

	for (i = 0; i < npairs; i++)
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}
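
/*
 * Illustrative sketch: an hc FMRI naming the second DIMM on the first
 * memory controller might be built as follows (the hc-names used here are
 * hypothetical):
 *
 *	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
 *	    "motherboard", 0, "memory-controller", 0, "dimm", 1);
 */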

/*
 * Set-up and validate the members of a dev fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	devpath			string		<devpath>
 *	[devid]			string		<devid>
 *	[target-port-l0id]	string		<target-port-lun0-id>
 *
 * Note that auth and devid are optional members.
 */
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
    const char *devpath, const char *devid, const char *tpl0)
{
	int err = 0;

	if (version != DEV_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
	err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);

	if (auth != NULL) {
		err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth);
	}

	err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);

	if (devid != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);

	if (tpl0 != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);

	if (err)
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}

/*
 * Set-up and validate the members of a cpu fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	cpuid			uint32_t	<cpu_id>
 *	cpumask			uint8_t		<cpu_mask>
 *	serial			uint64_t	<serial_id>
 *
 * Note that auth, cpumask, serial are optional members.
 *
 */
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
    uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
	uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;

	if (version < CPU_SCHEME_VERSION1) {
		atomic_inc_64(failedp);
		return;
	}

	if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
		atomic_inc_64(failedp);
		return;
	}

	if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
	    FM_FMRI_SCHEME_CPU) != 0) {
		atomic_inc_64(failedp);
		return;
	}

	if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0)
		atomic_inc_64(failedp);

	if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
		atomic_inc_64(failedp);

	if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
	    *cpu_maskp) != 0)
		atomic_inc_64(failedp);

	if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
	    FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
		atomic_inc_64(failedp);
}
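
/*
 * Illustrative sketch: a detector FMRI for the current CPU, with no
 * authority, mask or serial number, could be constructed as
 *
 *	fm_fmri_cpu_set(fmri, CPU_SCHEME_VERSION1, NULL,
 *	    CPU->cpu_id, NULL, NULL);
 */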

/*
 * Set-up and validate the members of a mem fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>		[optional]
 *	unum			string		<unum>
 *	serial			string		<serial>	[optional*]
 *	offset			uint64_t	<offset>	[optional]
 *
 *	* serial is required if offset is present
 */
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    const char *unum, const char *serial, uint64_t offset)
{
	if (version != MEM_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (!serial && (offset != (uint64_t)-1)) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (auth != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}

	if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	if (serial != NULL) {
		if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
		    (char **)&serial, 1) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
		if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
		    FM_FMRI_MEM_OFFSET, offset) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}

void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
    uint64_t vdev_guid)
{
	if (version != ZFS_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	if (vdev_guid != 0) {
		if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}

uint64_t
fm_ena_increment(uint64_t ena)
{
	uint64_t new_ena;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
		break;
	case FM_ENA_FMT2:
		new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
		break;
	default:
		new_ena = 0;
	}

	return (new_ena);
}

uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
	uint64_t ena = 0;

	switch (format) {
	case FM_ENA_FMT1:
		if (timestamp) {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((timestamp << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		} else {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((gethrtime_waitfree() << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		}
		break;
	case FM_ENA_FMT2:
		ena = (uint64_t)((format & ENA_FORMAT_MASK) |
		    ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
		break;
	default:
		break;
	}

	return (ena);
}

uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
	return (fm_ena_generate_cpu(timestamp, CPU->cpu_id, format));
}

uint64_t
fm_ena_generation_get(uint64_t ena)
{
	uint64_t gen;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
		break;
	case FM_ENA_FMT2:
		gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
		break;
	default:
		gen = 0;
		break;
	}

	return (gen);
}

uchar_t
fm_ena_format_get(uint64_t ena)
{

	return (ENA_FORMAT(ena));
}
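
/*
 * Illustrative sketch: a format-1 ENA produced by fm_ena_generate() above
 * can be decomposed again with the accessors in this file, e.g.
 *
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *
 *	fm_ena_format_get(ena);		returns FM_ENA_FMT1
 *	fm_ena_time_get(ena);		returns the embedded timestamp bits
 *	fm_ena_id_get(ena);		returns the embedded id bits
 */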

uint64_t
fm_ena_id_get(uint64_t ena)
{
	uint64_t id;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
		break;
	case FM_ENA_FMT2:
		id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
		break;
	default:
		id = 0;
	}

	return (id);
}

uint64_t
fm_ena_time_get(uint64_t ena)
{
	uint64_t time;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
		break;
	case FM_ENA_FMT2:
		time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
		break;
	default:
		time = 0;
	}

	return (time);
}

/*
 * Convert a getpcstack() trace to symbolic name+offset, and add the resulting
 * string array to a Fault Management ereport as FM_EREPORT_PAYLOAD_NAME_STACK.
 */
void
fm_payload_stack_add(nvlist_t *payload, const pc_t *stack, int depth)
{
	int i;
	char *sym;
	ulong_t off;
	char *stkpp[FM_STK_DEPTH];
	char buf[FM_STK_DEPTH * FM_SYM_SZ];
	char *stkp = buf;

	for (i = 0; i < depth && i != FM_STK_DEPTH; i++, stkp += FM_SYM_SZ) {
		if ((sym = kobj_getsymname(stack[i], &off)) != NULL)
			(void) snprintf(stkp, FM_SYM_SZ, "%s+%lx", sym, off);
		else
			(void) snprintf(stkp, FM_SYM_SZ, "%lx", (long)stack[i]);
		stkpp[i] = stkp;
	}

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_STACK,
	    DATA_TYPE_STRING_ARRAY, depth, stkpp, NULL);
}
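
/*
 * Illustrative sketch: a typical caller captures its own stack and attaches
 * it to an ereport payload with fm_payload_stack_add() above, e.g.
 *
 *	pc_t stack[FM_STK_DEPTH];
 *	int depth = getpcstack(stack, FM_STK_DEPTH);
 *
 *	fm_payload_stack_add(ereport, stack, depth);
 */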

void
print_msg_hwerr(ctid_t ct_id, proc_t *p)
{
	uprintf("Killed process %d (%s) in contract id %d "
	    "due to hardware error\n", p->p_pid, p->p_user.u_comm, ct_id);
}

void
fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	nvlist_t **hcl;
	uint_t n;
	int i, j;
	va_list ap;
	char *hcname, *hcid;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	/*
	 * copy the bboard nvpairs to the pairs array
	 */
	if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
	    != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	for (i = 0; i < n; i++) {
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
		    &hcname) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}

	/*
	 * create the remaining pairs from the caller's varargs
	 */
	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = n; i < npairs + n; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];
		(void) snprintf(idstr, sizeof (idstr), "%u", id);
		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}
	va_end(ap);

	/*
	 * Create the fmri hc list
	 */
	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
	    npairs + n) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	for (i = 0; i < npairs + n; i++) {
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
	}

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}
}
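
/*
 * Illustrative sketch: fm_fmri_hc_create() above extends an existing
 * "bboard" hc FMRI with additional (name, id) pairs supplied as varargs,
 * e.g. appending a hypothetical "dimm" node to a chassis-level FMRI:
 *
 *	fm_fmri_hc_create(fmri, FM_HC_SCHEME_VERSION, auth, snvl, bboard, 1,
 *	    "dimm", 2);
 */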