/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/log.h>
#include <sys/systm.h>
#include <sys/modctl.h>
#include <sys/errorq.h>
#include <sys/controlregs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/sysevent.h>
#include <sys/pghw.h>
#include <sys/cyclic.h>
#include <sys/pci_cfgspace.h>
#include <sys/mc_intel.h>
#include <sys/smbios.h>
#include "nb5000.h"
#include "nb_log.h"
#include "dimm_phys.h"

static uint32_t uerrcnt[2];
static uint32_t cerrcnta[2][2];
static uint32_t cerrcntb[2][2];
static uint32_t cerrcntc[2][2];
static uint32_t cerrcntd[2][2];
static nb_logout_t nb_log;

struct mch_error_code {
	int intel_error_list;	/* error number in Chipset Error List */
	uint32_t emask;		/* mask for machine check */
	uint32_t error_bit;	/* error bit in fault register */
};

static struct mch_error_code fat_fbd_error_code[] = {
	{ 23, EMASK_FBD_M23, ERR_FAT_FBD_M23 },
	{ 3, EMASK_FBD_M3, ERR_FAT_FBD_M3 },
	{ 2, EMASK_FBD_M2, ERR_FAT_FBD_M2 },
	{ 1, EMASK_FBD_M1, ERR_FAT_FBD_M1 }
};

static int
intel_fat_fbd_err(uint32_t fat_fbd)
{
	int rt = -1;
	int nerr = 0;
	uint32_t emask_fbd = 0;
	int i;
	int sz;

	sz = sizeof (fat_fbd_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (fat_fbd & fat_fbd_error_code[i].error_bit) {
			rt = fat_fbd_error_code[i].intel_error_list;
			emask_fbd |= fat_fbd_error_code[i].emask;
			nerr++;
		}
	}

	if (emask_fbd)
		nb_fbd_mask_mc(emask_fbd);
	if (nerr > 1)
		rt = -1;
	return (rt);
}

static char *
fat_memory_error(const nb_regs_t *rp, void *data)
{
	int channel;
	uint32_t ferr_fat_fbd, nrecmemb;
	uint32_t nrecmema;
	char *intr = "nb.unknown";
	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;

	ferr_fat_fbd = rp->nb.fat_fbd_regs.ferr_fat_fbd;
	if ((ferr_fat_fbd & ERR_FAT_FBD_MASK) == 0) {
		sp->intel_error_list =
		    intel_fat_fbd_err(rp->nb.fat_fbd_regs.nerr_fat_fbd);
		sp->branch = -1;
		sp->channel = -1;
		sp->rank = -1;
		sp->dimm = -1;
		sp->bank = -1;
		sp->cas = -1;
		sp->ras = -1;
		sp->pa = -1LL;
		sp->offset = -1;
		return (intr);
	}
	sp->intel_error_list = intel_fat_fbd_err(ferr_fat_fbd);
	channel = (ferr_fat_fbd >> 28) & 3;
	sp->branch = channel >> 1;
	sp->channel = channel;
	if ((ferr_fat_fbd & (ERR_FAT_FBD_M2|ERR_FAT_FBD_M1)) != 0) {
		if ((ferr_fat_fbd & ERR_FAT_FBD_M1) != 0)
			intr = "nb.fbd.alert";	/* Alert on FB-DIMM M1 */
		else
			intr = "nb.fbd.crc";	/* CRC error FB_DIMM M2 */
		nrecmema = rp->nb.fat_fbd_regs.nrecmema;
		nrecmemb = rp->nb.fat_fbd_regs.nrecmemb;
		sp->rank = (nrecmema >> 8) & RANK_MASK;
		sp->dimm = sp->rank >> 1;
		sp->bank = (nrecmema >> 12) & BANK_MASK;
		sp->cas = (nrecmemb >> 16) & CAS_MASK;
		sp->ras = nrecmemb & RAS_MASK;
		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
		    sp->cas);
		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
		    sp->ras, sp->cas);
	} else {
		if ((ferr_fat_fbd & ERR_FAT_FBD_M3) != 0)
			intr = "nb.fbd.otf";	/* thermal temp > Tmid M3 */
		else if ((ferr_fat_fbd & ERR_FAT_FBD_M23) != 0) {
			intr = "nb.fbd.reset_timeout";
			sp->channel = -1;
		}
		sp->rank = -1;
		sp->dimm = -1;
		sp->bank = -1;
		sp->cas = -1;
		sp->ras = -1;
		sp->pa = -1LL;
		sp->offset = -1;
	}
	return (intr);
}


static struct mch_error_code nf_fbd_error_code[] = {
	{ 29, EMASK_FBD_M29, ERR_NF_FBD_M29 },
	{ 28, EMASK_FBD_M28, ERR_NF_FBD_M28 },
	{ 27, EMASK_FBD_M27, ERR_NF_FBD_M27 },
	{ 26, EMASK_FBD_M26, ERR_NF_FBD_M26 },
	{ 25, EMASK_FBD_M25, ERR_NF_FBD_M25 },
	{ 24, EMASK_FBD_M24, ERR_NF_FBD_M24 },
	{ 22, EMASK_FBD_M22, ERR_NF_FBD_M22 },
	{ 21, EMASK_FBD_M21, ERR_NF_FBD_M21 },
	{ 20, EMASK_FBD_M20, ERR_NF_FBD_M20 },
	{ 19, EMASK_FBD_M19, ERR_NF_FBD_M19 },
	{ 18, EMASK_FBD_M18, ERR_NF_FBD_M18 },
	{ 17, EMASK_FBD_M17, ERR_NF_FBD_M17 },
	{ 16, EMASK_FBD_M16, ERR_NF_FBD_M16 },
	{ 15, EMASK_FBD_M15, ERR_NF_FBD_M15 },
	{ 14, EMASK_FBD_M14, ERR_NF_FBD_M14 },
	{ 13, EMASK_FBD_M13, ERR_NF_FBD_M13 },
	{ 12, EMASK_FBD_M12, ERR_NF_FBD_M12 },
	{ 11, EMASK_FBD_M11, ERR_NF_FBD_M11 },
	{ 10, EMASK_FBD_M10, ERR_NF_FBD_M10 },
	{ 9, EMASK_FBD_M9, ERR_NF_FBD_M9 },
	{ 8, EMASK_FBD_M8, ERR_NF_FBD_M8 },
	{ 7, EMASK_FBD_M7, ERR_NF_FBD_M7 },
	{ 6, EMASK_FBD_M6, ERR_NF_FBD_M6 },
	{ 5, EMASK_FBD_M5, ERR_NF_FBD_M5 },
	{ 4, EMASK_FBD_M4, ERR_NF_FBD_M4 }
};

static int
intel_nf_fbd_err(uint32_t nf_fbd)
{
	int rt = -1;
	int nerr = 0;
	uint32_t emask_fbd = 0;
	int i;
	int sz;

	sz = sizeof (nf_fbd_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (nf_fbd & nf_fbd_error_code[i].error_bit) {
			rt = nf_fbd_error_code[i].intel_error_list;
			emask_fbd |= nf_fbd_error_code[i].emask;
			nerr++;
		}
	}
	if (emask_fbd)
		nb_fbd_mask_mc(emask_fbd);
	if (nerr > 1)
		rt = -1;
	return (rt);
}

static char *
nf_memory_error(const nb_regs_t *rp, void *data)
{
	uint32_t ferr_nf_fbd, recmemb, redmemb;
	uint32_t recmema;
	int branch, channel, ecc_locator;
	char *intr = "nb.unknown";
	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;

	sp->rank = -1;
	sp->dimm = -1;
	sp->bank = -1;
	sp->cas = -1;
	sp->ras = -1LL;
	sp->pa = -1LL;
	sp->offset = -1;
	ferr_nf_fbd = rp->nb.nf_fbd_regs.ferr_nf_fbd;
	if ((ferr_nf_fbd & ERR_NF_FBD_MASK) == 0) {
		sp->branch = -1;
		sp->channel = -1;
		sp->intel_error_list =
		    intel_nf_fbd_err(rp->nb.nf_fbd_regs.nerr_nf_fbd);
		return (intr);
	}
	sp->intel_error_list = intel_nf_fbd_err(ferr_nf_fbd);
	channel = (ferr_nf_fbd >> ERR_FBD_CH_SHIFT) & 3;
	branch = channel >> 1;
	sp->branch = branch;
	sp->channel = channel;
	if (ferr_nf_fbd & ERR_NF_FBD_MASK) {
		if (ferr_nf_fbd & ERR_NF_FBD_ECC_UE) {
			/*
			 * uncorrectable ECC M4 - M12
			 * we can only isolate to pair of dimms
			 * for single dimm configuration let eversholt
			 * sort it out without needing a special rule
			 */
			sp->channel = -1;
			recmema = rp->nb.nf_fbd_regs.recmema;
			recmemb = rp->nb.nf_fbd_regs.recmemb;
			sp->rank = (recmema >> 8) & RANK_MASK;
			sp->bank = (recmema >> 12) & BANK_MASK;
			sp->cas = (recmemb >> 16) & CAS_MASK;
			sp->ras = recmemb & RAS_MASK;
			intr = "nb.mem_ue";
		} else if (ferr_nf_fbd & ERR_NF_FBD_M13) {
			/*
			 * write error M13
			 * we can only isolate to pair of dimms
			 */
			sp->channel = -1;
			if (nb_mode != NB_MEMORY_MIRROR) {
				recmema = rp->nb.nf_fbd_regs.recmema;
				recmemb = rp->nb.nf_fbd_regs.recmemb;
				sp->rank = (recmema >> 8) & RANK_MASK;
				sp->bank = (recmema >> 12) & BANK_MASK;
				sp->cas = (recmemb >> 16) & CAS_MASK;
				sp->ras = recmemb & RAS_MASK;
			}
			intr = "nb.fbd.ma"; /* memory alert */
		} else if (ferr_nf_fbd & ERR_NF_FBD_MA) { /* M14, M15 and M21 */
			intr = "nb.fbd.ch"; /* FBD on channel */
		} else if ((ferr_nf_fbd & ERR_NF_FBD_ECC_CE) != 0) {
			/* correctable ECC M17-M20 */
			recmema = rp->nb.nf_fbd_regs.recmema;
			recmemb = rp->nb.nf_fbd_regs.recmemb;
			sp->rank = (recmema >> 8) & RANK_MASK;
			redmemb = rp->nb.nf_fbd_regs.redmemb;
			ecc_locator = redmemb & 0x3ffff;
			if (ecc_locator & 0x1ff)
				sp->channel = branch << 1;
			else if (ecc_locator & 0x3fe00)
				sp->channel = (branch << 1) + 1;
			sp->dimm = sp->rank >> 1;
			sp->bank = (recmema >> 12) & BANK_MASK;
			sp->cas = (recmemb >> 16) & CAS_MASK;
			sp->ras = recmemb & RAS_MASK;
			intr = "nb.mem_ce";
		} else if ((ferr_nf_fbd & ERR_NF_FBD_SPARE) != 0) {
			/* spare dimm M27, M28 */
			intr = "nb.mem_ds";
			sp->channel = -1;
			if (rp->nb.nf_fbd_regs.spcps & SPCPS_SPARE_DEPLOYED) {
				sp->rank =
				    SPCPS_FAILED_RANK(rp->nb.nf_fbd_regs.spcps);
				nb_used_spare_rank(sp->branch, sp->rank);
				nb_config_gen++;
			}
		} else if ((ferr_nf_fbd & ERR_NF_FBD_M22) != 0) {
			intr = "nb.spd";	/* SPD protocol */
		}
	}
	if (sp->ras != -1) {
		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
		    sp->cas);
		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
		    sp->ras, sp->cas);
	}
	return (intr);
}

static struct mch_error_code fat_int_error_code[] = {
	{ 14, EMASK_INT_B14, ERR_FAT_INT_B14 },
	{ 12, EMASK_INT_B12, ERR_FAT_INT_B12 },
	{ 25, EMASK_INT_B25, ERR_FAT_INT_B25 },
	{ 23, EMASK_INT_B23, ERR_FAT_INT_B23 },
	{ 21, EMASK_INT_B21, ERR_FAT_INT_B21 },
	{ 7, EMASK_INT_B7, ERR_FAT_INT_B7 },
	{ 4, EMASK_INT_B4, ERR_FAT_INT_B4 },
	{ 3, EMASK_INT_B3, ERR_FAT_INT_B3 },
	{ 2, EMASK_INT_B2, ERR_FAT_INT_B2 },
	{ 1, EMASK_INT_B1, ERR_FAT_INT_B1 }
};

static struct mch_error_code nf_int_error_code[] = {
	{ 27, 0, ERR_NF_INT_B27 },
	{ 24, 0, ERR_NF_INT_B24 },
	{ 22, EMASK_INT_B22, ERR_NF_INT_B22 },
	{ 20, EMASK_INT_B20, ERR_NF_INT_B20 },
	{ 19, EMASK_INT_B19, ERR_NF_INT_B19 },
	{ 18, 0, ERR_NF_INT_B18 },
	{ 17, 0, ERR_NF_INT_B17 },
	{ 16, 0, ERR_NF_INT_B16 },
	{ 11, EMASK_INT_B11, ERR_NF_INT_B11 },
	{ 10, EMASK_INT_B10, ERR_NF_INT_B10 },
	{ 9, EMASK_INT_B9, ERR_NF_INT_B9 },
	{ 8, EMASK_INT_B8, ERR_NF_INT_B8 },
	{ 6, EMASK_INT_B6, ERR_NF_INT_B6 },
	{ 5, EMASK_INT_B5, ERR_NF_INT_B5 }
};

static int
intel_int_err(uint16_t err_fat_int, uint16_t err_nf_int)
{
	int rt = -1;
	int nerr = 0;
	uint32_t emask_int = 0;
	int i;
	int sz;

	sz = sizeof (fat_int_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_fat_int & fat_int_error_code[i].error_bit) {
			rt = fat_int_error_code[i].intel_error_list;
			emask_int |= fat_int_error_code[i].emask;
			nerr++;
		}
	}

	if (nb_chipset == INTEL_NB_5400 &&
	    (err_nf_int & NERR_NF_5400_INT_B26) != 0) {
		err_nf_int &= ~NERR_NF_5400_INT_B26;
		rt = 26;
		nerr++;
	}

	if (rt)
		err_nf_int &= ~ERR_NF_INT_B18;

	sz = sizeof (nf_int_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_nf_int & nf_int_error_code[i].error_bit) {
			rt = nf_int_error_code[i].intel_error_list;
			emask_int |= nf_int_error_code[i].emask;
			nerr++;
		}
	}

	if (emask_int)
		nb_int_mask_mc(emask_int);
	if (nerr > 1)
		rt = -1;
	return (rt);
}

static int
log_int_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int t = 0;
	int rt = 0;

	rp->flag = NB_REG_LOG_INT;
	rp->nb.int_regs.ferr_fat_int = FERR_FAT_INT_RD(interpose);
	rp->nb.int_regs.ferr_nf_int = FERR_NF_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nerr_fat_int = NERR_FAT_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nerr_nf_int = NERR_NF_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nrecint = NRECINT_RD();
	rp->nb.int_regs.recint = RECINT_RD();
	rp->nb.int_regs.nrecsf = NRECSF_RD();
	rp->nb.int_regs.recsf = RECSF_RD();

	if (!willpanic) {
		if (rp->nb.int_regs.ferr_fat_int || *interpose)
			FERR_FAT_INT_WR(rp->nb.int_regs.ferr_fat_int);
		if (rp->nb.int_regs.ferr_nf_int || *interpose)
			FERR_NF_INT_WR(rp->nb.int_regs.ferr_nf_int);
		if (rp->nb.int_regs.nerr_fat_int)
			NERR_FAT_INT_WR(rp->nb.int_regs.nerr_fat_int);
		if (rp->nb.int_regs.nerr_nf_int)
			NERR_NF_INT_WR(rp->nb.int_regs.nerr_nf_int);
		/*
		 * if interpose write read-only registers to clear from pcii
		 * cache
		 */
		if (*interpose) {
			NRECINT_WR();
			RECINT_WR();
			NRECSF_WR();
			RECSF_WR();
		}
	}
	if (rp->nb.int_regs.ferr_fat_int == 0 &&
	    rp->nb.int_regs.nerr_fat_int == 0 &&
	    (rp->nb.int_regs.ferr_nf_int == ERR_NF_INT_B18 ||
	    (rp->nb.int_regs.ferr_nf_int == 0 &&
	    rp->nb.int_regs.nerr_nf_int == ERR_NF_INT_B18))) {
		rt = 1;
	}
	return (rt);
}

static void
log_thermal_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int t = 0;

	rp->flag = NB_REG_LOG_THR;
	rp->nb.thr_regs.ferr_fat_thr = FERR_FAT_THR_RD(interpose);
	rp->nb.thr_regs.nerr_fat_thr = NERR_FAT_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.ferr_nf_thr = FERR_NF_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.nerr_nf_thr = NERR_NF_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.ctsts = CTSTS_RD();
	rp->nb.thr_regs.thrtsts = THRTSTS_RD();

	if (!willpanic) {
		if (rp->nb.thr_regs.ferr_fat_thr || *interpose)
			FERR_FAT_THR_WR(rp->nb.thr_regs.ferr_fat_thr);
		if (rp->nb.thr_regs.nerr_fat_thr || *interpose)
			NERR_FAT_THR_WR(rp->nb.thr_regs.nerr_fat_thr);
		if (rp->nb.thr_regs.ferr_nf_thr || *interpose)
			FERR_NF_THR_WR(rp->nb.thr_regs.ferr_nf_thr);
		if (rp->nb.thr_regs.nerr_nf_thr || *interpose)
			NERR_NF_THR_WR(rp->nb.thr_regs.nerr_nf_thr);

		if (*interpose) {
			CTSTS_WR(rp->nb.thr_regs.ctsts);
			THRTSTS_WR(rp->nb.thr_regs.thrtsts);
		}
	}
}

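/*
 * Capture the PCI status and PCI Express device status registers for a
 * north bridge DMA engine error.
 */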
static void
log_dma_err(nb_regs_t *rp, int *interpose)
{
	rp->flag = NB_REG_LOG_DMA;

	rp->nb.dma_regs.pcists = PCISTS_RD(interpose);
	rp->nb.dma_regs.pexdevsts = PCIDEVSTS_RD();
}

static struct mch_error_code fat_fsb_error_code[] = {
	{ 9, EMASK_FSB_F9, ERR_FAT_FSB_F9 },
	{ 2, EMASK_FSB_F2, ERR_FAT_FSB_F2 },
	{ 1, EMASK_FSB_F1, ERR_FAT_FSB_F1 }
};

static struct mch_error_code nf_fsb_error_code[] = {
	{ 8, EMASK_FSB_F8, ERR_NF_FSB_F8 },
	{ 7, EMASK_FSB_F7, ERR_NF_FSB_F7 },
	{ 6, EMASK_FSB_F6, ERR_NF_FSB_F6 }
};

static int
intel_fsb_err(int fsb, uint8_t err_fat_fsb, uint8_t err_nf_fsb)
{
	int rt = -1;
	int nerr = 0;
	uint16_t emask_fsb = 0;
	int i;
	int sz;

	sz = sizeof (fat_fsb_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_fat_fsb & fat_fsb_error_code[i].error_bit) {
			rt = fat_fsb_error_code[i].intel_error_list;
			emask_fsb |= fat_fsb_error_code[i].emask;
			nerr++;
		}
	}

	sz = sizeof (nf_fsb_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_nf_fsb & nf_fsb_error_code[i].error_bit) {
			rt = nf_fsb_error_code[i].intel_error_list;
			emask_fsb |= nf_fsb_error_code[i].emask;
			nerr++;
		}
	}

	if (emask_fsb)
		nb_fsb_mask_mc(fsb, emask_fsb);
	if (nerr > 1)
		rt = -1;
	return (rt);
}

static void
log_fsb_err(uint64_t ferr, nb_regs_t *rp, int willpanic, int *interpose)
{
	uint8_t fsb;
	int t = 0;

	fsb = GE_FERR_FSB(ferr);
	rp->flag = NB_REG_LOG_FSB;

	rp->nb.fsb_regs.fsb = fsb;
	rp->nb.fsb_regs.ferr_fat_fsb = FERR_FAT_FSB_RD(fsb, interpose);
	rp->nb.fsb_regs.ferr_nf_fsb = FERR_NF_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nerr_fat_fsb = NERR_FAT_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nerr_nf_fsb = NERR_NF_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nrecfsb = NRECFSB_RD(fsb);
	rp->nb.fsb_regs.nrecfsb_addr = NRECADDR_RD(fsb);
	rp->nb.fsb_regs.recfsb = RECFSB_RD(fsb);
	if (!willpanic) {
		/* Clear the fatal/non-fatal first/next FSB errors */
		if (rp->nb.fsb_regs.ferr_fat_fsb || *interpose)
			FERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.ferr_fat_fsb);
		if (rp->nb.fsb_regs.ferr_nf_fsb || *interpose)
			FERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.ferr_nf_fsb);
		if (rp->nb.fsb_regs.nerr_fat_fsb || *interpose)
			NERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.nerr_fat_fsb);
		if (rp->nb.fsb_regs.nerr_nf_fsb || *interpose)
			NERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.nerr_nf_fsb);

		/*
		 * if interpose write read-only registers to clear from pcii
		 * cache
		 */
		if (*interpose) {
			NRECFSB_WR(fsb);
			NRECADDR_WR(fsb);
			RECFSB_WR(fsb);
		}
	}
}

static struct mch_error_code fat_pex_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_FAT_IO19 },
	{ 18, EMASK_UNCOR_PEX_IO18, PEX_FAT_IO18 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_FAT_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_FAT_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_FAT_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_FAT_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_FAT_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_FAT_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_FAT_IO4 },
	{ 3, EMASK_UNCOR_PEX_IO3, PEX_FAT_IO3 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_FAT_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_FAT_IO0 }
};

static struct mch_error_code fat_unit_pex_5400_error_code[] = {
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_FAT_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_FAT_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_FAT_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_FAT_IO29 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_FAT_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_FAT_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_FAT_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_FAT_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_FAT_IO23 },
	{ 22, EMASK_UNIT_PEX_IO22, PEX_5400_FAT_IO22 },
};

static struct mch_error_code fat_pex_5400_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_FAT_IO19 },
	{ 18, EMASK_UNCOR_PEX_IO18, PEX_5400_FAT_IO18 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_FAT_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_FAT_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_FAT_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_FAT_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_FAT_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_FAT_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_FAT_IO4 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_FAT_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_FAT_IO0 }
};

static struct mch_error_code fat_rp_5400_error_code[] = {
	{ 1, EMASK_RP_PEX_IO1, PEX_5400_FAT_IO1 }
};

static struct mch_error_code fat_rp_error_code[] = {
	{ 1, EMASK_RP_PEX_IO1, PEX_FAT_IO1 }
};

static struct mch_error_code uncor_pex_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_NF_IO19 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_NF_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_NF_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_NF_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_NF_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_NF_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_NF_IO4 },
	{ 3, EMASK_UNCOR_PEX_IO3, PEX_NF_IO3 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_NF_IO0 }
};

static struct mch_error_code uncor_pex_5400_error_code[] = {
	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 },
};

static struct mch_error_code cor_pex_error_code[] = {
	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
	{ 16, EMASK_COR_PEX_IO16, PEX_NF_IO16 },
	{ 15, EMASK_COR_PEX_IO15, PEX_NF_IO15 },
	{ 14, EMASK_COR_PEX_IO14, PEX_NF_IO14 },
	{ 13, EMASK_COR_PEX_IO13, PEX_NF_IO13 },
	{ 12, EMASK_COR_PEX_IO12, PEX_NF_IO12 },
	{ 10, 0, PEX_NF_IO10 },
	{ 2, 0, PEX_NF_IO2 }
};

static struct mch_error_code rp_pex_5400_error_code[] = {
	{ 17, EMASK_RP_PEX_IO17, PEX_5400_NF_IO17 },
	{ 11, EMASK_RP_PEX_IO11, PEX_5400_NF_IO11 }
};

static struct mch_error_code cor_pex_5400_error_code1[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_NF_IO19 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_NF_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_NF_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_NF_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_NF_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_NF_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_NF_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_NF_IO4 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_NF_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_NF_IO0 }
};

static struct mch_error_code cor_pex_5400_error_code2[] = {
	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
	{ 16, EMASK_COR_PEX_IO16, PEX_5400_NF_IO16 },
	{ 15, EMASK_COR_PEX_IO15, PEX_5400_NF_IO15 },
	{ 14, EMASK_COR_PEX_IO14, PEX_5400_NF_IO14 },
	{ 13, EMASK_COR_PEX_IO13, PEX_5400_NF_IO13 },
	{ 12, EMASK_COR_PEX_IO12, PEX_5400_NF_IO12 }
};

static struct mch_error_code cor_pex_5400_error_code3[] = {
	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 }
};

static struct mch_error_code rp_pex_error_code[] = {
	{ 17, EMASK_RP_PEX_IO17, PEX_NF_IO17 },
	{ 11, EMASK_RP_PEX_IO11, PEX_NF_IO11 },
};

static int
intel_pex_err(uint32_t pex_fat, uint32_t pex_nf_cor)
{
	int rt = -1;
	int nerr = 0;
	int i;
	int sz;

	sz = sizeof (fat_pex_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat & fat_pex_error_code[i].error_bit) {
			rt = fat_pex_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (fat_rp_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat & fat_rp_error_code[i].error_bit) {
			rt = fat_rp_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (uncor_pex_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & uncor_pex_error_code[i].error_bit) {
			rt = uncor_pex_error_code[i].intel_error_list;
			nerr++;
		}
	}

	sz = sizeof (cor_pex_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & cor_pex_error_code[i].error_bit) {
			rt = cor_pex_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (rp_pex_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & rp_pex_error_code[i].error_bit) {
			rt = rp_pex_error_code[i].intel_error_list;
			nerr++;
		}
	}

	if (nerr > 1)
		rt = -1;
	return (rt);
}

static struct mch_error_code fat_thr_error_code[] = {
	{ 2, EMASK_THR_F2, ERR_FAT_THR_F2 },
	{ 1, EMASK_THR_F1, ERR_FAT_THR_F1 }
};

static struct mch_error_code nf_thr_error_code[] = {
	{ 5, EMASK_THR_F5, ERR_NF_THR_F5 },
	{ 4, EMASK_THR_F4, ERR_NF_THR_F4 },
	{ 3, EMASK_THR_F3, ERR_NF_THR_F3 }
};

static int
intel_thr_err(uint8_t err_fat_thr, uint8_t err_nf_thr)
{
	int rt = -1;
	int nerr = 0;
	uint16_t emask_thr = 0;
	int i;
	int sz;

	sz = sizeof (fat_thr_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_fat_thr & fat_thr_error_code[i].error_bit) {
			rt = fat_thr_error_code[i].intel_error_list;
			emask_thr |= fat_thr_error_code[i].emask;
			nerr++;
		}
	}

	sz = sizeof (nf_thr_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_nf_thr & nf_thr_error_code[i].error_bit) {
			rt = nf_thr_error_code[i].intel_error_list;
			emask_thr |= nf_thr_error_code[i].emask;
			nerr++;
		}
	}

	if (emask_thr)
		nb_thr_mask_mc(emask_thr);
	if (nerr > 1)
		rt = -1;
	return (rt);
}

static int
intel_pex_5400_err(uint32_t pex_fat, uint32_t pex_nf_cor)
{
	int rt = -1;
	int nerr = 0;
	int i;
	int sz;

	sz = sizeof (fat_pex_5400_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat & fat_pex_5400_error_code[i].error_bit) {
			rt = fat_pex_5400_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (fat_rp_5400_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat & fat_rp_5400_error_code[i].error_bit) {
			rt = fat_rp_5400_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (fat_unit_pex_5400_error_code) /
	    sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat &
		    fat_unit_pex_5400_error_code[i].error_bit) {
			rt = fat_unit_pex_5400_error_code[i].intel_error_list;
			nerr++;
		}
	}
	sz = sizeof (uncor_pex_5400_error_code) /
	    sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_fat & uncor_pex_5400_error_code[i].error_bit) {
			rt = uncor_pex_5400_error_code[i].intel_error_list;
			nerr++;
		}
	}

	sz = sizeof (rp_pex_5400_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & rp_pex_5400_error_code[i].error_bit) {
			rt = rp_pex_5400_error_code[i].intel_error_list;
			nerr++;
		}
	}

	sz = sizeof (cor_pex_5400_error_code1) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & cor_pex_5400_error_code1[i].error_bit) {
			rt = cor_pex_5400_error_code1[i].intel_error_list;
			nerr++;
		}
	}

	sz = sizeof (cor_pex_5400_error_code2) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & cor_pex_5400_error_code2[i].error_bit) {
			rt = cor_pex_5400_error_code2[i].intel_error_list;
			nerr++;
		}
	}

	sz = sizeof (cor_pex_5400_error_code3) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (pex_nf_cor & cor_pex_5400_error_code3[i].error_bit) {
			rt = cor_pex_5400_error_code3[i].intel_error_list;
			nerr++;
		}
	}

	if (nerr > 1)
		rt = -1;
	return (rt);
}

static void
log_pex_err(uint64_t ferr, nb_regs_t *rp, int willpanic, int *interpose)
{
	uint8_t pex = (uint8_t)-1;
	int t = 0;

	rp->flag = NB_REG_LOG_PEX;
	pex = GE_ERR_PEX(ferr);

	rp->nb.pex_regs.pex = pex;
	rp->nb.pex_regs.pex_fat_ferr = PEX_FAT_FERR_RD(pex, interpose);
	rp->nb.pex_regs.pex_fat_nerr = PEX_FAT_NERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.pex_nf_corr_ferr = PEX_NF_FERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.pex_nf_corr_nerr = PEX_NF_NERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.uncerrsev = UNCERRSEV_RD(pex);
	rp->nb.pex_regs.rperrsts = RPERRSTS_RD(pex);
	rp->nb.pex_regs.rperrsid = RPERRSID_RD(pex);
	if (pex != (uint8_t)-1)
		rp->nb.pex_regs.uncerrsts = UNCERRSTS_RD(pex);
	else
		rp->nb.pex_regs.uncerrsts = 0;
	rp->nb.pex_regs.aerrcapctrl = AERRCAPCTRL_RD(pex);
	rp->nb.pex_regs.corerrsts = CORERRSTS_RD(pex);
	rp->nb.pex_regs.pexdevsts = PEXDEVSTS_RD(pex);

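	/* Unless the system is about to panic, clear the logged PEX errors */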
	if (!willpanic) {
		if (rp->nb.pex_regs.pex_fat_ferr || *interpose)
			PEX_FAT_FERR_WR(pex, rp->nb.pex_regs.pex_fat_ferr);
		if (rp->nb.pex_regs.pex_fat_nerr)
			PEX_FAT_NERR_WR(pex, rp->nb.pex_regs.pex_fat_nerr);
		if (rp->nb.pex_regs.pex_nf_corr_ferr || *interpose)
			PEX_NF_FERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_ferr);
		if (rp->nb.pex_regs.pex_nf_corr_nerr)
			PEX_NF_NERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_nerr);
		if (*interpose)
			UNCERRSTS_WR(pex, rp->nb.pex_regs.uncerrsts);
		if (*interpose)
			RPERRSTS_WR(pex, rp->nb.pex_regs.rperrsts);
		if (*interpose)
			PEXDEVSTS_WR(pex, 0);
	}
}

static void
log_fat_fbd_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int channel, branch;
	int t = 0;

	rp->flag = NB_REG_LOG_FAT_FBD;
	rp->nb.fat_fbd_regs.ferr_fat_fbd = FERR_FAT_FBD_RD(interpose);
	channel = (rp->nb.fat_fbd_regs.ferr_fat_fbd >> 28) & 3;
	branch = channel >> 1;
	rp->nb.fat_fbd_regs.nerr_fat_fbd = NERR_FAT_FBD_RD(&t);
	*interpose |= t;
	rp->nb.fat_fbd_regs.nrecmema = NRECMEMA_RD(branch);
	rp->nb.fat_fbd_regs.nrecmemb = NRECMEMB_RD(branch);
	rp->nb.fat_fbd_regs.nrecfglog = NRECFGLOG_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbda = NRECFBDA_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdb = NRECFBDB_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdc = NRECFBDC_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdd = NRECFBDD_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbde = NRECFBDE_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdf = NRECFBDF_RD(branch);
	rp->nb.fat_fbd_regs.spcps = SPCPS_RD(branch);
	rp->nb.fat_fbd_regs.spcpc = SPCPC_RD(branch);
	rp->nb.fat_fbd_regs.uerrcnt = UERRCNT_RD(branch);
	rp->nb.fat_fbd_regs.uerrcnt_last = uerrcnt[branch];
	uerrcnt[branch] = rp->nb.fat_fbd_regs.uerrcnt;
	rp->nb.fat_fbd_regs.badrama = BADRAMA_RD(branch);
	rp->nb.fat_fbd_regs.badramb = BADRAMB_RD(branch);
	rp->nb.fat_fbd_regs.badcnt = BADCNT_RD(branch);
	if (!willpanic) {
		if (rp->nb.fat_fbd_regs.ferr_fat_fbd || *interpose)
			FERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.ferr_fat_fbd);
		if (rp->nb.fat_fbd_regs.nerr_fat_fbd)
			NERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.nerr_fat_fbd);
		/*
		 * if interpose write read-only registers to clear from pcii
		 * cache
		 */
		if (*interpose) {
			NRECMEMA_WR(branch);
			NRECMEMB_WR(branch);
			NRECFGLOG_WR(branch);
			NRECFBDA_WR(branch);
			NRECFBDB_WR(branch);
			NRECFBDC_WR(branch);
			NRECFBDD_WR(branch);
			NRECFBDE_WR(branch);
			NRECFBDF_WR(branch);
		}
	}
}

static void
log_nf_fbd_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int channel, branch;
	int t = 0;

	rp->flag = NB_REG_LOG_NF_FBD;
	rp->nb.nf_fbd_regs.ferr_nf_fbd = FERR_NF_FBD_RD(interpose);
	channel = (rp->nb.nf_fbd_regs.ferr_nf_fbd >> 28) & 3;
	branch = channel >> 1;
	rp->nb.nf_fbd_regs.nerr_nf_fbd = NERR_NF_FBD_RD(&t);
	*interpose |= t;
	rp->nb.nf_fbd_regs.redmemb = REDMEMB_RD();
	rp->nb.nf_fbd_regs.recmema = RECMEMA_RD(branch);
	rp->nb.nf_fbd_regs.recmemb = RECMEMB_RD(branch);
	rp->nb.nf_fbd_regs.recfglog = RECFGLOG_RD(branch);
	rp->nb.nf_fbd_regs.recfbda = RECFBDA_RD(branch);
	rp->nb.nf_fbd_regs.recfbdb = RECFBDB_RD(branch);
	rp->nb.nf_fbd_regs.recfbdc = RECFBDC_RD(branch);
	rp->nb.nf_fbd_regs.recfbdd = RECFBDD_RD(branch);
	rp->nb.nf_fbd_regs.recfbde = RECFBDE_RD(branch);
	rp->nb.nf_fbd_regs.recfbdf = RECFBDF_RD(branch);
	rp->nb.nf_fbd_regs.spcps = SPCPS_RD(branch);
	rp->nb.nf_fbd_regs.spcpc = SPCPC_RD(branch);
	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
		rp->nb.nf_fbd_regs.cerrcnta = CERRCNTA_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntb = CERRCNTB_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntc = CERRCNTC_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntd = CERRCNTD_RD(branch, channel);
	} else {
		rp->nb.nf_fbd_regs.cerrcnta = CERRCNT_RD(branch);
		rp->nb.nf_fbd_regs.cerrcntb = 0;
		rp->nb.nf_fbd_regs.cerrcntc = 0;
		rp->nb.nf_fbd_regs.cerrcntd = 0;
	}
	rp->nb.nf_fbd_regs.cerrcnta_last = cerrcnta[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntb_last = cerrcntb[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntc_last = cerrcntc[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntd_last = cerrcntd[branch][channel & 1];
	cerrcnta[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcnta;
	cerrcntb[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntb;
	cerrcntc[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntc;
	cerrcntd[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntd;
	rp->nb.nf_fbd_regs.badrama = BADRAMA_RD(branch);
	rp->nb.nf_fbd_regs.badramb = BADRAMB_RD(branch);
	rp->nb.nf_fbd_regs.badcnt = BADCNT_RD(branch);
	if (!willpanic) {
		if (rp->nb.nf_fbd_regs.ferr_nf_fbd || *interpose)
			FERR_NF_FBD_WR(rp->nb.nf_fbd_regs.ferr_nf_fbd);
		if (rp->nb.nf_fbd_regs.nerr_nf_fbd)
			NERR_NF_FBD_WR(rp->nb.nf_fbd_regs.nerr_nf_fbd);
		/*
		 * if interpose write read-only registers to clear from pcii
		 * cache
		 */
		if (*interpose) {
			RECMEMA_WR(branch);
			RECMEMB_WR(branch);
			RECFGLOG_WR(branch);
			RECFBDA_WR(branch);
			RECFBDB_WR(branch);
			RECFBDC_WR(branch);
			RECFBDD_WR(branch);
			RECFBDE_WR(branch);
			RECFBDF_WR(branch);
			SPCPS_WR(branch);
		}
	}
}

static void
log_ferr(uint64_t ferr, uint32_t *nerrp, nb_logout_t *log, int willpanic)
{
	nb_regs_t *rp = &log->nb_regs;
	uint32_t nerr = *nerrp;
	int interpose = 0;
	int spurious = 0;

	log->acl_timestamp = gethrtime_waitfree();
	if ((ferr & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
		log_pex_err(ferr, rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
	} else if ((ferr & GE_FBD_FATAL) != 0) {
		log_fat_fbd_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~GE_NERR_FBD_FATAL;
	} else if ((ferr & GE_FBD_NF) != 0) {
		log_nf_fbd_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~GE_NERR_FBD_NF;
	} else if ((ferr & (GE_FERR_FSB_FATAL | GE_FERR_FSB_NF)) != 0) {
		log_fsb_err(ferr, rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
	} else if ((ferr & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
		log_dma_err(rp, &interpose);
		*nerrp = nerr & ~(GE_DMA_FATAL | GE_DMA_NF);
	} else if ((ferr & (GE_INT_FATAL | GE_INT_NF)) != 0) {
		spurious = log_int_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_INT_FATAL | GE_INT_NF);
	} else if (nb_chipset == INTEL_NB_5400 &&
	    (ferr & (GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF)) != 0) {
		log_thermal_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF);
	}
	if (interpose)
		log->type = "inject";
	else
		log->type = "error";
	if (!spurious) {
		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
	}
}

static void
log_nerr(uint32_t *errp, nb_logout_t *log, int willpanic)
{
	uint32_t err;
	nb_regs_t *rp = &log->nb_regs;
	int interpose = 0;
	int spurious = 0;

	err = *errp;
	log->acl_timestamp = gethrtime_waitfree();
	if ((err & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
		log_pex_err(err, rp, willpanic, &interpose);
		*errp = err & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
	} else if ((err & GE_NERR_FBD_FATAL) != 0) {
		log_fat_fbd_err(rp, willpanic, &interpose);
		*errp = err & ~GE_NERR_FBD_FATAL;
	} else if ((err & GE_NERR_FBD_NF) != 0) {
		log_nf_fbd_err(rp, willpanic, &interpose);
		*errp = err & ~GE_NERR_FBD_NF;
	} else if ((err & (GE_NERR_FSB_FATAL | GE_NERR_FSB_NF)) != 0) {
		log_fsb_err(GE_NERR_TO_FERR_FSB(err), rp, willpanic,
		    &interpose);
		*errp = err & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
	} else if ((err & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
		log_dma_err(rp, &interpose);
		*errp = err & ~(GE_DMA_FATAL | GE_DMA_NF);
	} else if ((err & (GE_INT_FATAL | GE_INT_NF)) != 0) {
		spurious = log_int_err(rp, willpanic, &interpose);
		*errp = err & ~(GE_INT_FATAL | GE_INT_NF);
	}
	if (interpose)
		log->type = "inject";
	else
		log->type = "error";
	if (!spurious) {
		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
	}
}

/*ARGSUSED*/
void
nb_error_trap(cmi_hdl_t hdl, boolean_t ismc, boolean_t willpanic)
{
	uint64_t ferr;
	uint32_t nerr, err;
	int nmc = 0;
	int i;

	if (mutex_tryenter(&nb_mutex) == 0)
		return;

	nerr = NERR_GLOBAL_RD();
	err = nerr;
	for (i = 0; i < NB_MAX_ERRORS; i++) {
		ferr = FERR_GLOBAL_RD();
		nb_log.nb_regs.chipset = nb_chipset;
		nb_log.nb_regs.ferr = ferr;
		nb_log.nb_regs.nerr = nerr;
		if (ferr) {
			log_ferr(ferr, &err, &nb_log, willpanic);
			FERR_GLOBAL_WR(ferr);
			nmc++;
		} else if (err) {
			log_nerr(&err, &nb_log, willpanic);
			nmc++;
		}
	}
	if (nerr) {
		NERR_GLOBAL_WR(nerr);
	}
	if (nmc == 0 && nb_mask_mc_set)
		nb_mask_mc_reset();
	mutex_exit(&nb_mutex);
}

static void
nb_fsb_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	int intel_error_list;
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_fat_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_fat_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_nf_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_nf_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB,
	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.nrecfsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB_ADDR,
	    DATA_TYPE_UINT64, nb_regs->nb.fsb_regs.nrecfsb_addr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFSB,
	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.recfsb, NULL);
	intel_error_list = data->intel_error_list;
	if (intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "F%d", intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_pex_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	int intel_error_list;
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX,
	    DATA_TYPE_UINT8, nb_regs->nb.pex_regs.pex, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_FERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_ferr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_NERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_nerr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_FERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_ferr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_NERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_nerr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSEV,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsev, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSTS,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSID,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsid, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSTS,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AERRCAPCTRL,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.aerrcapctrl, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CORERRSTS,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.corerrsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
	    DATA_TYPE_UINT16, nb_regs->nb.pex_regs.pexdevsts, NULL);
	intel_error_list = data->intel_error_list;
	if (intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "IO%d", intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_int_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	int intel_error_list;
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_fat_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_nf_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_fat_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_nf_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECINT,
	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.nrecint, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECINT,
	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.recint, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECSF,
	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.nrecsf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECSF,
	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.recsf, NULL);
	intel_error_list = data->intel_error_list;
	if (intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "B%d", intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_fat_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.ferr_fat_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nerr_fat_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFGLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfglog, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbda, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDB,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDC,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDE,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbde, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDF,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.fat_fbd_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.spcpc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT_LAST,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt_last, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badrama, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
	    DATA_TYPE_UINT16, nb_regs->nb.fat_fbd_regs.badramb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badcnt, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
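	/* Chipset Error List tag ("Mn"), or a fallback when it is ambiguous */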
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_nf_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm == -1 && sp->rank != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
		    DATA_TYPE_INT32, sp->rank, NULL);
	}
	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.ferr_nf_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.nerr_nf_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFGLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfglog, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbda, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDE,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbde, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDF,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.nf_fbd_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.spcpc, NULL);
	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb, NULL);
		if (nb_chipset == INTEL_NB_7300) {
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC,
			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntc,
			    NULL);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD,
			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntd,
			    NULL);
		}
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
		    NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb_last,
		    NULL);
		if (nb_chipset == INTEL_NB_7300) {
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC_LAST,
			    DATA_TYPE_UINT32,
			    nb_regs->nb.nf_fbd_regs.cerrcntc_last, NULL);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD_LAST,
			    DATA_TYPE_UINT32,
			    nb_regs->nb.nf_fbd_regs.cerrcntd_last, NULL);
		}
	} else {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
		    NULL);
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badrama, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
	    DATA_TYPE_UINT16, nb_regs->nb.nf_fbd_regs.badramb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badcnt, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_dma_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload)
{
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PCISTS,
	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pcists, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pexdevsts, NULL);
}

static void
nb_thr_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_fat_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_fat_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_nf_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_THR,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_nf_thr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CTSTS,
	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ctsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_THRTSTS,
	    DATA_TYPE_UINT16, nb_regs->nb.thr_regs.thrtsts, NULL);
	if (data->intel_error_list >= 0) {
		(void) snprintf(buf, sizeof (buf), "TH%d",
		    data->intel_error_list);
	} else {
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}

static void
nb_ereport_add_logout(nvlist_t *payload, const nb_logout_t *acl,
    nb_scatchpad_t *data)
{
	const nb_regs_t *nb_regs = &acl->nb_regs;

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_MC_TYPE,
	    DATA_TYPE_STRING, acl->type, NULL);
	switch (nb_regs->flag) {
	case NB_REG_LOG_FSB:
		nb_fsb_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_PEX:
		nb_pex_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_INT:
		nb_int_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_FAT_FBD:
		nb_fat_fbd_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_NF_FBD:
		nb_nf_fbd_err_payload(nb_regs, payload, data);
		break;
	case NB_REG_LOG_DMA:
		nb_dma_err_payload(nb_regs, payload);
		break;
	case NB_REG_LOG_THR:
		nb_thr_err_payload(nb_regs, payload, data);
		break;
	default:
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_GLOBAL,
		    DATA_TYPE_UINT64, nb_regs->ferr, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_GLOBAL,
		    DATA_TYPE_UINT32, nb_regs->nerr, NULL);
		break;
	}
}

void
nb_fsb_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    nb_scatchpad_t *data)
{
	int chip;

	if (nb_chipset == INTEL_NB_7300)
		chip = nb_regs->nb.fsb_regs.fsb * 2;
	else
		chip = nb_regs->nb.fsb_regs.fsb;
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
	    "motherboard", 0, "chip", chip);

	if (nb_regs->nb.fsb_regs.ferr_fat_fsb == 0 &&
	    nb_regs->nb.fsb_regs.ferr_nf_fsb == 0) {
		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
		    nb_regs->nb.fsb_regs.nerr_fat_fsb,
		    nb_regs->nb.fsb_regs.nerr_nf_fsb);
	} else {
		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
		    nb_regs->nb.fsb_regs.ferr_fat_fsb,
		    nb_regs->nb.fsb_regs.ferr_nf_fsb);
	}
	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "fsb");
}

void
nb_pex_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    nb_scatchpad_t *data)
{
	int hostbridge;

	if (nb_regs->nb.pex_regs.pex == 0) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);
	} else {
		hostbridge = nb_regs->nb.pex_regs.pex - 1;
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
		    "motherboard", 0,
		    "hostbridge", hostbridge);
	}

	if (nb_regs->nb.pex_regs.pex_fat_ferr == 0 &&
	    nb_regs->nb.pex_regs.pex_nf_corr_ferr == 0) {
		if (nb_chipset == INTEL_NB_5400) {
			data->intel_error_list =
			    intel_pex_5400_err(
			    nb_regs->nb.pex_regs.pex_fat_nerr,
			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
		} else {
			data->intel_error_list =
			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_nerr,
			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
		}
	} else {
		if (nb_chipset == INTEL_NB_5400) {
			data->intel_error_list =
			    intel_pex_5400_err(
			    nb_regs->nb.pex_regs.pex_fat_ferr,
			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
		} else {
			data->intel_error_list =
			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_ferr,
			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
		}
	}

	if (nb_regs->nb.pex_regs.pex == 0) {
		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "esi");
	} else {
		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "pex");
	}
}

void
nb_int_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
	    "motherboard", 0);

	if (nb_regs->nb.int_regs.ferr_fat_int == 0 &&
	    nb_regs->nb.int_regs.ferr_nf_int == 0) {
		((nb_scatchpad_t *)data)->intel_error_list =
		    intel_int_err(nb_regs->nb.int_regs.nerr_fat_int,
		    nb_regs->nb.int_regs.nerr_nf_int);
	} else {
		((nb_scatchpad_t *)data)->intel_error_list =
		    intel_int_err(nb_regs->nb.int_regs.ferr_fat_int,
		    nb_regs->nb.int_regs.ferr_nf_int);
	}
	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "ie");
}

void
nb_fat_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	char *intr;
	nb_mem_scatchpad_t *sp;

	intr = fat_memory_error(nb_regs, data);
	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel,
		    "dimm", sp->dimm,
		    "rank", sp->rank);
	} else if (sp->channel != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel);
	} else if (sp->branch != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
		    "motherboard", 0,
		    "memory-controller", sp->branch);
	} else {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);
	}

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
}

void
nb_nf_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	char *intr;
	nb_mem_scatchpad_t *sp;

	intr = nf_memory_error(nb_regs, data);
	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel,
		    "dimm", sp->dimm,
		    "rank", sp->rank);
	} else if (sp->channel != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel);
	} else if (sp->branch != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
		    "motherboard", 0,
		    "memory-controller", sp->branch);
	} else {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);
	}

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
}

void
nb_dma_report(char *class, nvlist_t *detector)
{
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
	    "motherboard", 0);

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "dma");
}

void
nb_thr_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	((nb_scatchpad_t *)data)->intel_error_list =
	    intel_thr_err(nb_regs->nb.thr_regs.ferr_fat_thr,
	    nb_regs->nb.thr_regs.ferr_nf_thr);
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
	    "motherboard", 0);

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "otf");
}


nvlist_t *
nb_report(const nb_regs_t *nb_regs, char *class, nv_alloc_t *nva, void *scratch)
{
	nvlist_t *detector = fm_nvlist_create(nva);

	switch (nb_regs->flag) {
	case NB_REG_LOG_FSB:
		nb_fsb_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_PEX:
		nb_pex_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_INT:
		nb_int_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_FAT_FBD:
		nb_fat_fbd_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_NF_FBD:
		nb_nf_fbd_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_DMA:
		nb_dma_report(class, detector);
		break;
	case NB_REG_LOG_THR:
		nb_thr_report(nb_regs, class, detector, scratch);
		break;
	default:
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);

		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "unknown");
	}
	return (detector);
}

/*ARGSUSED*/
void
nb_drain(void *ignored, const void *data, const errorq_elem_t *eqe)
{
	nb_logout_t *acl = (nb_logout_t *)data;
	errorq_elem_t *eqep, *scr_eqep;
	nvlist_t *ereport, *detector;
	nv_alloc_t *nva = NULL;
	char buf[FM_MAX_CLASS];
	nb_scatchpad_t nb_scatchpad;

	if (panicstr) {
		if ((eqep = errorq_reserve(ereport_errorq)) == NULL)
			return;
		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		/*
		 * Now try to allocate another element for scratch space and
		 * use that for further scratch space (eg for constructing
		 * nvlists to add to the main ereport). If we can't reserve
		 * a scratch element just fallback to working within the
		 * element we already have, and hope for the best. All this
		 * is necessary because the fixed buffer nv allocator does
		 * not reclaim freed space and nvlist construction is
		 * expensive.
		 */
		if ((scr_eqep = errorq_reserve(ereport_errorq)) != NULL)
			nva = errorq_elem_nva(ereport_errorq, scr_eqep);
		else
			nva = errorq_elem_nva(ereport_errorq, eqep);
	} else {
		ereport = fm_nvlist_create(NULL);
	}
	detector = nb_report(&acl->nb_regs, buf, nva, &nb_scatchpad);
	if (detector == NULL)
		return;
	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
	    fm_ena_generate(acl->acl_timestamp, FM_ENA_FMT1), detector, NULL);
	/*
	 * We're done with 'detector' so reclaim the scratch space.
	 */
	if (panicstr) {
		fm_nvlist_destroy(detector, FM_NVA_RETAIN);
		nv_alloc_reset(nva);
	} else {
		fm_nvlist_destroy(detector, FM_NVA_FREE);
	}

	/*
	 * Encode the error-specific data that was saved in the logout area.
	 */
	nb_ereport_add_logout(ereport, acl, &nb_scatchpad);

	if (panicstr) {
		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
		if (scr_eqep)
			errorq_cancel(ereport_errorq, scr_eqep);
	} else {
		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
	}
}