/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <cma.h>

#include <unistd.h>
#include <fcntl.h>
#include <strings.h>
#include <errno.h>
#include <time.h>
#include <fm/fmd_api.h>
#include <sys/fm/protocol.h>
#include <sys/systeminfo.h>
#include <sys/utsname.h>

#ifdef sun4v
#include <sys/fm/ldom.h>

static fmd_hdl_t *init_hdl;
ldom_hdl_t *cma_lhp;
#endif

#ifdef i386
boolean_t cma_is_native;
#endif

extern const char *fmd_fmri_get_platform();

cma_t cma;

cma_stats_t cma_stats = {
	{ "cpu_flts", FMD_TYPE_UINT64, "cpu faults resolved" },
	{ "cpu_repairs", FMD_TYPE_UINT64, "cpu faults repaired" },
	{ "cpu_fails", FMD_TYPE_UINT64, "cpu faults unresolvable" },
	{ "cpu_blfails", FMD_TYPE_UINT64, "failed cpu blacklists" },
	{ "cpu_supp", FMD_TYPE_UINT64, "cpu offlines suppressed" },
	{ "cpu_blsupp", FMD_TYPE_UINT64, "cpu blacklists suppressed" },
	{ "page_flts", FMD_TYPE_UINT64, "page faults resolved" },
	{ "page_repairs", FMD_TYPE_UINT64, "page faults repaired" },
	{ "page_fails", FMD_TYPE_UINT64, "page faults unresolvable" },
	{ "page_supp", FMD_TYPE_UINT64, "page retires suppressed" },
	{ "page_nonent", FMD_TYPE_UINT64, "retires for non-existent fmris" },
	{ "bad_flts", FMD_TYPE_UINT64, "invalid fault events received" },
	{ "nop_flts", FMD_TYPE_UINT64, "inapplicable fault events received" },
	{ "auto_flts", FMD_TYPE_UINT64, "auto-close faults received" }
};

typedef struct cma_subscriber {
	const char *subr_class;
	const char *subr_sname;
	uint_t subr_svers;
	int (*subr_func)(fmd_hdl_t *, nvlist_t *, nvlist_t *, const char *,
	    boolean_t);
} cma_subscriber_t;

static const cma_subscriber_t cma_subrs[] = {
#if defined(i386)
	/*
	 * On x86, the ASRUs are expected to be in hc scheme.  When
	 * cpumem-retire wants to retire a cpu or a memory page, it calls
	 * the retire methods registered on the corresponding topo node.
	 * The topo enumerator necessarily knows all the configuration
	 * information needed to decide what to retire and how, so this
	 * moves much of that complexity out of the agent and into the
	 * entity that owns the config/topo information.
	 */
	{ "fault.memory.page", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    cma_page_retire },
	{ "fault.memory.page_sb", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    cma_page_retire },
	{ "fault.memory.page_ck", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    cma_page_retire },
	{ "fault.memory.page_ue", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    cma_page_retire },
	{ "fault.memory.generic-x86.page_ce", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, cma_page_retire },
	{ "fault.memory.generic-x86.page_ue", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, cma_page_retire },
	{ "fault.memory.intel.page_ce", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, cma_page_retire },
	{ "fault.memory.intel.page_ue", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, cma_page_retire },
	{ "fault.memory.dimm", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.dimm_sb", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.dimm_ck", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.dimm_ue", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.generic-x86.dimm_ce", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.memory.generic-x86.dimm_ue", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.memory.intel.dimm_ce", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.memory.intel.dimm_ue", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.memory.intel.fbd.*", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.memory.dimm_testfail", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.memory.bank", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.datapath", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.cpu.intel.quickpath.*", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.cpu.generic-x86.mc", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.cpu.intel.dma", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.cpu.intel.dma", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },

	/*
	 * The ASRUs for cpu faults are in cpu scheme on native and in hc
	 * scheme on xpv, so each cpu fault class needs to be listed twice.
	 */

	/*
	 * The following faults do NOT retire a cpu thread, and therefore
	 * must be intercepted before the default "fault.cpu.*" dispatch
	 * to cma_cpu_hc_retire.
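	 * Such entries carry a NULL subr_func: nvl2subr() still matches
	 * them, so they are not counted as inapplicable faults, but no
	 * retire action is taken for the suspect.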
	 */
	{ "fault.cpu.amd.dramchannel", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.cpu.amd.dramchannel", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.generic-x86.bus_interconnect_memory", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.cpu.generic-x86.bus_interconnect_memory", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.generic-x86.bus_interconnect_io", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.cpu.generic-x86.bus_interconnect_io", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.generic-x86.bus_interconnect", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.cpu.generic-x86.bus_interconnect", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.intel.bus_interconnect_memory", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.cpu.intel.bus_interconnect_memory", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.intel.bus_interconnect_io", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.cpu.intel.bus_interconnect_io", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.intel.bus_interconnect", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.cpu.intel.bus_interconnect", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.intel.nb.*", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.cpu.intel.nb.*", FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    NULL },
	{ "fault.cpu.intel.dma", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.cpu.intel.dma", FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    NULL },
	{ "fault.cpu.*", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    cma_cpu_hc_retire },
	{ "fault.cpu.*", FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    cma_cpu_hc_retire },
#elif defined(sun4v)
	{ "fault.memory.page", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    cma_page_retire },
	{ "fault.memory.dimm", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.dimm_sb", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.dimm_ck", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.dimm_ue", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.bank", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.datapath", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.link-c", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.link-u", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.link-f", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.link-c", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.link-u", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.link-f", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    NULL },

	/*
	 * The following ultraSPARC-T1/T2 faults do NOT retire a cpu thread,
	 * and therefore must be intercepted before the default
	 * "fault.cpu.*" dispatch to cma_cpu_hc_retire.
	 */
	{ "fault.cpu.*.l2cachedata", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.*.l2cachetag", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.*.l2cachectl", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.*.l2data-c", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.*.l2data-u", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.*.mau", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.*.lfu-u", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.*.lfu-f", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.*.lfu-p", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.ultraSPARC-T1.freg", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.ultraSPARC-T1.l2cachedata", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.ultraSPARC-T1.l2cachetag", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.ultraSPARC-T1.l2cachectl", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.ultraSPARC-T1.mau", FM_FMRI_SCHEME_CPU,
	    FM_CPU_SCHEME_VERSION, NULL },
	{ "fault.cpu.ultraSPARC-T2plus.chip", FM_FMRI_SCHEME_HC,
	    FM_HC_SCHEME_VERSION, NULL },
	{ "fault.cpu.*", FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION,
	    cma_cpu_hc_retire },
	{ "fault.cpu.*", FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    cma_cpu_hc_retire },
#elif defined(opl)
	{ "fault.memory.page", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    cma_page_retire },
	{ "fault.memory.dimm", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.bank", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.cpu.SPARC64-VI.*", FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    cma_cpu_cpu_retire },
	{ "fault.cpu.SPARC64-VII.*", FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    cma_cpu_cpu_retire },
	{ "fault.chassis.SPARC-Enterprise.cpu.SPARC64-VI.core.se",
	    FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION, cma_cpu_hc_retire },
	{ "fault.chassis.SPARC-Enterprise.cpu.SPARC64-VI.core.se-offlinereq",
	    FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION, cma_cpu_hc_retire },
	{ "fault.chassis.SPARC-Enterprise.cpu.SPARC64-VI.core.ce",
	    FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION, cma_cpu_hc_retire },
	{ "fault.chassis.SPARC-Enterprise.cpu.SPARC64-VI.core.ce-offlinereq",
	    FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION, cma_cpu_hc_retire },
	{ "fault.chassis.SPARC-Enterprise.cpu.SPARC64-VII.core.se",
	    FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION, cma_cpu_hc_retire },
	{ "fault.chassis.SPARC-Enterprise.cpu.SPARC64-VII.core.se-offlinereq",
	    FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION, cma_cpu_hc_retire },
	{ "fault.chassis.SPARC-Enterprise.cpu.SPARC64-VII.core.ce",
	    FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION, cma_cpu_hc_retire },
	{ "fault.chassis.SPARC-Enterprise.cpu.SPARC64-VII.core.ce-offlinereq",
	    FM_FMRI_SCHEME_HC, FM_HC_SCHEME_VERSION, cma_cpu_hc_retire },
#else
	/*
	 * For platforms excluding i386, sun4v and opl.
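	 * (In practice this covers the sun4u / UltraSPARC platforms.)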
	 */
	{ "fault.memory.page", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    cma_page_retire },
	{ "fault.memory.page_sb", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    cma_page_retire },
	{ "fault.memory.page_ck", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    cma_page_retire },
	{ "fault.memory.page_ue", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    cma_page_retire },
	{ "fault.memory.dimm", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.dimm_sb", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.dimm_ck", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.dimm_ue", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.dimm_testfail", FM_FMRI_SCHEME_MEM,
	    FM_MEM_SCHEME_VERSION, NULL },
	{ "fault.memory.bank", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },
	{ "fault.memory.datapath", FM_FMRI_SCHEME_MEM, FM_MEM_SCHEME_VERSION,
	    NULL },

	/*
	 * The following faults do NOT retire a cpu thread, and therefore
	 * must be intercepted before the default "fault.cpu.*" dispatch
	 * to cma_cpu_cpu_retire.
	 */
	{ "fault.cpu.ultraSPARC-IVplus.l2cachedata-line",
	    FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    cma_cache_way_retire },
	{ "fault.cpu.ultraSPARC-IVplus.l3cachedata-line",
	    FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    cma_cache_way_retire },
	{ "fault.cpu.ultraSPARC-IVplus.l2cachetag-line",
	    FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    cma_cache_way_retire },
	{ "fault.cpu.ultraSPARC-IVplus.l3cachetag-line",
	    FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    cma_cache_way_retire },

	/*
	 * Default "fault.cpu.*" for "cpu" scheme ASRU dispatch.
	 */
	{ "fault.cpu.*", FM_FMRI_SCHEME_CPU, FM_CPU_SCHEME_VERSION,
	    cma_cpu_cpu_retire },
#endif
	{ NULL, NULL, 0, NULL }
};

static const cma_subscriber_t *
nvl2subr(fmd_hdl_t *hdl, nvlist_t *nvl, nvlist_t **asrup)
{
	const cma_subscriber_t *sp;
	nvlist_t *asru;
	char *scheme;
	uint8_t version;
	boolean_t retire;

	if (nvlist_lookup_boolean_value(nvl, FM_SUSPECT_RETIRE, &retire) == 0 &&
	    retire == 0) {
		fmd_hdl_debug(hdl, "cma_recv: retire suppressed");
		return (NULL);
	}

	if (nvlist_lookup_nvlist(nvl, FM_FAULT_ASRU, &asru) != 0 ||
	    nvlist_lookup_string(asru, FM_FMRI_SCHEME, &scheme) != 0 ||
	    nvlist_lookup_uint8(asru, FM_VERSION, &version) != 0) {
		cma_stats.bad_flts.fmds_value.ui64++;
		return (NULL);
	}

	for (sp = cma_subrs; sp->subr_class != NULL; sp++) {
		if (fmd_nvl_class_match(hdl, nvl, sp->subr_class) &&
		    strcmp(scheme, sp->subr_sname) == 0 &&
		    version <= sp->subr_svers) {
			*asrup = asru;
			return (sp);
		}
	}

	cma_stats.nop_flts.fmds_value.ui64++;
	return (NULL);
}

static void
cma_recv_list(fmd_hdl_t *hdl, nvlist_t *nvl, const char *class)
{
	char *uuid = NULL;
	nvlist_t **nva;
	uint_t nvc = 0;
	uint_t keepopen;
	int err = 0;
	nvlist_t *asru = NULL;
#ifndef i386
	uint32_t index;
#endif

	err |= nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid);
	err |= nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
	    &nva, &nvc);
	if (err != 0) {
		cma_stats.bad_flts.fmds_value.ui64++;
		return;
	}

	keepopen = nvc;
	while (nvc-- != 0 && (strcmp(class, FM_LIST_SUSPECT_CLASS) != 0 ||
	    !fmd_case_uuclosed(hdl, uuid))) {
		nvlist_t *nvl = *nva++;
		const cma_subscriber_t *subr;
		int has_fault;

		if ((subr = nvl2subr(hdl, nvl, &asru)) == NULL)
			continue;

		/*
		 * A handler returns CMA_RA_SUCCESS to indicate that,
		 * from this suspect's point of view, the case may be
		 * closed, and CMA_RA_FAILURE otherwise.
		 * A handler must not close the case itself.
		 */
		if (subr->subr_func != NULL) {
			has_fault = fmd_nvl_fmri_has_fault(hdl, asru,
			    FMD_HAS_FAULT_ASRU, NULL);
			if (strcmp(class, FM_LIST_SUSPECT_CLASS) == 0) {
				if (has_fault == 1)
					err = subr->subr_func(hdl, nvl, asru,
					    uuid, 0);
			} else {
				if (has_fault == 0)
					err = subr->subr_func(hdl, nvl, asru,
					    uuid, 1);
			}
			if (err == CMA_RA_SUCCESS)
				keepopen--;
		}
	}

#ifndef i386
	/*
	 * Do not close the case if we are handling cache faults.
	 */
	if (asru != NULL) {
		if (nvlist_lookup_uint32(asru, FM_FMRI_CPU_CACHE_INDEX,
		    &index) != 0) {
			if (!keepopen && strcmp(class,
			    FM_LIST_SUSPECT_CLASS) == 0) {
				fmd_case_uuclose(hdl, uuid);
			}
		}
	}
#endif

	if (!keepopen && strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
		fmd_case_uuresolved(hdl, uuid);
}

static void
cma_recv_one(fmd_hdl_t *hdl, nvlist_t *nvl)
{
	const cma_subscriber_t *subr;
	nvlist_t *asru;

	if ((subr = nvl2subr(hdl, nvl, &asru)) == NULL)
		return;

	if (subr->subr_func != NULL) {
		if (fmd_nvl_fmri_has_fault(hdl, asru,
		    FMD_HAS_FAULT_ASRU, NULL) == 1)
			(void) subr->subr_func(hdl, nvl, asru, NULL, 0);
	}
}

/*ARGSUSED*/
static void
cma_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
{
	fmd_hdl_debug(hdl, "received %s\n", class);

	if (strcmp(class, FM_LIST_SUSPECT_CLASS) == 0 ||
	    strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 ||
	    strcmp(class, FM_LIST_UPDATED_CLASS) == 0)
		cma_recv_list(hdl, nvl, class);
	else
		cma_recv_one(hdl, nvl);
}

/*ARGSUSED*/
static void
cma_timeout(fmd_hdl_t *hdl, id_t id, void *arg)
{
	if (id == cma.cma_page_timerid)
		cma_page_retry(hdl);
#ifdef sun4v
	/*
	 * cpu offline/online needs to be retried on sun4v because
	 * ldom requests can be asynchronous.
	 */
	else if (id == cma.cma_cpu_timerid)
		cma_cpu_retry(hdl);
#endif
}

#ifdef sun4v
static void *
cma_init_alloc(size_t size)
{
	return (fmd_hdl_alloc(init_hdl, size, FMD_SLEEP));
}

static void
cma_init_free(void *addr, size_t size)
{
	fmd_hdl_free(init_hdl, addr, size);
}
#endif

static const fmd_hdl_ops_t fmd_ops = {
	cma_recv,	/* fmdo_recv */
	cma_timeout,	/* fmdo_timeout */
	NULL,		/* fmdo_close */
	NULL,		/* fmdo_stats */
	NULL,		/* fmdo_gc */
};

static const fmd_prop_t fmd_props[] = {
	{ "cpu_tries", FMD_TYPE_UINT32, "10" },
	{ "cpu_delay", FMD_TYPE_TIME, "1sec" },
#ifdef sun4v
	{ "cpu_ret_mindelay", FMD_TYPE_TIME, "5sec" },
	{ "cpu_ret_maxdelay", FMD_TYPE_TIME, "5min" },
#endif /* sun4v */
	{ "cpu_offline_enable", FMD_TYPE_BOOL, "true" },
	{ "cpu_online_enable", FMD_TYPE_BOOL, "true" },
	{ "cpu_forced_offline", FMD_TYPE_BOOL, "true" },
#ifdef opl
	{ "cpu_blacklist_enable", FMD_TYPE_BOOL, "false" },
	{ "cpu_unblacklist_enable", FMD_TYPE_BOOL, "false" },
#else
	{ "cpu_blacklist_enable", FMD_TYPE_BOOL, "true" },
	{ "cpu_unblacklist_enable", FMD_TYPE_BOOL, "true" },
#endif /* opl */
	{ "page_ret_mindelay", FMD_TYPE_TIME, "1sec" },
	{ "page_ret_maxdelay", FMD_TYPE_TIME, "5min" },
	{ "page_retire_enable", FMD_TYPE_BOOL, "true" },
	{ "page_unretire_enable", FMD_TYPE_BOOL, "true" },
	{ NULL, 0, NULL }
};

static const fmd_hdl_info_t fmd_info = {
	"CPU/Memory Retire Agent", CMA_VERSION, &fmd_ops, fmd_props
};

void
_fmd_init(fmd_hdl_t *hdl)
{
	hrtime_t nsec;
#ifdef i386
	char buf[BUFSIZ];
	const char *dom0 = "control_d";

	/*
	 * Abort the cpumem-retire module if Solaris is running under DomU.
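	 * SI_PLATFORM distinguishes the cases: "i86pc" means native, and
	 * "i86xpv" means a Xen domain, in which case /dev/xen/domcaps must
	 * identify this domain as dom0 ("control_d"); otherwise _fmd_init()
	 * returns without registering the module.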
	 */
	if (sysinfo(SI_PLATFORM, buf, sizeof (buf)) == -1)
		return;

	if (strncmp(buf, "i86pc", sizeof (buf)) == 0) {
		cma_is_native = B_TRUE;
	} else if (strncmp(buf, "i86xpv", sizeof (buf)) != 0) {
		return;
	} else {
		int fd = open("/dev/xen/domcaps", O_RDONLY);

		if (fd != -1) {
			if (read(fd, buf, sizeof (buf)) <= 0 ||
			    strncmp(buf, dom0, strlen(dom0)) != 0) {
				(void) close(fd);
				return;
			}
			(void) close(fd);
		}
		cma_is_native = B_FALSE;
	}
#endif /* i386 */

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0)
		return;	/* invalid data in configuration file */

	fmd_hdl_subscribe(hdl, "fault.cpu.*");
	fmd_hdl_subscribe(hdl, "fault.memory.*");
#ifdef opl
	fmd_hdl_subscribe(hdl, "fault.chassis.SPARC-Enterprise.cpu.*");
#endif

	(void) fmd_stat_create(hdl, FMD_STAT_NOALLOC, sizeof (cma_stats) /
	    sizeof (fmd_stat_t), (fmd_stat_t *)&cma_stats);

	cma.cma_cpu_tries = fmd_prop_get_int32(hdl, "cpu_tries");

	nsec = fmd_prop_get_int64(hdl, "cpu_delay");
	cma.cma_cpu_delay.tv_sec = nsec / NANOSEC;
	cma.cma_cpu_delay.tv_nsec = nsec % NANOSEC;

	cma.cma_page_mindelay = fmd_prop_get_int64(hdl, "page_ret_mindelay");
	cma.cma_page_maxdelay = fmd_prop_get_int64(hdl, "page_ret_maxdelay");

#ifdef sun4v
	cma.cma_cpu_mindelay = fmd_prop_get_int64(hdl, "cpu_ret_mindelay");
	cma.cma_cpu_maxdelay = fmd_prop_get_int64(hdl, "cpu_ret_maxdelay");
#endif

	cma.cma_cpu_dooffline = fmd_prop_get_int32(hdl, "cpu_offline_enable");
	cma.cma_cpu_forcedoffline = fmd_prop_get_int32(hdl,
	    "cpu_forced_offline");
	cma.cma_cpu_doonline = fmd_prop_get_int32(hdl, "cpu_online_enable");
	cma.cma_cpu_doblacklist = fmd_prop_get_int32(hdl,
	    "cpu_blacklist_enable");
	cma.cma_cpu_dounblacklist = fmd_prop_get_int32(hdl,
	    "cpu_unblacklist_enable");
	cma.cma_page_doretire = fmd_prop_get_int32(hdl, "page_retire_enable");
	cma.cma_page_dounretire = fmd_prop_get_int32(hdl,
	    "page_unretire_enable");

	if (cma.cma_page_maxdelay < cma.cma_page_mindelay)
		fmd_hdl_abort(hdl, "page retirement delays conflict\n");

#ifdef sun4v
	init_hdl = hdl;
	cma_lhp = ldom_init(cma_init_alloc, cma_init_free);
#endif
}

void
_fmd_fini(fmd_hdl_t *hdl)
{
#ifdef sun4v
	ldom_fini(cma_lhp);
	cma_cpu_fini(hdl);
#endif
	cma_page_fini(hdl);
}