/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/vmsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/machthread.h>
#include <sys/cpu.h>
#include <sys/cmp.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kmem.h>
#include <sys/cpuvar.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/async.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dditypes.h>
#include <sys/prom_debug.h>
#include <sys/prom_plat.h>
#include <sys/cpu_module.h>
#include <sys/sysmacros.h>
#include <sys/intreg.h>
#include <sys/clock.h>
#include <sys/platform_module.h>
#include <sys/machtrap.h>
#include <sys/ontrap.h>
#include <sys/panic.h>
#include <sys/memlist.h>
#include <sys/bootconf.h>
#include <sys/ivintr.h>
#include <sys/atomic.h>
#include <sys/fm/protocol.h>
#include <sys/fm/cpu/UltraSPARC-III.h>
#include <vm/vm_dep.h>

#ifdef CHEETAHPLUS_ERRATUM_25
#include <sys/cyclic.h>
#endif	/* CHEETAHPLUS_ERRATUM_25 */

/*
 * Set up trap handlers.
 */
void
cpu_init_trap(void)
{
	CH_SET_TRAP(tt_pil15, ch_pil15_interrupt_instr);

	CH_SET_TRAP(tt0_fecc, fecc_err_instr);
	CH_SET_TRAP(tt1_fecc, fecc_err_tl1_instr);
	CH_SET_TRAP(tt1_swtrap0, fecc_err_tl1_cont_instr);
}

static int
getintprop(pnode_t node, char *name, int deflt)
{
	int	value;

	switch (prom_getproplen(node, name)) {
	case sizeof (int):
		(void) prom_getprop(node, name, (caddr_t)&value);
		break;

	default:
		value = deflt;
		break;
	}

	return (value);
}
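/*
 * Usage sketch (illustrative only, not part of the original code):
 * getintprop() falls back to the caller's default whenever the PROM
 * property is missing or not int-sized.  cpu_fiximp() below applies
 * exactly this pattern to its whole property table, e.g.:
 *
 *	dcache_size = getintprop(dnode, "dcache-size", CH_DCACHE_SIZE);
 */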
/*
 * Set the magic constants of the implementation.
 */
/*ARGSUSED*/
void
cpu_fiximp(pnode_t dnode)
{
	int i, a;

	static struct {
		char	*name;
		int	*var;
		int	defval;
	} prop[] = {
		"dcache-size", &dcache_size, CH_DCACHE_SIZE,
		"dcache-line-size", &dcache_linesize, CH_DCACHE_LSIZE,
		"icache-size", &icache_size, CH_ICACHE_SIZE,
		"icache-line-size", &icache_linesize, CH_ICACHE_LSIZE,
		"ecache-size", &ecache_size, CH_ECACHE_MAX_SIZE,
		"ecache-line-size", &ecache_alignsize, CH_ECACHE_MAX_LSIZE,
		"ecache-associativity", &ecache_associativity, CH_ECACHE_NWAY
	};

	extern int exec_lpg_disable, use_brk_lpg, use_stk_lpg, use_zmap_lpg;


	for (i = 0; i < sizeof (prop) / sizeof (prop[0]); i++)
		*prop[i].var = getintprop(dnode, prop[i].name, prop[i].defval);

	ecache_setsize = ecache_size / ecache_associativity;

	vac_size = CH_VAC_SIZE;
	vac_mask = MMU_PAGEMASK & (vac_size - 1);
	i = 0; a = vac_size;
	while (a >>= 1)
		++i;
	vac_shift = i;
	shm_alignment = vac_size;
	vac = 1;

	/*
	 * Cheetah's large page support has problems with large numbers of
	 * large pages, so just disable large pages out-of-the-box.
	 */
	exec_lpg_disable = 1;
	use_brk_lpg = 0;
	use_stk_lpg = 0;
	use_zmap_lpg = 0;
}

void
send_mondo_set(cpuset_t set)
{
	int lo, busy, nack, shipped = 0;
	uint16_t i, cpuids[IDSR_BN_SETS];
	uint64_t idsr, nackmask = 0, busymask, curnack, curbusy;
	uint64_t starttick, endtick, tick, lasttick;
#if (NCPU > IDSR_BN_SETS)
	int index = 0;
	int ncpuids = 0;
#endif
#ifdef CHEETAHPLUS_ERRATUM_25
	int recovered = 0;
	int cpuid;
#endif

	ASSERT(!CPUSET_ISNULL(set));
	starttick = lasttick = gettick();

#if (NCPU <= IDSR_BN_SETS)
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			shipit(i, shipped);
			nackmask |= IDSR_NACK_BIT(shipped);
			cpuids[shipped++] = i;
			CPUSET_DEL(set, i);
			if (CPUSET_ISNULL(set))
				break;
		}
	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);
#else
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			ncpuids++;

			/*
			 * Ship only to the first (IDSR_BN_SETS) CPUs.  If we
			 * find we have shipped to more than (IDSR_BN_SETS)
			 * CPUs, set "index" to the highest numbered CPU in
			 * the set so we can ship to other CPUs a bit later on.
			 */
			if (shipped < IDSR_BN_SETS) {
				shipit(i, shipped);
				nackmask |= IDSR_NACK_BIT(shipped);
				cpuids[shipped++] = i;
				CPUSET_DEL(set, i);
				if (CPUSET_ISNULL(set))
					break;
			} else
				index = (int)i;
		}

	CPU_STATS_ADDQ(CPU, sys, xcalls, ncpuids);
#endif

	busymask = IDSR_NACK_TO_BUSY(nackmask);
	busy = nack = 0;
	endtick = starttick + xc_tick_limit;
	for (;;) {
		idsr = getidsr();
#if (NCPU <= IDSR_BN_SETS)
		if (idsr == 0)
			break;
#else
		if (idsr == 0 && shipped == ncpuids)
			break;
#endif
		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a
		 * breakpoint.  Adjust endtick accordingly to avoid
		 * panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
#ifdef CHEETAHPLUS_ERRATUM_25
			cpuid = -1;
			for (i = 0; i < IDSR_BN_SETS; i++) {
				if (idsr & (IDSR_NACK_BIT(i) |
				    IDSR_BUSY_BIT(i))) {
					cpuid = cpuids[i];
					break;
				}
			}
			if (cheetah_sendmondo_recover && cpuid != -1 &&
			    recovered == 0) {
				if (mondo_recover(cpuid, i)) {
					/*
					 * We claimed the whole memory or
					 * full scan is disabled.
					 */
					recovered++;
				}
				tick = gettick();
				endtick = tick + xc_tick_limit;
				lasttick = tick;
				/*
				 * Recheck idsr
				 */
				continue;
			} else
#endif	/* CHEETAHPLUS_ERRATUM_25 */
			{
				cmn_err(CE_CONT, "send mondo timeout "
				    "[%d NACK %d BUSY]\nIDSR 0x%" PRIx64
				    "  cpuids:", nack, busy, idsr);
				for (i = 0; i < IDSR_BN_SETS; i++) {
					if (idsr & (IDSR_NACK_BIT(i) |
					    IDSR_BUSY_BIT(i))) {
						cmn_err(CE_CONT, " 0x%x",
						    cpuids[i]);
					}
				}
				cmn_err(CE_CONT, "\n");
				cmn_err(CE_PANIC, "send_mondo_set: timeout");
			}
		}
		curnack = idsr & nackmask;
		curbusy = idsr & busymask;
#if (NCPU > IDSR_BN_SETS)
		if (shipped < ncpuids) {
			uint64_t cpus_left;
			uint16_t next = (uint16_t)index;

			cpus_left = ~(IDSR_NACK_TO_BUSY(curnack) | curbusy) &
			    busymask;

			if (cpus_left) {
				do {
					/*
					 * Sequence through and ship to the
					 * remainder of the CPUs in the system
					 * (e.g. other than the first
					 * (IDSR_BN_SETS)) in reverse order.
					 */
					lo = lowbit(cpus_left) - 1;
					i = IDSR_BUSY_IDX(lo);
					shipit(next, i);
					shipped++;
					cpuids[i] = next;

					/*
					 * If we've processed all the CPUs,
					 * exit the loop now and save
					 * instructions.
					 */
					if (shipped == ncpuids)
						break;

					for ((index = ((int)next - 1));
					    index >= 0; index--)
						if (CPU_IN_SET(set, index)) {
							next = (uint16_t)index;
							break;
						}

					cpus_left &= ~(1ull << lo);
				} while (cpus_left);
#ifdef CHEETAHPLUS_ERRATUM_25
				/*
				 * Clear recovered because we are sending to
				 * a new set of targets.
				 */
				recovered = 0;
#endif
				continue;
			}
		}
#endif
		if (curbusy) {
			busy++;
			continue;
		}

#ifdef SEND_MONDO_STATS
		{
			int n = gettick() - starttick;
			if (n < 8192)
				x_nack_stimes[n >> 7]++;
		}
#endif
		while (gettick() < (tick + sys_clock_mhz))
			;
		do {
			lo = lowbit(curnack) - 1;
			i = IDSR_NACK_IDX(lo);
			shipit(cpuids[i], i);
			curnack &= ~(1ull << lo);
		} while (curnack);
		nack++;
		busy = 0;
	}
#ifdef SEND_MONDO_STATS
	{
		int n = gettick() - starttick;
		if (n < 8192)
			x_set_stimes[n >> 7]++;
		else
			x_set_ltimes[(n >> 13) & 0xf]++;
	}
	x_set_cpus[shipped]++;
#endif
}
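/*
 * Illustrative note (not part of the original code): the NACK retry
 * loop above walks the set bits of a mask with lowbit(), which returns
 * the 1-based position of the least significant set bit (0 for an
 * empty mask), so "lowbit(mask) - 1" is the bit index:
 *
 *	while (mask != 0) {
 *		lo = lowbit(mask) - 1;		-- lowest set bit
 *		...handle bit lo...
 *		mask &= ~(1ull << lo);		-- clear it and continue
 *	}
 */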
/*
 * Handles error logging for implementation-specific error types.
 */
/*ARGSUSED*/
int
cpu_impl_async_log_err(void *flt, errorq_elem_t *eqep)
{
	/* There aren't any error types which are specific to Cheetah only */
	return (CH_ASYNC_LOG_UNKNOWN);
}

/*
 * Figure out if Ecache is direct-mapped (Cheetah or Cheetah+ with Ecache
 * control ECCR_ASSOC bit off) or 2-way (Cheetah+ with ECCR_ASSOC on).
 * We need to do this on the fly because we may have mixed Cheetah+'s with
 * both direct and 2-way Ecaches.
 */
int
cpu_ecache_nway(void)
{
	return (CH_ECACHE_NWAY);
}

/*
 * Note that these are entered into the table: Fatal Errors (PERR, IERR,
 * ISAP, EMU) first, orphaned UCU/UCC, AFAR Overwrite policy, finally IVU, IVC.
 * AFAR overwrite policy is:
 * UCU,UCC > UE,EDU,WDU,CPU > CE,EDC,EMC,WDC,CPC > TO,BERR
 */
ecc_type_to_info_t ecc_type_to_info[] = {

	/* Fatal Errors */
	C_AFSR_PERR,	"PERR ",	ECC_ALL_TRAPS,	CPU_FATAL,
		"PERR Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM2,
		FM_EREPORT_CPU_USIII_PERR,
	C_AFSR_IERR,	"IERR ",	ECC_ALL_TRAPS,	CPU_FATAL,
		"IERR Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM2,
		FM_EREPORT_CPU_USIII_IERR,
	C_AFSR_ISAP,	"ISAP ",	ECC_ALL_TRAPS,	CPU_FATAL,
		"ISAP Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_ISAP,
	C_AFSR_EMU,	"EMU ",		ECC_ASYNC_TRAPS, CPU_FATAL,
		"EMU Fatal",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_EMU,

	/* Orphaned UCC/UCU Errors */
	C_AFSR_UCU,	"OUCU ",	ECC_ORPH_TRAPS,	CPU_ORPH,
		"Orphaned UCU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCU,
	C_AFSR_UCC,	"OUCC ",	ECC_ORPH_TRAPS,	CPU_ORPH,
		"Orphaned UCC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCC,

	/* UCU, UCC */
	C_AFSR_UCU,	"UCU ",		ECC_F_TRAP,	CPU_UE_ECACHE,
		"UCU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCU,
	C_AFSR_UCC,	"UCC ",		ECC_F_TRAP,	CPU_CE_ECACHE,
		"UCC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCC,

	/* UE, EDU:ST, EDU:BLD, WDU, CPU */
	C_AFSR_UE,	"UE ",		ECC_ASYNC_TRAPS, CPU_UE,
		"Uncorrectable system bus (UE)",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_UE,
	C_AFSR_EDU,	"EDU ",		ECC_C_TRAP,	CPU_UE_ECACHE_RETIRE,
		"EDU:ST",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDUST,
	C_AFSR_EDU,	"EDU ",		ECC_D_TRAP,	CPU_UE_ECACHE_RETIRE,
		"EDU:BLD",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDUBL,
	C_AFSR_WDU,	"WDU ",		ECC_C_TRAP,	CPU_UE_ECACHE_RETIRE,
		"WDU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_WDU,
	C_AFSR_CPU,	"CPU ",		ECC_C_TRAP,	CPU_UE_ECACHE,
		"CPU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_CPU,

	/* CE, EDC, EMC, WDC, CPC */
	C_AFSR_CE,	"CE ",		ECC_C_TRAP,	CPU_CE,
		"Corrected system bus (CE)",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_CE,
	C_AFSR_EDC,	"EDC ",		ECC_C_TRAP,	CPU_CE_ECACHE,
		"EDC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDC,
	C_AFSR_EMC,	"EMC ",		ECC_C_TRAP,	CPU_EMC,
		"EMC",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_EMC,
	C_AFSR_WDC,	"WDC ",		ECC_C_TRAP,	CPU_CE_ECACHE,
		"WDC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_WDC,
	C_AFSR_CPC,	"CPC ",		ECC_C_TRAP,	CPU_CE_ECACHE,
		"CPC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_CPC,

	/* TO, BERR */
	C_AFSR_TO,	"TO ",		ECC_ASYNC_TRAPS, CPU_TO,
		"Timeout (TO)",
		FM_EREPORT_PAYLOAD_IO,
		FM_EREPORT_CPU_USIII_TO,
	C_AFSR_BERR,	"BERR ",	ECC_ASYNC_TRAPS, CPU_BERR,
		"Bus Error (BERR)",
		FM_EREPORT_PAYLOAD_IO,
		FM_EREPORT_CPU_USIII_BERR,

	/* IVU, IVC */
	C_AFSR_IVU,	"IVU ",		ECC_C_TRAP,	CPU_IV,
		"IVU",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_IVU,
	C_AFSR_IVC,	"IVC ",		ECC_C_TRAP,	CPU_IV,
		"IVC",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_IVC,

	0,		NULL,		0,		0,
		NULL,
		FM_EREPORT_PAYLOAD_UNKNOWN,
		FM_EREPORT_CPU_USIII_UNKNOWN,
};
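/*
 * Illustrative sketch (an assumption about the consumer; the AFSR-bit
 * field name is abbreviated for the example): the common error-handling
 * code is expected to scan ecc_type_to_info[] in order and take the
 * first entry whose bit is set in the AFSR, which is why fatal errors
 * are listed first and IVU/IVC last:
 *
 *	for (eccp = ecc_type_to_info; eccp->ec_afsr_bit != 0; eccp++)
 *		if ((afsr & eccp->ec_afsr_bit) != 0)
 *			break;			-- first match wins
 */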
/*
 * Prioritized list of Error bits for AFAR overwrite.
 * See Cheetah PRM P.6.1
 * Class 4:  UCC, UCU
 * Class 3:  UE, EDU, EMU, WDU, CPU
 * Class 2:  CE, EDC, EMC, WDC, CPC
 * Class 1:  TO, BERR
 */
uint64_t afar_overwrite[] = {
	C_AFSR_UCC | C_AFSR_UCU,
	C_AFSR_UE | C_AFSR_EDU | C_AFSR_EMU | C_AFSR_WDU | C_AFSR_CPU,
	C_AFSR_CE | C_AFSR_EDC | C_AFSR_EMC | C_AFSR_WDC | C_AFSR_CPC,
	C_AFSR_TO | C_AFSR_BERR,
	0
};

/*
 * Prioritized list of Error bits for ESYND overwrite.
 * See Cheetah PRM P.6.2
 * Class 2:  UE, IVU, EDU, WDU, UCU, CPU
 * Class 1:  CE, IVC, EDC, WDC, UCC, CPC
 */
uint64_t esynd_overwrite[] = {
	C_AFSR_UE | C_AFSR_IVU | C_AFSR_EDU | C_AFSR_WDU | C_AFSR_UCU |
	    C_AFSR_CPU,
	C_AFSR_CE | C_AFSR_IVC | C_AFSR_EDC | C_AFSR_WDC | C_AFSR_UCC |
	    C_AFSR_CPC,
	0
};

/*
 * Prioritized list of Error bits for MSYND overwrite.
 * See Cheetah PRM P.6.3
 * Class 2:  EMU
 * Class 1:  EMC
 */
uint64_t msynd_overwrite[] = {
	C_AFSR_EMU,
	C_AFSR_EMC,
	0
};

/*
 * Change CPU speed bits -- the new speed will be normal-speed/divisor.
 *
 * The Jalapeno memory controllers are required to drain outstanding
 * memory transactions within 32 JBus clocks in order to be ready
 * to enter Estar mode.  In some corner cases, however, that time
 * fell short.
 *
 * A safe software solution is to force the MCU to act as if in Estar mode,
 * then delay 1us (in ppm code) before asserting the J_CHNG_L signal.
 * To reverse the effect, upon exiting Estar, software restores the
 * MCU to its original state.
 */
/* ARGSUSED1 */
void
cpu_change_speed(uint64_t divisor, uint64_t arg2)
{
	bus_config_eclk_t	*bceclk;
	uint64_t		reg;

	for (bceclk = bus_config_eclk; bceclk->divisor; bceclk++) {
		if (bceclk->divisor != divisor)
			continue;
		reg = get_safari_config();
		reg &= ~SAFARI_CONFIG_ECLK_MASK;
		reg |= bceclk->mask;
		set_safari_config(reg);
		CPU->cpu_m.divisor = (uchar_t)divisor;
		return;
	}
	/*
	 * We will reach here only if OBP and kernel don't agree on
	 * the speeds supported by the CPU.
	 */
	cmn_err(CE_WARN, "cpu_change_speed: bad divisor %" PRIu64, divisor);
}
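/*
 * Usage sketch (illustrative; assumes divisors 1 and 2 appear in
 * bus_config_eclk[]): the new speed is normal-speed/divisor, so
 *
 *	cpu_change_speed(2, 0);		-- run at half speed
 *	cpu_change_speed(1, 0);		-- restore full speed
 *
 * Any divisor not listed in bus_config_eclk[] just logs the warning
 * above and leaves the clock unchanged.
 */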
/*
 * CPU private initialization.  This includes allocating the cpu_private
 * data structure, initializing it, and initializing the scrubber for this
 * cpu.  This function calls cpu_init_ecache_scrub_dr to init the scrubber.
 * We use kmem_cache_create for the cheetah private data structure because
 * it needs to be allocated on a PAGESIZE (8192) byte boundary.
 */
void
cpu_init_private(struct cpu *cp)
{
	cheetah_private_t *chprp;
	int i;

	ASSERT(CPU_PRIVATE(cp) == NULL);

	/* LINTED: E_TRUE_LOGICAL_EXPR */
	ASSERT((offsetof(cheetah_private_t, chpr_tl1_err_data) +
	    sizeof (ch_err_tl1_data_t) * CH_ERR_TL1_TLMAX) <= PAGESIZE);

	/*
	 * Running with a Cheetah+, Jaguar, or Panther on a Cheetah CPU
	 * machine is not a supported configuration.  Attempting to do so
	 * may result in unpredictable failures (e.g. running Cheetah+
	 * CPUs with Cheetah E$ disp flush) so don't allow it.
	 *
	 * This is just defensive code since this configuration mismatch
	 * should have been caught prior to OS execution.
	 */
	if (!IS_CHEETAH(cpunodes[cp->cpu_id].implementation)) {
		cmn_err(CE_PANIC, "CPU%d: UltraSPARC-III+/IV/IV+ not"
		    " supported on UltraSPARC-III code\n", cp->cpu_id);
	}

	/*
	 * If the ch_private_cache has not been created, create it.
	 */
	if (ch_private_cache == NULL) {
		ch_private_cache = kmem_cache_create("ch_private_cache",
		    sizeof (cheetah_private_t), PAGESIZE, NULL, NULL,
		    NULL, NULL, static_arena, 0);
	}

	chprp = CPU_PRIVATE(cp) = kmem_cache_alloc(ch_private_cache, KM_SLEEP);

	bzero(chprp, sizeof (cheetah_private_t));
	chprp->chpr_fecctl0_logout.clo_data.chd_afar = LOGOUT_INVALID;
	chprp->chpr_cecc_logout.clo_data.chd_afar = LOGOUT_INVALID;
	chprp->chpr_async_logout.clo_data.chd_afar = LOGOUT_INVALID;
	for (i = 0; i < CH_ERR_TL1_TLMAX; i++)
		chprp->chpr_tl1_err_data[i].ch_err_tl1_logout.clo_data.chd_afar
		    = LOGOUT_INVALID;

	chprp->chpr_icache_size = CH_ICACHE_SIZE;
	chprp->chpr_icache_linesize = CH_ICACHE_LSIZE;

	cpu_init_ecache_scrub_dr(cp);

	chprp->chpr_ec_set_size = cpunodes[cp->cpu_id].ecache_size /
	    cpu_ecache_nway();

	adjust_hw_copy_limits(cpunodes[cp->cpu_id].ecache_size);
	ch_err_tl1_paddrs[cp->cpu_id] = va_to_pa(chprp);
	ASSERT(ch_err_tl1_paddrs[cp->cpu_id] != -1);
}

/*
 * Clear the error state registers for this CPU.
 * For Cheetah, just clear the AFSR.
 */
void
set_cpu_error_state(ch_cpu_errors_t *cpu_error_regs)
{
	set_asyncflt(cpu_error_regs->afsr & ~C_AFSR_FATAL_ERRS);
}

/*
 * For Cheetah, the error recovery code uses an alternate flush area in the
 * TL>0 fast ECC handler.  ecache_tl1_flushaddr is the physical address of
 * this exclusive displacement flush area.
 */
uint64_t ecache_tl1_flushaddr = (uint64_t)-1;	/* physaddr for E$ flushing */

/*
 * Allocate and initialize the exclusive displacement flush area.
 * Must be called before startup_bop_gone().
 */
caddr_t
ecache_init_scrub_flush_area(caddr_t alloc_base)
{
	unsigned size = 2 * CH_ECACHE_8M_SIZE;
	caddr_t tmp_alloc_base = alloc_base;
	caddr_t flush_alloc_base =
	    (caddr_t)roundup((uintptr_t)alloc_base, size);
	caddr_t ecache_tl1_virtaddr;

	/*
	 * Allocate the physical memory for the exclusive flush area.
	 *
	 * Need to allocate an exclusive flush area that is twice the
	 * largest supported E$ size (2 * CH_ECACHE_8M_SIZE, i.e. 16MB),
	 * physically contiguous, and aligned on twice the largest E$
	 * size boundary.
	 *
	 * Memory allocated via BOP_ALLOC is included in the "cage"
	 * from the DR perspective and due to this, its physical
	 * address will never change and the memory will not be
	 * removed.
	 *
	 * BOP_ALLOC takes 4 arguments: bootops, virtual address hint,
	 * size of the area to allocate, and alignment of the area to
	 * allocate.  It returns zero if the allocation fails, or the
	 * virtual address for a successful allocation.  Memory BOP_ALLOC'd
	 * is physically contiguous.
	 */
	if ((ecache_tl1_virtaddr = (caddr_t)BOP_ALLOC(bootops,
	    flush_alloc_base, size, size)) != NULL) {

		tmp_alloc_base =
		    (caddr_t)roundup((uintptr_t)(ecache_tl1_virtaddr + size),
		    ecache_alignsize);

		/*
		 * Get the physical address of the exclusive flush area.
		 */
		ecache_tl1_flushaddr = va_to_pa(ecache_tl1_virtaddr);

	} else {
		ecache_tl1_virtaddr = (caddr_t)-1;
		cmn_err(CE_NOTE, "!ecache_init_scrub_flush_area failed\n");
	}

	return (tmp_alloc_base);
}

/*
 * Update cpu_offline_set so the scrubber knows which CPUs are offline.
 */
/*ARGSUSED*/
int
cpu_scrub_cpu_setup(cpu_setup_t what, int cpuid, void *arg)
{
	switch (what) {
	case CPU_ON:
	case CPU_INIT:
		CPUSET_DEL(cpu_offline_set, cpuid);
		break;
	case CPU_OFF:
		CPUSET_ADD(cpu_offline_set, cpuid);
		break;
	default:
		break;
	}
	return (0);
}
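/*
 * Usage sketch (illustrative; assumes the generic cpu_setup callback
 * interface from <sys/cpuvar.h>): the scrubber registers this handler
 * so it runs on every CPU online/offline transition.  cpu_lock must be
 * held across registration.
 *
 *	mutex_enter(&cpu_lock);
 *	register_cpu_setup_func(cpu_scrub_cpu_setup, NULL);
 *	mutex_exit(&cpu_lock);
 */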