1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #include <sys/types.h>
27 #include <sys/systm.h>
28 #include <sys/ddi.h>
29 #include <sys/sysmacros.h>
30 #include <sys/archsystm.h>
31 #include <sys/vmsystm.h>
32 #include <sys/machparam.h>
33 #include <sys/machsystm.h>
34 #include <sys/machthread.h>
35 #include <sys/cpu.h>
36 #include <sys/cmp.h>
37 #include <sys/elf_SPARC.h>
38 #include <vm/hat_sfmmu.h>
39 #include <vm/seg_kmem.h>
40 #include <sys/cpuvar.h>
41 #include <sys/cheetahregs.h>
42 #include <sys/us3_module.h>
43 #include <sys/async.h>
44 #include <sys/cmn_err.h>
45 #include <sys/debug.h>
46 #include <sys/dditypes.h>
47 #include <sys/prom_debug.h>
48 #include <sys/prom_plat.h>
49 #include <sys/cpu_module.h>
50 #include <sys/sysmacros.h>
51 #include <sys/intreg.h>
52 #include <sys/clock.h>
53 #include <sys/platform_module.h>
54 #include <sys/machtrap.h>
55 #include <sys/ontrap.h>
56 #include <sys/panic.h>
57 #include <sys/memlist.h>
58 #include <sys/bootconf.h>
59 #include <sys/ivintr.h>
60 #include <sys/atomic.h>
61 #include <sys/fm/protocol.h>
62 #include <sys/fm/cpu/UltraSPARC-III.h>
63 #include <vm/vm_dep.h>
64
65 #ifdef CHEETAHPLUS_ERRATUM_25
66 #include <sys/cyclic.h>
67 #endif /* CHEETAHPLUS_ERRATUM_25 */
68
69 /*
70 * Note that 'Cheetah PRM' refers to:
71 * SPARC V9 JPS1 Implementation Supplement: Sun UltraSPARC-III
72 */
73
74 /*
75 * Setup trap handlers.
76 */
77 void
cpu_init_trap(void)78 cpu_init_trap(void)
79 {
80 CH_SET_TRAP(pil15_epilogue, ch_pil15_interrupt_instr);
81
82 CH_SET_TRAP(tt0_fecc, fecc_err_instr);
83 CH_SET_TRAP(tt1_fecc, fecc_err_tl1_instr);
84 CH_SET_TRAP(tt1_swtrap0, fecc_err_tl1_cont_instr);
85 }
86
87 static int
getintprop(pnode_t node,char * name,int deflt)88 getintprop(pnode_t node, char *name, int deflt)
89 {
90 int value;
91
92 switch (prom_getproplen(node, name)) {
93 case sizeof (int):
94 (void) prom_getprop(node, name, (caddr_t)&value);
95 break;
96
97 default:
98 value = deflt;
99 break;
100 }
101
102 return (value);
103 }
104
105 /*
106 * Set the magic constants of the implementation.
107 */
108 /*ARGSUSED*/
109 void
cpu_fiximp(pnode_t dnode)110 cpu_fiximp(pnode_t dnode)
111 {
112 int i, a;
113
114 static struct {
115 char *name;
116 int *var;
117 int defval;
118 } prop[] = {
119 "dcache-size", &dcache_size, CH_DCACHE_SIZE,
120 "dcache-line-size", &dcache_linesize, CH_DCACHE_LSIZE,
121 "icache-size", &icache_size, CH_ICACHE_SIZE,
122 "icache-line-size", &icache_linesize, CH_ICACHE_LSIZE,
123 "ecache-size", &ecache_size, CH_ECACHE_MAX_SIZE,
124 "ecache-line-size", &ecache_alignsize, CH_ECACHE_MAX_LSIZE,
125 "ecache-associativity", &ecache_associativity, CH_ECACHE_NWAY
126 };
127
128 for (i = 0; i < sizeof (prop) / sizeof (prop[0]); i++)
129 *prop[i].var = getintprop(dnode, prop[i].name, prop[i].defval);
130
131 ecache_setsize = ecache_size / ecache_associativity;
132
133 vac_size = CH_VAC_SIZE;
134 vac_mask = MMU_PAGEMASK & (vac_size - 1);
135 i = 0; a = vac_size;
136 while (a >>= 1)
137 ++i;
138 vac_shift = i;
139 shm_alignment = vac_size;
140 vac = 1;
141
142 /*
143 * Cheetah's large page support has problems with large numbers of
144 * large pages, so just disable large pages out-of-the-box.
145 * Note that the other defaults are set in sun4u/vm/mach_vm_dep.c.
146 */
147 max_uheap_lpsize = MMU_PAGESIZE;
148 max_ustack_lpsize = MMU_PAGESIZE;
149 max_privmap_lpsize = MMU_PAGESIZE;
150 max_utext_lpsize = MMU_PAGESIZE;
151 max_shm_lpsize = MMU_PAGESIZE;
152 max_bootlp_tteszc = TTE8K;
153 }
154
/*
 * Send a mondo (cross-call) interrupt to every CPU in 'set'.
 *
 * The interrupt dispatch hardware provides IDSR_BN_SETS busy/nack slot
 * pairs, so at most IDSR_BN_SETS mondoes can be outstanding at once.
 * When NCPU exceeds IDSR_BN_SETS, the remaining targets are shipped as
 * earlier dispatches drain.  Targets that NACK are re-shipped until they
 * accept or until xc_tick_limit ticks elapse, at which point the routine
 * panics (unless a panic is already in progress).
 */
void
send_mondo_set(cpuset_t set)
{
	int lo, busy, nack, shipped = 0;
	uint16_t i, cpuids[IDSR_BN_SETS];
	uint64_t idsr, nackmask = 0, busymask, curnack, curbusy;
	uint64_t starttick, endtick, tick, lasttick;
#if (NCPU > IDSR_BN_SETS)
	int index = 0;
	int ncpuids = 0;
#endif
#ifdef CHEETAHPLUS_ERRATUM_25
	int recovered = 0;
	int cpuid;
#endif

	ASSERT(!CPUSET_ISNULL(set));
	starttick = lasttick = gettick();

#if (NCPU <= IDSR_BN_SETS)
	/* Every possible target fits in a dispatch slot; ship to all now. */
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			shipit(i, shipped);
			nackmask |= IDSR_NACK_BIT(shipped);
			cpuids[shipped++] = i;
			CPUSET_DEL(set, i);
			if (CPUSET_ISNULL(set))
				break;
		}
	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);
#else
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			ncpuids++;

			/*
			 * Ship only to the first (IDSR_BN_SETS) CPUs.  If we
			 * find we have shipped to more than (IDSR_BN_SETS)
			 * CPUs, set "index" to the highest numbered CPU in
			 * the set so we can ship to other CPUs a bit later on.
			 */
			if (shipped < IDSR_BN_SETS) {
				shipit(i, shipped);
				nackmask |= IDSR_NACK_BIT(shipped);
				cpuids[shipped++] = i;
				CPUSET_DEL(set, i);
				if (CPUSET_ISNULL(set))
					break;
			} else
				index = (int)i;
		}

	CPU_STATS_ADDQ(CPU, sys, xcalls, ncpuids);
#endif

	busymask = IDSR_NACK_TO_BUSY(nackmask);
	busy = nack = 0;
	endtick = starttick + xc_tick_limit;
	for (;;) {
		idsr = getidsr();
#if (NCPU <= IDSR_BN_SETS)
		if (idsr == 0)
			break;
#else
		if (idsr == 0 && shipped == ncpuids)
			break;
#endif
		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point.  Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
#ifdef CHEETAHPLUS_ERRATUM_25
			/*
			 * Pick one CPU that is still busy or nacking so
			 * the mondo-recovery code can try to revive it.
			 */
			cpuid = -1;
			for (i = 0; i < IDSR_BN_SETS; i++) {
				if (idsr & (IDSR_NACK_BIT(i) |
				    IDSR_BUSY_BIT(i))) {
					cpuid = cpuids[i];
					break;
				}
			}
			if (cheetah_sendmondo_recover && cpuid != -1 &&
			    recovered == 0) {
				if (mondo_recover(cpuid, i)) {
					/*
					 * We claimed the whole memory or
					 * full scan is disabled.
					 */
					recovered++;
				}
				/* Recovery took time: restart the timeout. */
				tick = gettick();
				endtick = tick + xc_tick_limit;
				lasttick = tick;
				/*
				 * Recheck idsr
				 */
				continue;
			} else
#endif /* CHEETAHPLUS_ERRATUM_25 */
			{
				cmn_err(CE_CONT, "send mondo timeout "
				    "[%d NACK %d BUSY]\nIDSR 0x%"
				    "" PRIx64 " cpuids:", nack, busy, idsr);
				for (i = 0; i < IDSR_BN_SETS; i++) {
					if (idsr & (IDSR_NACK_BIT(i) |
					    IDSR_BUSY_BIT(i))) {
						cmn_err(CE_CONT, " 0x%x",
						    cpuids[i]);
					}
				}
				cmn_err(CE_CONT, "\n");
				cmn_err(CE_PANIC, "send_mondo_set: timeout");
			}
		}
		curnack = idsr & nackmask;
		curbusy = idsr & busymask;
#if (NCPU > IDSR_BN_SETS)
		if (shipped < ncpuids) {
			uint64_t cpus_left;
			uint16_t next = (uint16_t)index;

			/*
			 * Slots that are neither busy nor nacking have
			 * completed their dispatch and can be reused for
			 * the CPUs we have not shipped to yet.
			 */
			cpus_left = ~(IDSR_NACK_TO_BUSY(curnack) | curbusy) &
			    busymask;

			if (cpus_left) {
				do {
					/*
					 * Sequence through and ship to the
					 * remainder of the CPUs in the system
					 * (e.g. other than the first
					 * (IDSR_BN_SETS)) in reverse order.
					 */
					lo = lowbit(cpus_left) - 1;
					i = IDSR_BUSY_IDX(lo);
					shipit(next, i);
					shipped++;
					cpuids[i] = next;

					/*
					 * If we've processed all the CPUs,
					 * exit the loop now and save
					 * instructions.
					 */
					if (shipped == ncpuids)
						break;

					for ((index = ((int)next - 1));
					    index >= 0; index--)
						if (CPU_IN_SET(set, index)) {
							next = (uint16_t)index;
							break;
						}

					cpus_left &= ~(1ull << lo);
				} while (cpus_left);
#ifdef CHEETAHPLUS_ERRATUM_25
				/*
				 * Clear recovered because we are sending to
				 * a new set of targets.
				 */
				recovered = 0;
#endif
				continue;
			}
		}
#endif
		if (curbusy) {
			busy++;
			continue;
		}

#ifdef SEND_MONDO_STATS
		{
			int n = gettick() - starttick;
			if (n < 8192)
				x_nack_stimes[n >> 7]++;
		}
#endif
		/*
		 * Delay sys_clock_mhz ticks (about one microsecond) before
		 * re-shipping to the CPUs that nacked us.
		 */
		while (gettick() < (tick + sys_clock_mhz))
			;
		do {
			lo = lowbit(curnack) - 1;
			i = IDSR_NACK_IDX(lo);
			shipit(cpuids[i], i);
			curnack &= ~(1ull << lo);
		} while (curnack);
		nack++;
		busy = 0;
	}
#ifdef SEND_MONDO_STATS
	{
		int n = gettick() - starttick;
		if (n < 8192)
			x_set_stimes[n >> 7]++;
		else
			x_set_ltimes[(n >> 13) & 0xf]++;
	}
	x_set_cpus[shipped]++;
#endif
}
362
363 /*
364 * Handles error logging for implementation specific error types.
365 */
366 /*ARGSUSED*/
367 int
cpu_impl_async_log_err(void * flt,errorq_elem_t * eqep)368 cpu_impl_async_log_err(void *flt, errorq_elem_t *eqep)
369 {
370 /* There aren't any error types which are specific to cheetah only */
371 return (CH_ASYNC_LOG_UNKNOWN);
372 }
373
374 /*
375 * Figure out if Ecache is direct-mapped (Cheetah or Cheetah+ with Ecache
376 * control ECCR_ASSOC bit off or 2-way (Cheetah+ with ECCR_ASSOC on).
377 * We need to do this on the fly because we may have mixed Cheetah+'s with
378 * both direct and 2-way Ecaches.
379 */
380 int
cpu_ecache_nway(void)381 cpu_ecache_nway(void)
382 {
383 return (CH_ECACHE_NWAY);
384 }
385
386 /*
387 * Note that these are entered into the table: Fatal Errors (PERR, IERR,
388 * ISAP, EMU) first, orphaned UCU/UCC, AFAR Overwrite policy, finally IVU, IVC.
389 * Afar overwrite policy is:
390 * UCU,UCC > UE,EDU,WDU,CPU > CE,EDC,EMC,WDC,CPC > TO,BERR
391 */
392 ecc_type_to_info_t ecc_type_to_info[] = {
393
394 /* Fatal Errors */
395 C_AFSR_PERR, "PERR ", ECC_ALL_TRAPS, CPU_FATAL,
396 "PERR Fatal",
397 FM_EREPORT_PAYLOAD_SYSTEM2,
398 FM_EREPORT_CPU_USIII_PERR,
399 C_AFSR_IERR, "IERR ", ECC_ALL_TRAPS, CPU_FATAL,
400 "IERR Fatal",
401 FM_EREPORT_PAYLOAD_SYSTEM2,
402 FM_EREPORT_CPU_USIII_IERR,
403 C_AFSR_ISAP, "ISAP ", ECC_ALL_TRAPS, CPU_FATAL,
404 "ISAP Fatal",
405 FM_EREPORT_PAYLOAD_SYSTEM1,
406 FM_EREPORT_CPU_USIII_ISAP,
407 C_AFSR_EMU, "EMU ", ECC_ASYNC_TRAPS, CPU_FATAL,
408 "EMU Fatal",
409 FM_EREPORT_PAYLOAD_MEMORY,
410 FM_EREPORT_CPU_USIII_EMU,
411
412 /* Orphaned UCC/UCU Errors */
413 C_AFSR_UCU, "OUCU ", ECC_ORPH_TRAPS, CPU_ORPH,
414 "Orphaned UCU",
415 FM_EREPORT_PAYLOAD_L2_DATA,
416 FM_EREPORT_CPU_USIII_UCU,
417 C_AFSR_UCC, "OUCC ", ECC_ORPH_TRAPS, CPU_ORPH,
418 "Orphaned UCC",
419 FM_EREPORT_PAYLOAD_L2_DATA,
420 FM_EREPORT_CPU_USIII_UCC,
421
422 /* UCU, UCC */
423 C_AFSR_UCU, "UCU ", ECC_F_TRAP, CPU_UE_ECACHE,
424 "UCU",
425 FM_EREPORT_PAYLOAD_L2_DATA,
426 FM_EREPORT_CPU_USIII_UCU,
427 C_AFSR_UCC, "UCC ", ECC_F_TRAP, CPU_CE_ECACHE,
428 "UCC",
429 FM_EREPORT_PAYLOAD_L2_DATA,
430 FM_EREPORT_CPU_USIII_UCC,
431
432 /* UE, EDU:ST, EDU:BLD, WDU, CPU */
433 C_AFSR_UE, "UE ", ECC_ASYNC_TRAPS, CPU_UE,
434 "Uncorrectable system bus (UE)",
435 FM_EREPORT_PAYLOAD_MEMORY,
436 FM_EREPORT_CPU_USIII_UE,
437 C_AFSR_EDU, "EDU ", ECC_C_TRAP, CPU_UE_ECACHE_RETIRE,
438 "EDU:ST",
439 FM_EREPORT_PAYLOAD_L2_DATA,
440 FM_EREPORT_CPU_USIII_EDUST,
441 C_AFSR_EDU, "EDU ", ECC_D_TRAP, CPU_UE_ECACHE_RETIRE,
442 "EDU:BLD",
443 FM_EREPORT_PAYLOAD_L2_DATA,
444 FM_EREPORT_CPU_USIII_EDUBL,
445 C_AFSR_WDU, "WDU ", ECC_C_TRAP, CPU_UE_ECACHE_RETIRE,
446 "WDU",
447 FM_EREPORT_PAYLOAD_L2_DATA,
448 FM_EREPORT_CPU_USIII_WDU,
449 C_AFSR_CPU, "CPU ", ECC_C_TRAP, CPU_UE_ECACHE,
450 "CPU",
451 FM_EREPORT_PAYLOAD_L2_DATA,
452 FM_EREPORT_CPU_USIII_CPU,
453
454 /* CE, EDC, EMC, WDC, CPC */
455 C_AFSR_CE, "CE ", ECC_C_TRAP, CPU_CE,
456 "Corrected system bus (CE)",
457 FM_EREPORT_PAYLOAD_MEMORY,
458 FM_EREPORT_CPU_USIII_CE,
459 C_AFSR_EDC, "EDC ", ECC_C_TRAP, CPU_CE_ECACHE,
460 "EDC",
461 FM_EREPORT_PAYLOAD_L2_DATA,
462 FM_EREPORT_CPU_USIII_EDC,
463 C_AFSR_EMC, "EMC ", ECC_C_TRAP, CPU_EMC,
464 "EMC",
465 FM_EREPORT_PAYLOAD_MEMORY,
466 FM_EREPORT_CPU_USIII_EMC,
467 C_AFSR_WDC, "WDC ", ECC_C_TRAP, CPU_CE_ECACHE,
468 "WDC",
469 FM_EREPORT_PAYLOAD_L2_DATA,
470 FM_EREPORT_CPU_USIII_WDC,
471 C_AFSR_CPC, "CPC ", ECC_C_TRAP, CPU_CE_ECACHE,
472 "CPC",
473 FM_EREPORT_PAYLOAD_L2_DATA,
474 FM_EREPORT_CPU_USIII_CPC,
475
476 /* TO, BERR */
477 C_AFSR_TO, "TO ", ECC_ASYNC_TRAPS, CPU_TO,
478 "Timeout (TO)",
479 FM_EREPORT_PAYLOAD_IO,
480 FM_EREPORT_CPU_USIII_TO,
481 C_AFSR_BERR, "BERR ", ECC_ASYNC_TRAPS, CPU_BERR,
482 "Bus Error (BERR)",
483 FM_EREPORT_PAYLOAD_IO,
484 FM_EREPORT_CPU_USIII_BERR,
485
486 /* IVU, IVC */
487 C_AFSR_IVU, "IVU ", ECC_C_TRAP, CPU_IV,
488 "IVU",
489 FM_EREPORT_PAYLOAD_SYSTEM1,
490 FM_EREPORT_CPU_USIII_IVU,
491 C_AFSR_IVC, "IVC ", ECC_C_TRAP, CPU_IV,
492 "IVC",
493 FM_EREPORT_PAYLOAD_SYSTEM1,
494 FM_EREPORT_CPU_USIII_IVC,
495
496 0, NULL, 0, 0,
497 NULL,
498 FM_EREPORT_PAYLOAD_UNKNOWN,
499 FM_EREPORT_CPU_USIII_UNKNOWN,
500 };
501
502 /*
503 * Prioritized list of Error bits for AFAR overwrite.
504 * See Cheetah PRM P.6.1
505 * Class 4: UCC, UCU
506 * Class 3: UE, EDU, EMU, WDU, CPU
507 * Class 2: CE, EDC, EMC, WDC, CPC
508 * Class 1: TO, BERR
509 */
510 uint64_t afar_overwrite[] = {
511 C_AFSR_UCC | C_AFSR_UCU,
512 C_AFSR_UE | C_AFSR_EDU | C_AFSR_EMU | C_AFSR_WDU | C_AFSR_CPU,
513 C_AFSR_CE | C_AFSR_EDC | C_AFSR_EMC | C_AFSR_WDC | C_AFSR_CPC,
514 C_AFSR_TO | C_AFSR_BERR,
515 0
516 };
517
518 /*
519 * Prioritized list of Error bits for ESYND overwrite.
520 * See Cheetah PRM P.6.2
521 * Class 2: UE, IVU, EDU, WDU, UCU, CPU
522 * Class 1: CE, IVC, EDC, WDC, UCC, CPC
523 */
524 uint64_t esynd_overwrite[] = {
525 C_AFSR_UE | C_AFSR_IVU | C_AFSR_EDU | C_AFSR_WDU | C_AFSR_UCU |
526 C_AFSR_CPU,
527 C_AFSR_CE | C_AFSR_IVC | C_AFSR_EDC | C_AFSR_WDC | C_AFSR_UCC |
528 C_AFSR_CPC,
529 0
530 };
531
532 /*
533 * Prioritized list of Error bits for MSYND overwrite.
534 * See Cheetah PRM P.6.3
535 * Class 2: EMU
536 * Class 1: EMC
537 */
538 uint64_t msynd_overwrite[] = {
539 C_AFSR_EMU,
540 C_AFSR_EMC,
541 0
542 };
543
544 /*
545 * change cpu speed bits -- new speed will be normal-speed/divisor.
546 *
547 * The Jalapeno memory controllers are required to drain outstanding
548 * memory transactions within 32 JBus clocks in order to be ready
549 * to enter Estar mode. In some corner cases however, that time
550 * fell short.
551 *
552 * A safe software solution is to force MCU to act like in Estar mode,
553 * then delay 1us (in ppm code) prior to assert J_CHNG_L signal.
554 * To reverse the effect, upon exiting Estar, software restores the
555 * MCU to its original state.
556 */
557 /* ARGSUSED1 */
558 void
cpu_change_speed(uint64_t divisor,uint64_t arg2)559 cpu_change_speed(uint64_t divisor, uint64_t arg2)
560 {
561 bus_config_eclk_t *bceclk;
562 uint64_t reg;
563 processor_info_t *pi = &(CPU->cpu_type_info);
564
565 for (bceclk = bus_config_eclk; bceclk->divisor; bceclk++) {
566 if (bceclk->divisor != divisor)
567 continue;
568 reg = get_safari_config();
569 reg &= ~SAFARI_CONFIG_ECLK_MASK;
570 reg |= bceclk->mask;
571 set_safari_config(reg);
572 CPU->cpu_m.divisor = (uchar_t)divisor;
573 cpu_set_curr_clock(((uint64_t)pi->pi_clock * 1000000) /
574 divisor);
575 return;
576 }
577 /*
578 * We will reach here only if OBP and kernel don't agree on
579 * the speeds supported by the CPU.
580 */
581 cmn_err(CE_WARN, "cpu_change_speed: bad divisor %" PRIu64, divisor);
582 }
583
584 /*
585 * Cpu private initialization. This includes allocating the cpu_private
586 * data structure, initializing it, and initializing the scrubber for this
587 * cpu. This function calls cpu_init_ecache_scrub_dr to init the scrubber.
588 * We use kmem_cache_create for the cheetah private data structure because
589 * it needs to be allocated on a PAGESIZE (8192) byte boundary.
590 */
591 void
cpu_init_private(struct cpu * cp)592 cpu_init_private(struct cpu *cp)
593 {
594 cheetah_private_t *chprp;
595 int i;
596
597 ASSERT(CPU_PRIVATE(cp) == NULL);
598
599 /* LINTED: E_TRUE_LOGICAL_EXPR */
600 ASSERT((offsetof(cheetah_private_t, chpr_tl1_err_data) +
601 sizeof (ch_err_tl1_data_t) * CH_ERR_TL1_TLMAX) <= PAGESIZE);
602
603 /*
604 * Running with a Cheetah+, Jaguar, or Panther on a Cheetah CPU
605 * machine is not a supported configuration. Attempting to do so
606 * may result in unpredictable failures (e.g. running Cheetah+
607 * CPUs with Cheetah E$ disp flush) so don't allow it.
608 *
609 * This is just defensive code since this configuration mismatch
610 * should have been caught prior to OS execution.
611 */
612 if (!IS_CHEETAH(cpunodes[cp->cpu_id].implementation)) {
613 cmn_err(CE_PANIC, "CPU%d: UltraSPARC-III+/IV/IV+ not"
614 " supported on UltraSPARC-III code\n", cp->cpu_id);
615 }
616
617 /*
618 * If the ch_private_cache has not been created, create it.
619 */
620 if (ch_private_cache == NULL) {
621 ch_private_cache = kmem_cache_create("ch_private_cache",
622 sizeof (cheetah_private_t), PAGESIZE, NULL, NULL,
623 NULL, NULL, static_arena, 0);
624 }
625
626 chprp = CPU_PRIVATE(cp) = kmem_cache_alloc(ch_private_cache, KM_SLEEP);
627
628 bzero(chprp, sizeof (cheetah_private_t));
629 chprp->chpr_fecctl0_logout.clo_data.chd_afar = LOGOUT_INVALID;
630 chprp->chpr_cecc_logout.clo_data.chd_afar = LOGOUT_INVALID;
631 chprp->chpr_async_logout.clo_data.chd_afar = LOGOUT_INVALID;
632 for (i = 0; i < CH_ERR_TL1_TLMAX; i++)
633 chprp->chpr_tl1_err_data[i].ch_err_tl1_logout.clo_data.chd_afar
634 = LOGOUT_INVALID;
635
636 chprp->chpr_icache_size = CH_ICACHE_SIZE;
637 chprp->chpr_icache_linesize = CH_ICACHE_LSIZE;
638
639 cpu_init_ecache_scrub_dr(cp);
640
641 chprp->chpr_ec_set_size = cpunodes[cp->cpu_id].ecache_size /
642 cpu_ecache_nway();
643
644 adjust_hw_copy_limits(cpunodes[cp->cpu_id].ecache_size);
645 ch_err_tl1_paddrs[cp->cpu_id] = va_to_pa(chprp);
646 ASSERT(ch_err_tl1_paddrs[cp->cpu_id] != -1);
647 }
648
649 /*
650 * Clear the error state registers for this CPU.
651 * For Cheetah, just clear the AFSR
652 */
653 void
set_cpu_error_state(ch_cpu_errors_t * cpu_error_regs)654 set_cpu_error_state(ch_cpu_errors_t *cpu_error_regs)
655 {
656 set_asyncflt(cpu_error_regs->afsr & ~C_AFSR_FATAL_ERRS);
657 }
658
659 /*
660 * For Cheetah, the error recovery code uses an alternate flush area in the
661 * TL>0 fast ECC handler. ecache_tl1_flushaddr is the physical address of
662 * this exclusive displacement flush area.
663 */
664 uint64_t ecache_tl1_flushaddr = (uint64_t)-1; /* physaddr for E$ flushing */
665
666 /*
667 * Allocate and initialize the exclusive displacement flush area.
668 */
669 caddr_t
ecache_init_scrub_flush_area(caddr_t alloc_base)670 ecache_init_scrub_flush_area(caddr_t alloc_base)
671 {
672 unsigned size = 2 * CH_ECACHE_8M_SIZE;
673 caddr_t tmp_alloc_base = alloc_base;
674 caddr_t flush_alloc_base =
675 (caddr_t)roundup((uintptr_t)alloc_base, size);
676 caddr_t ecache_tl1_virtaddr;
677
678 /*
679 * Allocate the physical memory for the exclusive flush area
680 *
681 * Need to allocate an exclusive flush area that is twice the
682 * largest supported E$ size, physically contiguous, and
683 * aligned on twice the largest E$ size boundary.
684 *
685 * Memory allocated via prom_alloc is included in the "cage"
686 * from the DR perspective and due to this, its physical
687 * address will never change and the memory will not be
688 * removed.
689 *
690 * prom_alloc takes 3 arguments: bootops, virtual address hint,
691 * size of the area to allocate, and alignment of the area to
692 * allocate. It returns zero if the allocation fails, or the
693 * virtual address for a successful allocation. Memory prom_alloc'd
694 * is physically contiguous.
695 */
696 if ((ecache_tl1_virtaddr =
697 prom_alloc(flush_alloc_base, size, size)) != NULL) {
698
699 tmp_alloc_base =
700 (caddr_t)roundup((uintptr_t)(ecache_tl1_virtaddr + size),
701 ecache_alignsize);
702
703 /*
704 * get the physical address of the exclusive flush area
705 */
706 ecache_tl1_flushaddr = va_to_pa(ecache_tl1_virtaddr);
707
708 } else {
709 ecache_tl1_virtaddr = (caddr_t)-1;
710 cmn_err(CE_NOTE, "!ecache_init_scrub_flush_area failed\n");
711 }
712
713 return (tmp_alloc_base);
714 }
715
716 /*
717 * Update cpu_offline_set so the scrubber knows which cpus are offline
718 */
719 /*ARGSUSED*/
720 int
cpu_scrub_cpu_setup(cpu_setup_t what,int cpuid,void * arg)721 cpu_scrub_cpu_setup(cpu_setup_t what, int cpuid, void *arg)
722 {
723 switch (what) {
724 case CPU_ON:
725 case CPU_INIT:
726 CPUSET_DEL(cpu_offline_set, cpuid);
727 break;
728 case CPU_OFF:
729 CPUSET_ADD(cpu_offline_set, cpuid);
730 break;
731 default:
732 break;
733 }
734 return (0);
735 }
736