/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * CPU management for serengeti DR
 *
 * There are three states a CPU can be in:
 *
 *   disconnected:            In reset
 *   connected,unconfigured:  Idling in OBP's idle loop
 *   configured:              Running Solaris
 *
 * State transitions:
 *
 *            connect                 configure
 *          ------------>           ------------>
 * disconnected        connected              configured
 *                    unconfigured
 *          <------------           <-------------
 *           disconnect              unconfigure
 *
 * Firmware involvement:
 *
 *           start_cpu(SC)
 *           prom_serengeti_wakeupcpu(OBP)
 *          ------------>            ------------------------->
 * disconnected        connected                         configured
 *                    unconfigured
 *          <-----------             <-------------------------
 *   prom_serengeti_cpu_off(OBP)     prom_serengeti_cpu_off(OBP)
 *          stop_cpu(SC)             prom_serengeti_wakeupcpu(OBP)
 *
 * SIR (Software Initiated Reset) is used to unconfigure a CPU.
 * After the CPU has completed flushing the caches, it issues an
 * sir instruction to put itself through POST.  POST detects that
 * it is an SIR, and re-enters OBP as a slave.  When the operation
 * completes successfully, the CPU will be idling in OBP.
 */
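
/*
 * Illustrative sketch only (not part of this driver): the three DR
 * states above expressed as an enum for reference.  The names are
 * hypothetical; the actual state is tracked by the sbd/sbdp layers.
 *
 *	typedef enum {
 *		SBDP_CPU_DISCONNECTED,	(in reset)
 *		SBDP_CPU_CONNECTED,	(unconfigured, in OBP's idle loop)
 *		SBDP_CPU_CONFIGURED	(running Solaris)
 *	} sbdp_cpu_dr_state_t;
 */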

#include <sys/obpdefs.h>
#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/membar.h>
#include <sys/x_call.h>
#include <sys/machsystm.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/pte.h>
#include <vm/hat_sfmmu.h>
#include <sys/promif.h>
#include <sys/note.h>
#include <sys/vmsystm.h>
#include <vm/seg_kmem.h>

#include <sys/sbd_ioctl.h>
#include <sys/sbd.h>
#include <sys/sbdp_priv.h>
#include <sys/sbdp_mem.h>
#include <sys/sbdp_error.h>
#include <sys/sgsbbc_iosram.h>
#include <sys/prom_plat.h>
#include <sys/cheetahregs.h>

uint64_t	*sbdp_valp;
extern uint64_t	va_to_pa(void *);
static int	sbdp_cpu_ntries = 50000;
static int	sbdp_cpu_delay = 100;
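
/*
 * The two tunables above bound the busy-wait in sbdp_cpu_poweron():
 * up to sbdp_cpu_ntries iterations of DELAY(sbdp_cpu_delay), i.e.
 * roughly 50000 * 100us = 5 seconds for the CPU to reach its idle
 * thread.
 */
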
void	sbdp_get_cpu_sram_addr(uint64_t, uint64_t);
static int	cpusram_map(caddr_t *, pgcnt_t *);
static void	cpusram_unmap(caddr_t *, pgcnt_t);
extern int	prom_serengeti_wakeupcpu(pnode_t);
extern int	prom_serengeti_cpu_off(pnode_t);
extern sbdp_wnode_t *sbdp_get_wnodep(int);
extern caddr_t	sbdp_shutdown_va;
static int	sbdp_prom_get_cpu(void *arg, int changed);
static void	sbdp_cpu_shutdown_self(void);

int
sbdp_disconnect_cpu(sbdp_handle_t *hp, dev_info_t *dip, processorid_t cpuid)
{
	pnode_t		nodeid;
	int		bd, wnode;
	sbdp_wnode_t	*wnodep;
	sbdp_bd_t	*bdp = NULL;
	int		rv = 0;
	processorid_t	cpu = cpuid;
	processorid_t	portid;
	static fn_t	f = "sbdp_disconnect_cpu";

	SBDP_DBG_FUNC("%s\n", f);

	nodeid = ddi_get_nodeid(dip);

	/*
	 * Get board number and node number
	 * The check for determining if nodeid is valid is done inside
	 * sbdp_get_bd_and_wnode_num.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Grab the lock to prevent status threads from accessing
	 * registers on the CPU when it is being put into reset.
	 */
	wnodep = sbdp_get_wnodep(wnode);
	bdp = &wnodep->bds[bd];
	ASSERT(bdp);
	mutex_enter(&bdp->bd_mutex);

	/*
	 * Mark the CPU in reset.  This should be done before calling
	 * the SC because we won't know at which stage it failed if
	 * the SC call returns failure.
	 */
	sbdp_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid), 1);

	/*
	 * Ask OBP to mark the CPU as in POST
	 */
	if (SBDP_INJECT_ERROR(f, 1) || prom_serengeti_cpu_off(nodeid) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Ask the SC to put the CPU into reset.  If the first
	 * core is not present, the stop CPU interface needs
	 * to be called with the portid rather than the cpuid.
	 */
	portid = SG_CPUID_TO_PORTID(cpuid);
	if (!SBDP_IS_CPU_PRESENT(bdp, SG_CPUID_TO_CPU_UNIT(portid))) {
		cpu = portid;
	}

	if (SBDP_INJECT_ERROR(f, 2) || sbdp_stop_cpu(cpu) != 0) {

		rv = -1;
		goto out;
	}

out:
	if (bdp != NULL) {
		mutex_exit(&bdp->bd_mutex);
	}

	if (rv != 0) {
		sbdp_set_err(hp->h_err, ESGT_STOPCPU, NULL);
	}

	return (rv);
}

int
sbdp_connect_cpu(sbdp_handle_t *hp, dev_info_t *dip, processorid_t cpuid)
{
	pnode_t		nodeid;
	sbd_error_t	*sep;
	int		i;
	int		bd, wnode;
	int		rv = 0;
	static fn_t	f = "sbdp_connect_cpu";

	SBDP_DBG_FUNC("%s\n", f);

	sep = hp->h_err;

	nodeid = ddi_get_nodeid(dip);

	/*
	 * The check for determining if nodeid is valid is done inside
	 * sbdp_get_bd_and_wnode_num.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Ask the SC to bring the CPU out of reset.
	 * At this point, the sb_dev_present bit is not set for the CPU.
	 * From the sbd point of view the CPU is not present yet.  No
	 * status threads will try to read registers off the CPU.
	 * Since we are already holding sb_mutex, it is not necessary
	 * to grab the board mutex when checking and setting the
	 * cpus_in_reset bit.
	 */
	if (sbdp_is_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid))) {

		sbdp_wnode_t	*wnodep;
		sbdp_bd_t	*bdp = NULL;
		processorid_t	cpu = cpuid;
		processorid_t	portid;

		wnodep = sbdp_get_wnodep(wnode);
		bdp = &wnodep->bds[bd];
		ASSERT(bdp);

		/*
		 * If the first core is not present, the start CPU
		 * interface needs to be called with the portid rather
		 * than the cpuid.
		 */
		portid = SG_CPUID_TO_PORTID(cpuid);
		if (!SBDP_IS_CPU_PRESENT(bdp, SG_CPUID_TO_CPU_UNIT(portid))) {
			cpu = portid;
		}

		if (SBDP_INJECT_ERROR(f, 1) || sbdp_start_cpu(cpu) != 0) {

			rv = -1;
			goto out;
		}

		if (SBDP_INJECT_ERROR(f, 2) ||
		    prom_serengeti_wakeupcpu(nodeid) != 0) {

			rv = -1;
			goto out;
		}
	}

	/*
	 * Mark the CPU out of reset.
	 */
	sbdp_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid), 0);

	/*
	 * Refresh the bd info.  We need to wait until all
	 * CPUs are out of reset.
	 */
	for (i = 0; i < SG_MAX_CPUS_PER_BD; i++)
		if (sbdp_is_cpu_present(wnode, bd, i) &&
		    sbdp_is_cpu_in_reset(wnode, bd, i) == 1) {
			break;
		}

	if (i == SG_MAX_CPUS_PER_BD) {
		/*
		 * All CPUs are out of reset, so it is safe to
		 * update the bd info.
		 */
		sbdp_add_new_bd_info(wnode, bd);
	}

out:
	if (rv != 0)
		sbdp_set_err(sep, ESGT_WAKEUPCPU, NULL);

	return (rv);
}

int
sbdp_cpu_poweron(struct cpu *cp)
{
	int		cpuid;
	int		ntries;
	pnode_t		nodeid;
	extern void	restart_other_cpu(int);
	static fn_t	f = "sbdp_cpu_poweron";

	SBDP_DBG_FUNC("%s\n", f);

	ASSERT(MUTEX_HELD(&cpu_lock));

	ntries = sbdp_cpu_ntries;
	cpuid = cp->cpu_id;

	nodeid = cpunodes[cpuid].nodeid;
	ASSERT(nodeid != (pnode_t)0);

	/*
	 * This is a safeguard in case the CPU has taken a trap
	 * and is idling in POST.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    prom_serengeti_wakeupcpu(nodeid) != 0) {

		return (EBUSY);
	}

	cp->cpu_flags &= ~CPU_POWEROFF;

	/*
	 * NOTE: restart_other_cpu pauses cpus during the
	 *	slave cpu start.  This helps to quiesce the
	 *	bus traffic a bit, which makes the tick sync
	 *	routine in the prom more robust.
	 */
	SBDP_DBG_CPU("%s: COLD START for cpu (%d)\n", f, cpuid);

	restart_other_cpu(cpuid);

	SBDP_DBG_CPU("after restarting other cpus\n");

	/*
	 * Wait for the cpu to reach its idle thread before
	 * we zap it with a request to blow away the mappings
	 * it (might) have for the sbdp_shutdown_asm code
	 * it may have executed on unconfigure.
	 */
	while ((cp->cpu_thread != cp->cpu_idle_thread) && (ntries > 0)) {
		DELAY(sbdp_cpu_delay);
		ntries--;
	}

	SBDP_DBG_CPU("%s: waited %d out of %d loops for cpu %d\n",
	    f, sbdp_cpu_ntries - ntries, sbdp_cpu_ntries, cpuid);

	return (0);
}

#define	SBDP_CPU_SRAM_ADDR	0x7fff0900000ull
#define	SBDP_CPU_SRAM_SIZE	0x20000ull
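
/*
 * The CPU SRAM window starts with an iosram TOC; the "COPYREN" key in
 * that TOC locates the region that holds the shutdown code and stack
 * (see sbdp_get_cpu_sram_addr() below).
 */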

static const char cpyren_key[] = "COPYREN";

static uint64_t	bbsram_pa;
static uint_t	bbsram_size;

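/*
 * Argument block for the sbdp_get_cpu_sram_addr() cross call: vaddr
 * and npages describe the kernel mapping of CPU SRAM established by
 * cpusram_map(); pa and size point at bbsram_pa and bbsram_size,
 * which the target CPU fills in from the SRAM TOC.
 */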
typedef struct {
	caddr_t		vaddr;
	pgcnt_t		npages;
	uint64_t	*pa;
	uint_t		*size;
} sbdp_cpu_sram_map_t;

int
sbdp_cpu_poweroff(struct cpu *cp)
{
	processorid_t		cpuid;
	pnode_t			nodeid;
	sbdp_cpu_sram_map_t	map;
	static fn_t		f = "sbdp_cpu_poweroff";

	SBDP_DBG_FUNC("%s\n", f);

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Capture all CPUs (except for detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 */
	cpuid = cp->cpu_id;

	nodeid = cpunodes[cpuid].nodeid;
	ASSERT(nodeid != (pnode_t)0);

	*sbdp_valp = 0ull;
	/*
	 * Do the cpu sram mapping now.  This avoids problems with
	 * mutexes and high PILs.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    cpusram_map(&map.vaddr, &map.npages) != DDI_SUCCESS) {
		return (EBUSY);
	}

	map.pa = &bbsram_pa;
	map.size = &bbsram_size;

	/*
	 * Do a cross call to the cpu so it obtains the base address
	 */
	xc_one(cpuid, sbdp_get_cpu_sram_addr, (uint64_t)&map,
	    (uint64_t)NULL);

	cpusram_unmap(&map.vaddr, map.npages);

	if (SBDP_INJECT_ERROR(f, 1) || bbsram_size == 0) {
		cmn_err(CE_WARN, "cpu%d: Key \"%s\" missing from CPU SRAM TOC",
		    cpuid, cpyren_key);
		return (EBUSY);
	}

	if ((bbsram_pa & MMU_PAGEOFFSET) != 0) {
		cmn_err(CE_WARN, "cpu%d: CPU SRAM key \"%s\" not page aligned, "
		    "offset = 0x%lx", cpuid, cpyren_key,
		    (bbsram_pa - (uint64_t)SBDP_CPU_SRAM_ADDR));
		return (EBUSY);
	}

	if (bbsram_size < MMU_PAGESIZE) {
		cmn_err(CE_WARN, "cpu%d: CPU SRAM key \"%s\" too small, "
		    "size = 0x%x", cpuid, cpyren_key, bbsram_size);
		return (EBUSY);
	}

	/*
	 * Capture all CPUs (except for detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 *
	 * The CPUs remain paused and the prom_mutex is known to be free.
	 * This prevents the x-trap victim from blocking when doing prom
	 * IEEE-1275 calls at a high PIL level.
	 */

	promsafe_pause_cpus();

	/*
	 * Quiesce interrupts on the target CPU.  We do this by setting
	 * the CPU 'not ready' (i.e. removing the CPU from cpu_ready_set) to
	 * prevent it from receiving cross calls and cross traps.
	 * This prevents the processor from receiving any new soft interrupts.
	 */

	mp_cpu_quiesce(cp);

	/* tell the prom the cpu is going away */
	if (SBDP_INJECT_ERROR(f, 2) || prom_serengeti_cpu_off(nodeid) != 0)
		return (EBUSY);

	/*
	 * An sir instruction is issued at the end of the shutdown
	 * routine to make the CPU go through POST and re-enter OBP.
	 */
	xt_one_unchecked(cp->cpu_id, (xcfunc_t *)idle_stop_xcall,
	    (uint64_t)sbdp_cpu_shutdown_self, 0);

	*sbdp_valp = 3ull;

	start_cpus();

	/*
	 * Wait until we reach the OBP idle loop or time out.
	 * prom_serengeti_wakeupcpu waits for up to 60 seconds for the
	 * CPU to reach the OBP idle loop.
	 */
	if (SBDP_INJECT_ERROR(f, 3) ||
	    prom_serengeti_wakeupcpu(nodeid) != 0) {

		/*
		 * If it fails here, we still consider the unconfigure
		 * operation successful.
		 */
		cmn_err(CE_WARN, "cpu%d: CPU failed to enter OBP idle loop.\n",
		    cpuid);
	}

	ASSERT(!(CPU_IN_SET(cpu_ready_set, cpuid)));

	bbsram_pa = 0;
	bbsram_size = 0;

	return (0);
}
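
/*
 * Recap of the poweroff sequence above: map CPU SRAM and locate the
 * COPYREN region, pause the other CPUs, quiesce the target, tell OBP
 * the CPU is going away, then x-trap the target into
 * sbdp_cpu_shutdown_self(), which ends in an SIR that drops the CPU
 * back into OBP's idle loop.
 */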

processorid_t
sbdp_get_cpuid(sbdp_handle_t *hp, dev_info_t *dip)
{
	int		cpuid;
	char		type[OBP_MAXPROPNAME];
	pnode_t		nodeid;
	sbd_error_t	*sep;
	static fn_t	f = "sbdp_get_cpuid";

	SBDP_DBG_FUNC("%s\n", f);

	nodeid = ddi_get_nodeid(dip);
	if (sbdp_is_node_bad(nodeid))
		return (-1);

	sep = hp->h_err;

	if (prom_getproplen(nodeid, "device_type") < OBP_MAXPROPNAME)
		(void) prom_getprop(nodeid, "device_type", (caddr_t)type);
	else {
		sbdp_set_err(sep, ESGT_NO_DEV_TYPE, NULL);
		return (-1);
	}

	if (strcmp(type, "cpu") != 0) {
		sbdp_set_err(sep, ESGT_NOT_CPUTYPE, NULL);
		return (-1);
	}

	/*
	 * Check to see if property "cpuid" exists first.
	 * If not, check for "portid".
	 */
	if (prom_getprop(nodeid, "cpuid", (caddr_t)&cpuid) == -1)
		if (prom_getprop(nodeid, "portid", (caddr_t)&cpuid) == -1) {

			return (-1);
		}

	return ((processorid_t)cpuid & SG_CPU_ID_MASK);
}

int
sbdp_cpu_get_impl(sbdp_handle_t *hp, dev_info_t *dip)
{
	int		impl;
	char		type[OBP_MAXPROPNAME];
	pnode_t		nodeid;
	sbd_error_t	*sep;
	static fn_t	f = "sbdp_cpu_get_impl";

	SBDP_DBG_FUNC("%s\n", f);

	nodeid = ddi_get_nodeid(dip);
	if (sbdp_is_node_bad(nodeid))
		return (-1);

	sep = hp->h_err;

	if (prom_getproplen(nodeid, "device_type") < OBP_MAXPROPNAME)
		(void) prom_getprop(nodeid, "device_type", (caddr_t)type);
	else {
		sbdp_set_err(sep, ESGT_NO_DEV_TYPE, NULL);
		return (-1);
	}

	if (strcmp(type, "cpu") != 0) {
		sbdp_set_err(sep, ESGT_NOT_CPUTYPE, NULL);
		return (-1);
	}

	/*
	 * Get the implementation# property.
	 */
	if (prom_getprop(nodeid, "implementation#", (caddr_t)&impl) == -1)
		return (-1);

	return (impl);
}

struct sbdp_prom_get_node_args {
	pnode_t		node;		/* current node */
	processorid_t	portid;		/* portid we are looking for */
	pnode_t		result_node;	/* node found with the above portid */
};

pnode_t
sbdp_find_nearby_cpu_by_portid(pnode_t nodeid, processorid_t portid)
{
	struct sbdp_prom_get_node_args	arg;
	static fn_t	f = "sbdp_find_nearby_cpu_by_portid";

	SBDP_DBG_FUNC("%s\n", f);

	arg.node = nodeid;
	arg.portid = portid;
	(void) prom_tree_access(sbdp_prom_get_cpu, &arg, NULL);

	return (arg.result_node);
}

/*ARGSUSED*/
static int
sbdp_prom_get_cpu(void *arg, int changed)
{
	int		portid;
	pnode_t		parent, cur_node;
	struct sbdp_prom_get_node_args	*argp = arg;
	static fn_t	f = "sbdp_prom_get_cpu";

	SBDP_DBG_FUNC("%s\n", f);

	parent = prom_parentnode(argp->node);

	for (cur_node = prom_childnode(parent); cur_node != OBP_NONODE;
	    cur_node = prom_nextnode(cur_node)) {

		if (prom_getprop(cur_node, OBP_PORTID, (caddr_t)&portid) < 0)
			continue;

		if ((portid == argp->portid) && (cur_node != argp->node))
			break;
	}

	argp->result_node = cur_node;

	return (0);
}

/*
 * A detaching CPU is xcalled with an xtrap to sbdp_cpu_stop_self() after
 * it has been offlined.  The function of this routine is to get the cpu
 * spinning in a safe place.  The requirement is that the system will not
 * reference anything on the detaching board (memory and I/O are detached
 * elsewhere) and that the CPU not reference anything on any other board
 * in the system.  This isolation is required during and after the writes
 * to the domain masks to remove the board from the domain.
 *
 * To accomplish this isolation the following is done:
 *	0) Map the CPU SRAM to obtain the correct address in SRAM.
 *	1) Create a locked mapping to a location in CPU SRAM where
 *	   the cpu will execute.
 *	2) Copy the target function (sbdp_shutdown_asm), in which
 *	   the cpu will execute, into CPU SRAM.
 *	3) Jump into the function in CPU SRAM.
 *	   The function will:
 *	   3.1) Flush its Ecache (displacement).
 *	   3.2) Flush its Dcache with HW mechanism.
 *	   3.3) Flush its Icache with HW mechanism.
 *	   3.4) Flush all valid and _unlocked_ D-TLB entries.
 *	   3.5) Flush all valid and _unlocked_ I-TLB entries.
 *	4) Jump into a tight loop.
 */

static void
sbdp_cpu_stop_self(uint64_t pa)
{
	cpu_t		*cp = CPU;
	int		cpuid = cp->cpu_id;
	tte_t		tte;
	volatile uint_t	*src, *dst;
	size_t		funclen;
	sbdp_shutdown_t	sht;
	uint_t		bbsram_pfn;
	uint64_t	bbsram_addr;
	void		(*bbsram_func)(sbdp_shutdown_t *);
	extern void	sbdp_shutdown_asm(sbdp_shutdown_t *);
	extern void	sbdp_shutdown_asm_end(void);

	funclen = (uintptr_t)sbdp_shutdown_asm_end -
	    (uintptr_t)sbdp_shutdown_asm;
	ASSERT(funclen <= MMU_PAGESIZE);
	ASSERT(bbsram_pa != 0);
	ASSERT((bbsram_pa & MMU_PAGEOFFSET) == 0);
	ASSERT(bbsram_size >= MMU_PAGESIZE);

	stdphys(pa, 3);
	bbsram_pfn = (uint_t)(bbsram_pa >> MMU_PAGESHIFT);

	bbsram_addr = (uint64_t)sbdp_shutdown_va;
	sht.estack = bbsram_addr + MMU_PAGESIZE;
	sht.flushaddr = ecache_flushaddr;

	tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) |
	    TTE_PFN_INTHI(bbsram_pfn);
	tte.tte_intlo = TTE_PFN_INTLO(bbsram_pfn) |
	    TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
	sfmmu_dtlb_ld_kva(sbdp_shutdown_va, &tte);	/* load dtlb */
	sfmmu_itlb_ld_kva(sbdp_shutdown_va, &tte);	/* load itlb */

	for (src = (uint_t *)sbdp_shutdown_asm, dst = (uint_t *)bbsram_addr;
	    src < (uint_t *)sbdp_shutdown_asm_end; src++, dst++)
		*dst = *src;

	bbsram_func = (void (*)())bbsram_addr;
	sht.size = (uint32_t)cpunodes[cpuid].ecache_size << 1;
	sht.linesize = (uint32_t)cpunodes[cpuid].ecache_linesize;
	sht.physaddr = pa;

	/*
	 * Signal to sbdp_cpu_poweroff() that we're just
	 * about done.
	 */
	cp->cpu_m.in_prom = 1;

	stdphys(pa, 4);
	(*bbsram_func)(&sht);
}

/* ARGSUSED */
void
sbdp_get_cpu_sram_addr(uint64_t arg1, uint64_t arg2)
{
	uint64_t	*pap;
	uint_t		*sizep;
	struct iosram_toc	*tocp;
	uint_t		offset;
	uint_t		size;
	sbdp_cpu_sram_map_t	*map;
	int		i;
	fn_t		f = "sbdp_get_cpu_sram_addr";

	SBDP_DBG_FUNC("%s\n", f);

	map = (sbdp_cpu_sram_map_t *)arg1;
	tocp = (struct iosram_toc *)map->vaddr;
	pap = map->pa;
	sizep = map->size;

	for (i = 0; i < tocp->iosram_tagno; i++) {
		if (strcmp(tocp->iosram_keys[i].key, cpyren_key) == 0)
			break;
	}
	if (i == tocp->iosram_tagno) {
		*pap = 0;
		*sizep = 0;
		return;
	}
	offset = tocp->iosram_keys[i].offset;
	size = tocp->iosram_keys[i].size;

	/*
	 * The address we want is the beginning of CPU SRAM + offset
	 */
	*pap = SBDP_CPU_SRAM_ADDR + offset;
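
	/*
	 * For example (hypothetical values): a COPYREN entry at offset
	 * 0x2000 with size 0x10000 would yield
	 * *pap == 0x7fff0900000 + 0x2000 == 0x7fff0902000.
	 */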

	*sizep = size;
}

static int
cpusram_map(caddr_t *vaddrp, pgcnt_t *npp)
{
	uint_t		pgoffset;
	pgcnt_t		npages;
	pfn_t		pfn;
	uint64_t	base;
	caddr_t		kaddr;
	uint_t		mapping_attr;

	base = (uint64_t)SBDP_CPU_SRAM_ADDR & (~MMU_PAGEOFFSET);
	pfn = mmu_btop(base);

	/*
	 * Do a quick sanity check to make sure we are in I/O space.
	 */
	if (pf_is_memory(pfn))
		return (DDI_FAILURE);

	pgoffset = (ulong_t)SBDP_CPU_SRAM_ADDR & MMU_PAGEOFFSET;
	npages = mmu_btopr(SBDP_CPU_SRAM_SIZE + pgoffset);
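
	/*
	 * With the sun4u 8K base page size, SBDP_CPU_SRAM_ADDR happens
	 * to be page aligned (pgoffset == 0), so the 0x20000-byte
	 * window rounds up to 16 pages.
	 */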

	kaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);
	if (kaddr == NULL)
		return (DDI_ME_NORESOURCES);

	mapping_attr = PROT_READ;
	/*
	 * Now map in the pages we've allocated...
	 */
	hat_devload(kas.a_hat, kaddr, ptob(npages), pfn, mapping_attr,
	    HAT_LOAD_LOCK);

	*vaddrp = kaddr + pgoffset;
	*npp = npages;

	return (DDI_SUCCESS);
}

static void
cpusram_unmap(caddr_t *vaddrp, pgcnt_t npages)
{
	uint_t		pgoffset;
	caddr_t		base;
	caddr_t		addr = *vaddrp;

	pgoffset = (ulong_t)SBDP_CPU_SRAM_ADDR & MMU_PAGEOFFSET;
	base = addr - pgoffset;
	hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
	vmem_free(heap_arena, base, ptob(npages));

	*vaddrp = 0;
}

static void
sbdp_cpu_shutdown_self(void)
{
	cpu_t		*cp = CPU;
	int		cpuid = cp->cpu_id;
	extern void	flush_windows(void);
	uint64_t	pa = va_to_pa((void *)sbdp_valp);
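
	/*
	 * Note: the constants stored through stdphys() below and in
	 * sbdp_cpu_stop_self() (8, 6, 7, then 3 and 4) appear to be
	 * progress breadcrumbs written to sbdp_valp's physical address
	 * so a wedged shutdown can be narrowed down post-mortem.
	 */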

	stdphys(pa, 8);
	flush_windows();

	(void) spl8();

	stdphys(pa, 6);

	ASSERT(cp->cpu_intr_actv == 0);
	ASSERT(cp->cpu_thread == cp->cpu_idle_thread ||
	    cp->cpu_thread == cp->cpu_startup_thread);

	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;

	CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid);

	stdphys(pa, 7);
	sbdp_cpu_stop_self(pa);

	cmn_err(CE_PANIC, "sbdp_cpu_shutdown_self: CPU %d FAILED TO SHUTDOWN",
	    cpuid);
}

typedef struct {
	int	node;
	int	board;
	int	non_panther_cpus;
} sbdp_node_walk_t;

static int
sbdp_find_non_panther_cpus(dev_info_t *dip, void *node_args)
{
	int	impl, cpuid, portid;
	char	buf[OBP_MAXPROPNAME];
	int	buflen = sizeof (buf);	/* in/out for ddi_getlongprop_buf() */
	sbdp_node_walk_t *args = (sbdp_node_walk_t *)node_args;

	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, OBP_DEVICETYPE, (caddr_t)buf,
	    &buflen) != DDI_PROP_SUCCESS) {
		return (DDI_WALK_CONTINUE);
	}

	if (strcmp(buf, "cpu") != 0) {
		return (DDI_WALK_CONTINUE);
	}

	if ((impl = ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "implementation#", -1)) == -1) {
		return (DDI_WALK_CONTINUE);
	}

	if ((cpuid = ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "cpuid", -1)) == -1) {
		return (DDI_WALK_CONTINUE);
	}

	portid = SG_CPUID_TO_PORTID(cpuid);

	/* filter out nodes not on this board */
	if (SG_PORTID_TO_BOARD_NUM(portid) != args->board ||
	    SG_PORTID_TO_NODEID(portid) != args->node) {
		return (DDI_WALK_PRUNECHILD);
	}

	switch (impl) {
	case CHEETAH_IMPL:
	case CHEETAH_PLUS_IMPL:
	case JAGUAR_IMPL:
		args->non_panther_cpus++;
		break;
	case PANTHER_IMPL:
		break;
	default:
		ASSERT(0);
		args->non_panther_cpus++;
		break;
	}

	SBDP_DBG_CPU("cpuid=0x%x, portid=0x%x, impl=0x%x, device_type=%s",
	    cpuid, portid, impl, buf);

	return (DDI_WALK_CONTINUE);
}

int
sbdp_board_non_panther_cpus(int node, int board)
{
	sbdp_node_walk_t	arg = {0};

	arg.node = node;
	arg.board = board;

	/*
	 * Root node doesn't have to be held.
	 */
	ddi_walk_devs(ddi_root_node(), sbdp_find_non_panther_cpus,
	    (void *)&arg);

	return (arg.non_panther_cpus);
}
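
/*
 * Illustrative (hypothetical) caller sketch: a DR policy check might
 * use the count to refuse a board that still carries pre-Panther CPUs:
 *
 *	if (sbdp_board_non_panther_cpus(wnode, bd) > 0)
 *		return (EINVAL);
 */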