/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * CPU management for serengeti DR
 *
 * There are three states a CPU can be in:
 *
 *      disconnected:            In reset
 *      connected,unconfigured:  Idling in OBP's idle loop
 *      configured:              Running Solaris
 *
 * State transitions:
 *
 *                 connect                 configure
 *              ------------>           ------------>
 * disconnected              connected               configured
 *                          unconfigured
 *              <-----------            <-------------
 *                disconnect              unconfigure
 *
 * Firmware involvement:
 *
 *               start_cpu(SC)
 *               prom_serengeti_wakeupcpu(OBP)
 *              ------------>           ------------------------->
 * disconnected              connected                            configured
 *                          unconfigured
 *              <-----------            <-------------------------
 *      prom_serengeti_cpu_off(OBP)     prom_serengeti_cpu_off(OBP)
 *      stop_cpu(SC)                    prom_serengeti_wakeupcpu(OBP)
 *
 * SIR (Software Initiated Reset) is used to unconfigure a CPU.
 * After the CPU has finished flushing its caches, it issues an
 * SIR instruction to put itself through POST.  POST detects that
 * it is an SIR, and re-enters OBP as a slave.  When the operation
 * completes successfully, the CPU will be idling in OBP.
 */

#include <sys/obpdefs.h>
#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/membar.h>
#include <sys/x_call.h>
#include <sys/machsystm.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/pte.h>
#include <vm/hat_sfmmu.h>
#include <sys/promif.h>
#include <sys/note.h>
#include <sys/vmsystm.h>
#include <vm/seg_kmem.h>

#include <sys/sbd_ioctl.h>
#include <sys/sbd.h>
#include <sys/sbdp_priv.h>
#include <sys/sbdp_mem.h>
#include <sys/sbdp_error.h>
#include <sys/sgsbbc_iosram.h>
#include <sys/prom_plat.h>
#include <sys/cheetahregs.h>

uint64_t        *sbdp_valp;
extern uint64_t va_to_pa(void *);
static int      sbdp_cpu_ntries = 50000;
static int      sbdp_cpu_delay = 100;
void            sbdp_get_cpu_sram_addr(uint64_t, uint64_t);
static int      cpusram_map(caddr_t *, pgcnt_t *);
static void     cpusram_unmap(caddr_t *, pgcnt_t);
extern int      prom_serengeti_wakeupcpu(pnode_t);
extern int      prom_serengeti_cpu_off(pnode_t);
extern sbdp_wnode_t *sbdp_get_wnodep(int);
extern caddr_t  sbdp_shutdown_va;
static int      sbdp_prom_get_cpu(void *arg, int changed);
static void     sbdp_cpu_shutdown_self(void);

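/*
 * Move a CPU from "connected" to "disconnected" by putting it back
 * into reset: OBP is told to mark the CPU as sitting in POST
 * (prom_serengeti_cpu_off) and the SC is then asked to pull it into
 * reset (sbdp_stop_cpu).  The board mutex is held throughout so that
 * status threads never touch registers on a CPU that is halfway into
 * reset.
 */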
int
sbdp_disconnect_cpu(sbdp_handle_t *hp, dev_info_t *dip, processorid_t cpuid)
{
        pnode_t         nodeid;
        int             bd, wnode;
        sbdp_wnode_t    *wnodep;
        sbdp_bd_t       *bdp = NULL;
        int             rv = 0;
        processorid_t   cpu = cpuid;
        processorid_t   portid;
        static fn_t     f = "sbdp_disconnect_cpu";

        SBDP_DBG_FUNC("%s\n", f);

        nodeid = ddi_get_nodeid(dip);

        /*
         * Get the board number and node number.
         * The check for determining if nodeid is valid is done inside
         * sbdp_get_bd_and_wnode_num.
         */
        if (SBDP_INJECT_ERROR(f, 0) ||
            sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) != 0) {
                rv = -1;
                goto out;
        }

        /*
         * Grab the lock to prevent status threads from accessing
         * registers on the CPU when it is being put into reset.
         */
        wnodep = sbdp_get_wnodep(wnode);
        bdp = &wnodep->bds[bd];
        ASSERT(bdp);
        mutex_enter(&bdp->bd_mutex);

        /*
         * Mark the CPU in reset.  This should be done before calling
         * the SC because we won't know at which stage it failed if
         * the SC call returns failure.
         */
        sbdp_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid), 1);

        /*
         * Ask OBP to mark the CPU as in POST.
         */
        if (SBDP_INJECT_ERROR(f, 1) || prom_serengeti_cpu_off(nodeid) != 0) {
                rv = -1;
                goto out;
        }

        /*
         * Ask the SC to put the CPU into reset.  If the first
         * core is not present, the stop CPU interface needs
         * to be called with the portid rather than the cpuid.
         */
        portid = SG_CPUID_TO_PORTID(cpuid);
        if (!SBDP_IS_CPU_PRESENT(bdp, SG_CPUID_TO_CPU_UNIT(portid))) {
                cpu = portid;
        }

        if (SBDP_INJECT_ERROR(f, 2) || sbdp_stop_cpu(cpu) != 0) {
                rv = -1;
                goto out;
        }

out:
        if (bdp != NULL) {
                mutex_exit(&bdp->bd_mutex);
        }

        if (rv != 0) {
                sbdp_set_err(hp->h_err, ESGT_STOPCPU, NULL);
        }

        return (rv);
}

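/*
 * Bring a disconnected CPU back to the "connected, unconfigured"
 * state.  If the CPU is still sitting in reset, the SC starts it
 * (sbdp_start_cpu) and OBP then parks it in its idle loop
 * (prom_serengeti_wakeupcpu).  Once every present CPU on the board
 * is out of reset, the cached board info is rebuilt.
 */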
int
sbdp_connect_cpu(sbdp_handle_t *hp, dev_info_t *dip, processorid_t cpuid)
{
        pnode_t         nodeid;
        sbd_error_t     *sep;
        int             i;
        int             bd, wnode;
        int             rv = 0;
        static fn_t     f = "sbdp_connect_cpu";

        SBDP_DBG_FUNC("%s\n", f);

        sep = hp->h_err;

        nodeid = ddi_get_nodeid(dip);

        /*
         * The check for determining if nodeid is valid is done inside
         * sbdp_get_bd_and_wnode_num.
         */
        if (SBDP_INJECT_ERROR(f, 0) ||
            sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) != 0) {
                rv = -1;
                goto out;
        }

        /*
         * Ask the SC to bring the CPU out of reset.
         * At this point, the sb_dev_present bit is not set for the CPU.
         * From sbd's point of view the CPU is not present yet, so no
         * status threads will try to read registers off the CPU.
         * Since we are already holding sb_mutex, it is not necessary
         * to grab the board mutex when checking and setting the
         * cpus_in_reset bit.
         */
        if (sbdp_is_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid))) {
                sbdp_wnode_t    *wnodep;
                sbdp_bd_t       *bdp = NULL;
                processorid_t   cpu = cpuid;
                processorid_t   portid;

                wnodep = sbdp_get_wnodep(wnode);
                bdp = &wnodep->bds[bd];
                ASSERT(bdp);

                /*
                 * If the first core is not present, the start CPU
                 * interface needs to be called with the portid rather
                 * than the cpuid.
                 */
                portid = SG_CPUID_TO_PORTID(cpuid);
                if (!SBDP_IS_CPU_PRESENT(bdp, SG_CPUID_TO_CPU_UNIT(portid))) {
                        cpu = portid;
                }

                if (SBDP_INJECT_ERROR(f, 1) || sbdp_start_cpu(cpu) != 0) {
                        rv = -1;
                        goto out;
                }

                if (SBDP_INJECT_ERROR(f, 2) ||
                    prom_serengeti_wakeupcpu(nodeid) != 0) {
                        rv = -1;
                        goto out;
                }
        }

        /*
         * Mark the CPU out of reset.
         */
        sbdp_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid), 0);

        /*
         * Refresh the board info.  We need to wait until all CPUs
         * are out of reset before doing so.
         */
        for (i = 0; i < SG_MAX_CPUS_PER_BD; i++) {
                if (sbdp_is_cpu_present(wnode, bd, i) &&
                    sbdp_is_cpu_in_reset(wnode, bd, i) == 1) {
                        break;
                }
        }

        if (i == SG_MAX_CPUS_PER_BD) {
                /*
                 * All CPUs are out of reset so it is safe to
                 * update the board info.
                 */
                sbdp_add_new_bd_info(wnode, bd);
        }

out:
        if (rv != 0)
                sbdp_set_err(sep, ESGT_WAKEUPCPU, NULL);

        return (rv);
}

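/*
 * Power on a CPU that is currently powered off.  The CPU is first
 * nudged out of POST in case it took a trap
 * (prom_serengeti_wakeupcpu) and is then cold-started via
 * restart_other_cpu().  We poll for up to sbdp_cpu_ntries iterations
 * of sbdp_cpu_delay microseconds (about five seconds with the default
 * tunables) waiting for the CPU to reach its idle thread.
 */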
int
sbdp_cpu_poweron(struct cpu *cp)
{
        int             cpuid;
        int             ntries;
        pnode_t         nodeid;
        extern void     restart_other_cpu(int);
        static fn_t     f = "sbdp_cpu_poweron";

        SBDP_DBG_FUNC("%s\n", f);

        ASSERT(MUTEX_HELD(&cpu_lock));

        ntries = sbdp_cpu_ntries;
        cpuid = cp->cpu_id;

        nodeid = cpunodes[cpuid].nodeid;
        ASSERT(nodeid != (pnode_t)0);

        /*
         * This is a safeguard in case the CPU has taken a trap
         * and is idling in POST.
         */
        if (SBDP_INJECT_ERROR(f, 0) ||
            prom_serengeti_wakeupcpu(nodeid) != 0) {
                return (EBUSY);
        }

        cp->cpu_flags &= ~CPU_POWEROFF;

        /*
         * NOTE: restart_other_cpu pauses cpus during the
         * slave cpu start.  This helps to quiesce the
         * bus traffic a bit which makes the tick sync
         * routine in the prom more robust.
         */
        SBDP_DBG_CPU("%s: COLD START for cpu (%d)\n", f, cpuid);

        restart_other_cpu(cpuid);

        SBDP_DBG_CPU("after restarting other cpus\n");

        /*
         * Wait for the cpu to reach its idle thread before
         * we zap it with a request to blow away the mappings
         * it (might) have for the sbdp_shutdown_asm code
         * it may have executed on unconfigure.
         */
        while ((cp->cpu_thread != cp->cpu_idle_thread) && (ntries > 0)) {
                DELAY(sbdp_cpu_delay);
                ntries--;
        }

        SBDP_DBG_CPU("%s: waited %d out of %d loops for cpu %d\n",
            f, sbdp_cpu_ntries - ntries, sbdp_cpu_ntries, cpuid);

        return (0);
}


#define SBDP_CPU_SRAM_ADDR      0x7fff0900000ull
#define SBDP_CPU_SRAM_SIZE      0x20000ull

static const char cpyren_key[] = "COPYREN";

static uint64_t bbsram_pa;
static uint_t   bbsram_size;

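/*
 * Argument block for the sbdp_get_cpu_sram_addr() cross call: the
 * kernel mapping of CPU SRAM to search (vaddr/npages), plus the
 * locations (pa/size) where the x-call handler deposits the physical
 * address and size of the "COPYREN" region it finds in the SRAM
 * table of contents.
 */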
typedef struct {
        caddr_t         vaddr;
        pgcnt_t         npages;
        uint64_t        *pa;
        uint_t          *size;
} sbdp_cpu_sram_map_t;

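/*
 * Power off a configured CPU: locate the shutdown area in CPU SRAM,
 * pause the other CPUs, quiesce the target CPU, tell OBP it is going
 * away, and then cross-trap it into sbdp_cpu_shutdown_self() so it
 * can flush its caches and SIR itself back into POST.
 */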
int
sbdp_cpu_poweroff(struct cpu *cp)
{
        processorid_t           cpuid;
        pnode_t                 nodeid;
        sbdp_cpu_sram_map_t     map;
        static fn_t             f = "sbdp_cpu_poweroff";

        SBDP_DBG_FUNC("%s\n", f);

        ASSERT(MUTEX_HELD(&cpu_lock));

        cpuid = cp->cpu_id;

        nodeid = cpunodes[cpuid].nodeid;
        ASSERT(nodeid != (pnode_t)0);

        *sbdp_valp = 0ull;

        /*
         * Do the cpu sram mapping now.  This avoids problems with
         * mutexes and high PILs.
         */
        if (SBDP_INJECT_ERROR(f, 0) ||
            cpusram_map(&map.vaddr, &map.npages) != DDI_SUCCESS) {
                return (EBUSY);
        }

        map.pa = &bbsram_pa;
        map.size = &bbsram_size;

        /*
         * Do a cross call to the cpu so it obtains the base address.
         */
        xc_one(cpuid, sbdp_get_cpu_sram_addr, (uint64_t)&map,
            (uint64_t)NULL);

        cpusram_unmap(&map.vaddr, map.npages);

        if (SBDP_INJECT_ERROR(f, 1) || bbsram_size == 0) {
                cmn_err(CE_WARN, "cpu%d: Key \"%s\" missing from CPU SRAM TOC",
                    cpuid, cpyren_key);
                return (EBUSY);
        }

        if ((bbsram_pa & MMU_PAGEOFFSET) != 0) {
                cmn_err(CE_WARN, "cpu%d: CPU SRAM key \"%s\" not page aligned, "
                    "offset = 0x%lx", cpuid, cpyren_key,
                    (bbsram_pa - (uint64_t)SBDP_CPU_SRAM_ADDR));
                return (EBUSY);
        }

        if (bbsram_size < MMU_PAGESIZE) {
                cmn_err(CE_WARN, "cpu%d: CPU SRAM key \"%s\" too small, "
                    "size = 0x%x", cpuid, cpyren_key, bbsram_size);
                return (EBUSY);
        }

        /*
         * Capture all CPUs (except for the detaching proc) to prevent
         * crosscalls to the detaching proc until it has cleared its
         * bit in cpu_ready_set.
         *
         * The CPUs remain paused and the prom_mutex is known to be free.
         * This prevents the x-trap victim from blocking when doing prom
         * IEEE-1275 calls at a high PIL level.
         */
        promsafe_pause_cpus();

        /*
         * Quiesce interrupts on the target CPU.  We do this by setting
         * the CPU "not ready" (i.e. removing it from cpu_ready_set) to
         * prevent it from receiving cross calls and cross traps.  This
         * prevents the processor from receiving any new soft interrupts.
         */
        mp_cpu_quiesce(cp);

        /* Tell the prom the cpu is going away. */
        if (SBDP_INJECT_ERROR(f, 2) || prom_serengeti_cpu_off(nodeid) != 0)
                return (EBUSY);

        /*
         * An SIR instruction is issued at the end of the shutdown
         * routine to make the CPU go through POST and re-enter OBP.
         */
        xt_one_unchecked(cp->cpu_id, (xcfunc_t *)idle_stop_xcall,
            (uint64_t)sbdp_cpu_shutdown_self, 0);

        *sbdp_valp = 3ull;

        start_cpus();

        /*
         * Wait until we reach the OBP idle loop or time out.
         * prom_serengeti_wakeupcpu waits for up to 60 seconds for the
         * CPU to reach the OBP idle loop.
         */
        if (SBDP_INJECT_ERROR(f, 3) ||
            prom_serengeti_wakeupcpu(nodeid) != 0) {
                /*
                 * If it fails here, we still consider the unconfigure
                 * operation as successful.
                 */
                cmn_err(CE_WARN, "cpu%d: CPU failed to enter OBP idle loop.\n",
                    cpuid);
        }

        ASSERT(!(CPU_IN_SET(cpu_ready_set, cpuid)));

        bbsram_pa = 0;
        bbsram_size = 0;

        return (0);
}

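/*
 * Extract the processor id for the CPU described by dip from the
 * PROM node: the "cpuid" property is preferred, with "portid" as the
 * fallback, and the result is masked with SG_CPU_ID_MASK.  Returns
 * -1 if the node is bad or is not a cpu node.
 */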
processorid_t
sbdp_get_cpuid(sbdp_handle_t *hp, dev_info_t *dip)
{
        int             cpuid;
        char            type[OBP_MAXPROPNAME];
        pnode_t         nodeid;
        sbd_error_t     *sep;
        static fn_t     f = "sbdp_get_cpuid";

        SBDP_DBG_FUNC("%s\n", f);

        nodeid = ddi_get_nodeid(dip);
        if (sbdp_is_node_bad(nodeid))
                return (-1);

        sep = hp->h_err;

        if (prom_getproplen(nodeid, "device_type") < OBP_MAXPROPNAME) {
                (void) prom_getprop(nodeid, "device_type", (caddr_t)type);
        } else {
                sbdp_set_err(sep, ESGT_NO_DEV_TYPE, NULL);
                return (-1);
        }

        if (strcmp(type, "cpu") != 0) {
                sbdp_set_err(sep, ESGT_NOT_CPUTYPE, NULL);
                return (-1);
        }

        /*
         * Check to see if the property "cpuid" exists first.
         * If not, check for "portid".
         */
        if (prom_getprop(nodeid, "cpuid", (caddr_t)&cpuid) == -1 &&
            prom_getprop(nodeid, "portid", (caddr_t)&cpuid) == -1) {
                return (-1);
        }

        return ((processorid_t)cpuid & SG_CPU_ID_MASK);
}

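/*
 * Return the "implementation#" property of the CPU node described by
 * dip, or -1 if the node is bad, is not a cpu node, or lacks the
 * property.
 */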
int
sbdp_cpu_get_impl(sbdp_handle_t *hp, dev_info_t *dip)
{
        int             impl;
        char            type[OBP_MAXPROPNAME];
        pnode_t         nodeid;
        sbd_error_t     *sep;
        static fn_t     f = "sbdp_cpu_get_impl";

        SBDP_DBG_FUNC("%s\n", f);

        nodeid = ddi_get_nodeid(dip);
        if (sbdp_is_node_bad(nodeid))
                return (-1);

        sep = hp->h_err;

        if (prom_getproplen(nodeid, "device_type") < OBP_MAXPROPNAME) {
                (void) prom_getprop(nodeid, "device_type", (caddr_t)type);
        } else {
                sbdp_set_err(sep, ESGT_NO_DEV_TYPE, NULL);
                return (-1);
        }

        if (strcmp(type, "cpu") != 0) {
                sbdp_set_err(sep, ESGT_NOT_CPUTYPE, NULL);
                return (-1);
        }

        /*
         * Get the implementation# property.
         */
        if (prom_getprop(nodeid, "implementation#", (caddr_t)&impl) == -1)
                return (-1);

        return (impl);
}

struct sbdp_prom_get_node_args {
        pnode_t         node;           /* current node */
        processorid_t   portid;         /* portid we are looking for */
        pnode_t         result_node;    /* node found with the above portid */
};

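/*
 * Find a sibling of nodeid that carries the given portid, skipping
 * nodeid itself.  The tree walk is done under prom_tree_access() via
 * the sbdp_prom_get_cpu() callback below.
 */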
pnode_t
sbdp_find_nearby_cpu_by_portid(pnode_t nodeid, processorid_t portid)
{
        struct sbdp_prom_get_node_args  arg;
        static fn_t f = "sbdp_find_nearby_cpu_by_portid";

        SBDP_DBG_FUNC("%s\n", f);

        arg.node = nodeid;
        arg.portid = portid;
        (void) prom_tree_access(sbdp_prom_get_cpu, &arg, NULL);

        return (arg.result_node);
}

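/*
 * prom_tree_access() callback: scan the children of our parent node
 * for one that has the requested portid but is not the starting node,
 * and record it in result_node (OBP_NONODE if no match is found).
 */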
/*ARGSUSED*/
static int
sbdp_prom_get_cpu(void *arg, int changed)
{
        int             portid;
        pnode_t         parent, cur_node;
        struct sbdp_prom_get_node_args  *argp = arg;
        static fn_t     f = "sbdp_prom_get_cpu";

        SBDP_DBG_FUNC("%s\n", f);

        parent = prom_parentnode(argp->node);

        for (cur_node = prom_childnode(parent); cur_node != OBP_NONODE;
            cur_node = prom_nextnode(cur_node)) {
                if (prom_getprop(cur_node, OBP_PORTID, (caddr_t)&portid) < 0)
                        continue;

                if ((portid == argp->portid) && (cur_node != argp->node))
                        break;
        }

        argp->result_node = cur_node;

        return (0);
}

/*
 * A detaching CPU is xcalled with an xtrap to sbdp_cpu_stop_self() after
 * it has been offlined.  The function of this routine is to get the cpu
 * spinning in a safe place.  The requirement is that the system will not
 * reference anything on the detaching board (memory and i/o are detached
 * elsewhere) and that the CPU not reference anything on any other board
 * in the system.  This isolation is required during and after the writes
 * to the domain masks to remove the board from the domain.
 *
 * To accomplish this isolation the following is done:
 *      0) Map the CPUSRAM to obtain the correct address in SRAM.
 *      1) Create a locked mapping to a location in CPU SRAM where
 *         the cpu will execute.
 *      2) Copy the target function (sbdp_shutdown_asm), in which
 *         the cpu will execute, into CPU SRAM.
 *      3) Jump into the function within CPU SRAM.
 *         The function will:
 *         3.1) Flush its Ecache (displacement).
 *         3.2) Flush its Dcache with HW mechanism.
 *         3.3) Flush its Icache with HW mechanism.
 *         3.4) Flush all valid and _unlocked_ D-TLB entries.
 *         3.5) Flush all valid and _unlocked_ I-TLB entries.
 *      4) Jump into a tight loop.
 */

static void
sbdp_cpu_stop_self(uint64_t pa)
{
        cpu_t           *cp = CPU;
        int             cpuid = cp->cpu_id;
        tte_t           tte;
        volatile uint_t *src, *dst;
        size_t          funclen;
        sbdp_shutdown_t sht;
        uint_t          bbsram_pfn;
        uint64_t        bbsram_addr;
        void            (*bbsram_func)(sbdp_shutdown_t *);
        extern void     sbdp_shutdown_asm(sbdp_shutdown_t *);
        extern void     sbdp_shutdown_asm_end(void);

        funclen = (uintptr_t)sbdp_shutdown_asm_end -
            (uintptr_t)sbdp_shutdown_asm;
        ASSERT(funclen <= MMU_PAGESIZE);
        ASSERT(bbsram_pa != 0);
        ASSERT((bbsram_pa & MMU_PAGEOFFSET) == 0);
        ASSERT(bbsram_size >= MMU_PAGESIZE);

        stdphys(pa, 3);
        bbsram_pfn = (uint_t)(bbsram_pa >> MMU_PAGESHIFT);

        bbsram_addr = (uint64_t)sbdp_shutdown_va;
        sht.estack = bbsram_addr + MMU_PAGESIZE;
        sht.flushaddr = ecache_flushaddr;

        tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) |
            TTE_PFN_INTHI(bbsram_pfn);
        tte.tte_intlo = TTE_PFN_INTLO(bbsram_pfn) |
            TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
        sfmmu_dtlb_ld_kva(sbdp_shutdown_va, &tte);      /* load dtlb */
        sfmmu_itlb_ld_kva(sbdp_shutdown_va, &tte);      /* load itlb */

        for (src = (uint_t *)sbdp_shutdown_asm, dst = (uint_t *)bbsram_addr;
            src < (uint_t *)sbdp_shutdown_asm_end; src++, dst++)
                *dst = *src;

        bbsram_func = (void (*)())bbsram_addr;
        sht.size = (uint32_t)cpunodes[cpuid].ecache_size << 1;
        sht.linesize = (uint32_t)cpunodes[cpuid].ecache_linesize;
        sht.physaddr = pa;

        /*
         * Signal to sbdp_cpu_poweroff() that we're just
         * about done.
         */
        cp->cpu_m.in_prom = 1;

        stdphys(pa, 4);
        (*bbsram_func)(&sht);
}

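/*
 * Cross-call handler run on the target CPU: walk the IOSRAM table of
 * contents for the "COPYREN" key and report the physical address and
 * size of that region back through the map argument.  Both outputs
 * are zeroed if the key is absent.
 */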
/* ARGSUSED */
void
sbdp_get_cpu_sram_addr(uint64_t arg1, uint64_t arg2)
{
        uint64_t        *pap;
        uint_t          *sizep;
        struct iosram_toc *tocp;
        uint_t          offset;
        uint_t          size;
        sbdp_cpu_sram_map_t *map;
        int             i;
        fn_t            f = "sbdp_get_cpu_sram_addr";

        SBDP_DBG_FUNC("%s\n", f);

        map = (sbdp_cpu_sram_map_t *)arg1;
        tocp = (struct iosram_toc *)map->vaddr;
        pap = map->pa;
        sizep = map->size;

        for (i = 0; i < tocp->iosram_tagno; i++) {
                if (strcmp(tocp->iosram_keys[i].key, cpyren_key) == 0)
                        break;
        }
        if (i == tocp->iosram_tagno) {
                *pap = 0;
                *sizep = 0;
                return;
        }
        offset = tocp->iosram_keys[i].offset;
        size = tocp->iosram_keys[i].size;

        /*
         * The address we want is the beginning of cpusram + offset.
         */
        *pap = SBDP_CPU_SRAM_ADDR + offset;

        *sizep = size;
}

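/*
 * Map the fixed SBDP_CPU_SRAM_ADDR physical range into the kernel
 * heap so the SRAM table of contents can be read.  On success, the
 * virtual address is returned through vaddrp and the page count
 * (needed later by cpusram_unmap()) through npp.
 */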
static int
cpusram_map(caddr_t *vaddrp, pgcnt_t *npp)
{
        uint_t          pgoffset;
        pgcnt_t         npages;
        pfn_t           pfn;
        uint64_t        base;
        caddr_t         kaddr;
        uint_t          mapping_attr;

        base = (uint64_t)SBDP_CPU_SRAM_ADDR & (~MMU_PAGEOFFSET);
        pfn = mmu_btop(base);

        /*
         * Do a quick sanity check to make sure we are in I/O space.
         */
        if (pf_is_memory(pfn))
                return (DDI_FAILURE);

        pgoffset = (ulong_t)SBDP_CPU_SRAM_ADDR & MMU_PAGEOFFSET;
        npages = mmu_btopr(SBDP_CPU_SRAM_SIZE + pgoffset);

        kaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);
        if (kaddr == NULL)
                return (DDI_ME_NORESOURCES);

        mapping_attr = PROT_READ;

        /*
         * Now map in the pages we've allocated...
         */
        hat_devload(kas.a_hat, kaddr, ptob(npages), pfn, mapping_attr,
            HAT_LOAD_LOCK);

        *vaddrp = kaddr + pgoffset;
        *npp = npages;

        return (DDI_SUCCESS);
}

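/*
 * Undo cpusram_map(): unload the locked HAT mapping, return the
 * virtual range to the heap arena and clear the caller's pointer.
 */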
static void
cpusram_unmap(caddr_t *vaddrp, pgcnt_t npages)
{
        uint_t          pgoffset;
        caddr_t         base;
        caddr_t         addr = *vaddrp;

        pgoffset = (ulong_t)SBDP_CPU_SRAM_ADDR & MMU_PAGEOFFSET;
        base = addr - pgoffset;
        hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
        vmem_free(heap_arena, base, ptob(npages));

        *vaddrp = 0;
}

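/*
 * Executed by the detaching CPU itself (via idle_stop_xcall).  It
 * flushes its register windows, raises the PIL, marks itself offline
 * and detached, and finally calls sbdp_cpu_stop_self() to run the
 * shutdown sequence from CPU SRAM.  The stdphys() stores appear to
 * record shutdown progress in *sbdp_valp for post-mortem inspection.
 * This routine should never return, hence the trailing panic.
 */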
static void
sbdp_cpu_shutdown_self(void)
{
        cpu_t           *cp = CPU;
        int             cpuid = cp->cpu_id;
        extern void     flush_windows(void);
        uint64_t        pa = va_to_pa((void *)sbdp_valp);

        stdphys(pa, 8);
        flush_windows();

        (void) spl8();

        stdphys(pa, 6);

        ASSERT(cp->cpu_intr_actv == 0);
        ASSERT(cp->cpu_thread == cp->cpu_idle_thread ||
            cp->cpu_thread == cp->cpu_startup_thread);

        cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;

        CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid);

        stdphys(pa, 7);
        sbdp_cpu_stop_self(pa);

        cmn_err(CE_PANIC, "sbdp_cpu_shutdown_self: CPU %d FAILED TO SHUTDOWN",
            cpuid);
}

typedef struct {
        int     node;
        int     board;
        int     non_panther_cpus;
} sbdp_node_walk_t;

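/*
 * ddi_walk_devs() callback: count the CPUs on the given node/board
 * whose implementation is Cheetah, Cheetah+ or Jaguar (i.e. anything
 * other than Panther).  Nodes belonging to other boards are pruned
 * from the walk.
 */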
static int
sbdp_find_non_panther_cpus(dev_info_t *dip, void *node_args)
{
        int     impl, cpuid, portid;
        int     buflen;
        char    buf[OBP_MAXPROPNAME];
        sbdp_node_walk_t *args = (sbdp_node_walk_t *)node_args;

        if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
            DDI_PROP_DONTPASS, OBP_DEVICETYPE, (caddr_t)buf,
            &buflen) != DDI_PROP_SUCCESS) {
                return (DDI_WALK_CONTINUE);
        }

        if (strcmp(buf, "cpu") != 0) {
                return (DDI_WALK_CONTINUE);
        }

        if ((impl = ddi_getprop(DDI_DEV_T_ANY, dip,
            DDI_PROP_DONTPASS, "implementation#", -1)) == -1) {
                return (DDI_WALK_CONTINUE);
        }

        if ((cpuid = ddi_getprop(DDI_DEV_T_ANY, dip,
            DDI_PROP_DONTPASS, "cpuid", -1)) == -1) {
                return (DDI_WALK_CONTINUE);
        }

        portid = SG_CPUID_TO_PORTID(cpuid);

        /* Filter out nodes not on this board. */
        if (SG_PORTID_TO_BOARD_NUM(portid) != args->board ||
            SG_PORTID_TO_NODEID(portid) != args->node) {
                return (DDI_WALK_PRUNECHILD);
        }

        switch (impl) {
        case CHEETAH_IMPL:
        case CHEETAH_PLUS_IMPL:
        case JAGUAR_IMPL:
                args->non_panther_cpus++;
                break;
        case PANTHER_IMPL:
                break;
        default:
                ASSERT(0);
                args->non_panther_cpus++;
                break;
        }

        SBDP_DBG_CPU("cpuid=0x%x, portid=0x%x, impl=0x%x, device_type=%s",
            cpuid, portid, impl, buf);

        return (DDI_WALK_CONTINUE);
}

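/*
 * Count the non-Panther CPUs on a board by walking the device tree
 * from the root with sbdp_find_non_panther_cpus().  Returns the
 * number of non-Panther CPUs found.
 */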
int
sbdp_board_non_panther_cpus(int node, int board)
{
        sbdp_node_walk_t arg = {0};

        arg.node = node;
        arg.board = board;

        /*
         * The root node doesn't have to be held.
         */
        ddi_walk_devs(ddi_root_node(), sbdp_find_non_panther_cpus,
            (void *)&arg);

        return (arg.non_panther_cpus);
}