1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/time.h>
28 #include <sys/cpuvar.h>
29 #include <sys/dditypes.h>
30 #include <sys/ddipropdefs.h>
31 #include <sys/ddi_impldefs.h>
32 #include <sys/sunddi.h>
33 #include <sys/esunddi.h>
34 #include <sys/sunndi.h>
35 #include <sys/platform_module.h>
36 #include <sys/errno.h>
37 #include <sys/conf.h>
38 #include <sys/modctl.h>
39 #include <sys/promif.h>
40 #include <sys/promimpl.h>
41 #include <sys/prom_plat.h>
42 #include <sys/cmn_err.h>
43 #include <sys/sysmacros.h>
44 #include <sys/mem_cage.h>
45 #include <sys/kobj.h>
46 #include <sys/utsname.h>
47 #include <sys/cpu_sgnblk_defs.h>
48 #include <sys/atomic.h>
49 #include <sys/kdi_impl.h>
50
51 #include <sys/sgsbbc.h>
52 #include <sys/sgsbbc_iosram.h>
53 #include <sys/sgsbbc_iosram_priv.h>
54 #include <sys/sgsbbc_mailbox.h>
55 #include <sys/sgsgn.h>
56 #include <sys/sgcn.h>
57 #include <sys/serengeti.h>
58 #include <sys/sgfrutypes.h>
59 #include <sys/machsystm.h>
60 #include <sys/sbd_ioctl.h>
61 #include <sys/sbd.h>
62 #include <sys/sbdp_mem.h>
63
64 #include <sys/memnode.h>
65 #include <vm/vm_dep.h>
66 #include <vm/page.h>
67
68 #include <sys/cheetahregs.h>
69 #include <sys/plat_ecc_unum.h>
70 #include <sys/plat_ecc_dimm.h>
71
72 #include <sys/lgrp.h>
73 #include <sys/clock_impl.h>
74
75 static int sg_debug = 0;
76
77 #ifdef DEBUG
78 #define DCMNERR if (sg_debug) cmn_err
79 #else
80 #define DCMNERR
81 #endif
82
83 int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
84
85 /* local functions */
86 static void cpu_sgn_update(ushort_t sgn, uchar_t state,
87 uchar_t sub_state, int cpuid);
88
89
90 /*
91 * Local data.
92 *
93 * iosram_write_ptr is a pointer to iosram_write(). Because of
94 * kernel dynamic linking, we can't get to the function by name,
95 * but we can look up its address, and store it in this variable
96 * instead.
97 *
98 * We include the extern for iosram_write() here not because we call
99 * it, but to force compilation errors if its prototype doesn't
100 * match the prototype of iosram_write_ptr.
101 *
102 * The same issues apply to iosram_read() and iosram_read_ptr.
103 */
104 /*CSTYLED*/
105 extern int iosram_write (int, uint32_t, caddr_t, uint32_t);
106 static int (*iosram_write_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
107 /*CSTYLED*/
108 extern int iosram_read (int, uint32_t, caddr_t, uint32_t);
109 static int (*iosram_read_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
110
111
112 /*
113 * Variable to indicate if the date should be obtained from the SC or not.
114 */
115 int todsg_use_sc = FALSE; /* set to false at the beginning */
116
117 /*
118 * Preallocation of spare tsb's for DR
119 *
120 * We don't allocate spares for Wildcat since TSBs should come
121 * out of memory local to the node.
122 */
123 #define IOMMU_PER_SCHIZO 2
124 int serengeti_tsb_spares = (SG_MAX_IO_BDS * SG_SCHIZO_PER_IO_BD *
125 IOMMU_PER_SCHIZO);
126
127 /*
128 * sg_max_ncpus is the maximum number of CPUs supported on Serengeti.
129 * sg_max_ncpus is set to be smaller than NCPU to reduce the amount of
130 * memory the logs take up until we have a dynamic log memory allocation
131 * solution.
132 */
133 int sg_max_ncpus = (24 * 2); /* (max # of processors * # of cores/proc) */
134
135 /*
136 * variables to control mailbox message timeouts.
137 * These can be patched via /etc/system or mdb.
138 */
139 int sbbc_mbox_default_timeout = MBOX_DEFAULT_TIMEOUT;
140 int sbbc_mbox_min_timeout = MBOX_MIN_TIMEOUT;
141
142 /* cached 'chosen' node_id */
143 pnode_t chosen_nodeid = (pnode_t)0;
144
145 static void (*sg_ecc_taskq_func)(sbbc_ecc_mbox_t *) = NULL;
146 static int (*sg_ecc_mbox_func)(sbbc_ecc_mbox_t *) = NULL;
147
148 /*
149 * Table that maps memory slices to a specific memnode.
150 */
151 int slice_to_memnode[SG_MAX_SLICE];
152
153 plat_dimm_sid_board_t domain_dimm_sids[SG_MAX_CPU_BDS];
154
155
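/*
 * Return the number of spare TSBs to preallocate for DR, capped at
 * MAX_UPA.
 */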
156 int
157 set_platform_tsb_spares()
158 {
159 return (MIN(serengeti_tsb_spares, MAX_UPA));
160 }
161
162 #pragma weak mmu_init_large_pages
163
164 void
165 set_platform_defaults(void)
166 {
167 extern int watchdog_enable;
168 extern uint64_t xc_tick_limit_scale;
169 extern void mmu_init_large_pages(size_t);
170
171 #ifdef DEBUG
172 char *todsg_name = "todsg";
173 ce_verbose_memory = 2;
174 ce_verbose_other = 2;
175 #endif /* DEBUG */
176
177 watchdog_enable = TRUE;
178 watchdog_available = TRUE;
179
180 cpu_sgn_func = cpu_sgn_update;
181
182 #ifdef DEBUG
183 /* tod_module_name should be set to "todsg" from OBP property */
184 if (tod_module_name && (strcmp(tod_module_name, todsg_name) == 0))
185 prom_printf("Using todsg driver\n");
186 else {
187 prom_printf("Force using todsg driver\n");
188 tod_module_name = todsg_name;
189 }
190 #endif /* DEBUG */
191
192 /* Serengeti does not support forthdebug */
193 forthdebug_supported = 0;
194
195
196 /*
197 * Some DR operations require the system to be sync paused.
198 * Sync pause on Serengeti could potentially take up to 4
199 * seconds to complete depending on the load on the SC. To
200 * avoid send_mond panics during such operations, we need to
201 * increase xc_tick_limit to a larger value on Serengeti by
202 * setting xc_tick_limit_scale to 5.
203 */
204 xc_tick_limit_scale = 5;
205
206 if ((mmu_page_sizes == max_mmu_page_sizes) &&
207 (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
208 if (&mmu_init_large_pages)
209 mmu_init_large_pages(mmu_ism_pagesize);
210 }
211 }
212
213 void
214 load_platform_modules(void)
215 {
216 if (modload("misc", "pcihp") < 0) {
217 cmn_err(CE_NOTE, "pcihp driver failed to load");
218 }
219 }
220
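/*
 * CPU power on/off is handled by the sbdp (DR) driver.  Its entry points
 * are looked up by name at run time so this platmod has no hard
 * dependency on sbdp; ENOTSUP is returned if the driver is not loaded.
 */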
221 /*ARGSUSED*/
222 int
223 plat_cpu_poweron(struct cpu *cp)
224 {
225 int (*serengeti_cpu_poweron)(struct cpu *) = NULL;
226
227 serengeti_cpu_poweron =
228 (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweron", 0);
229
230 if (serengeti_cpu_poweron == NULL)
231 return (ENOTSUP);
232 else
233 return ((serengeti_cpu_poweron)(cp));
234 }
235
236 /*ARGSUSED*/
237 int
238 plat_cpu_poweroff(struct cpu *cp)
239 {
240 int (*serengeti_cpu_poweroff)(struct cpu *) = NULL;
241
242 serengeti_cpu_poweroff =
243 (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweroff", 0);
244
245 if (serengeti_cpu_poweroff == NULL)
246 return (ENOTSUP);
247 else
248 return ((serengeti_cpu_poweroff)(cp));
249 }
250
251 #ifdef DEBUG
252 pgcnt_t serengeti_cage_size_limit;
253 #endif
254
255 /* Preferred minimum cage size (expressed in pages)... for DR */
256 pgcnt_t serengeti_minimum_cage_size = 0;
257
258 void
259 set_platform_cage_params(void)
260 {
261 extern pgcnt_t total_pages;
262 extern struct memlist *phys_avail;
263
264 if (kernel_cage_enable) {
265 pgcnt_t preferred_cage_size;
266
267 preferred_cage_size =
268 MAX(serengeti_minimum_cage_size, total_pages / 256);
269 #ifdef DEBUG
270 if (serengeti_cage_size_limit)
271 preferred_cage_size = serengeti_cage_size_limit;
272 #endif
273 /*
274 		 * POST copies OBP into the lowest slice. This requires the
275 		 * cage to grow upwards.
276 */
277 kcage_range_init(phys_avail, KCAGE_UP, preferred_cage_size);
278 }
279
280 kcage_startup_dir = KCAGE_UP;
281
282 /* Only note when the cage is off since it should always be on. */
283 if (!kcage_on)
284 cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
285 }
286
287 #define ALIGN(x, a) ((a) == 0 ? (uint64_t)(x) : \
288 (((uint64_t)(x) + (uint64_t)(a) - 1l) & ~((uint64_t)(a) - 1l)))
289
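/*
 * Record that the memory range [base, base + sz) on board 'brd' belongs
 * to a memnode.  A memnode is bound to the board if one is not already,
 * and every memory slice touched by the range is tagged with that
 * memnode in slice_to_memnode[].
 */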
290 void
291 update_mem_bounds(int brd, uint64_t base, uint64_t sz)
292 {
293 uint64_t end;
294 int mnode;
295
296 end = base + sz - 1;
297
298 /*
299 * First see if this board already has a memnode associated
300 * with it. If not, see if this slice has a memnode. This
301 * covers the cases where a single slice covers multiple
302 * boards (cross-board interleaving) and where a single
303 * board has multiple slices (1+GB DIMMs).
304 */
305 if ((mnode = plat_lgrphand_to_mem_node(brd)) == -1) {
306 if ((mnode = slice_to_memnode[PA_2_SLICE(base)]) == -1)
307 mnode = mem_node_alloc();
308 plat_assign_lgrphand_to_mem_node(brd, mnode);
309 }
310
311 /*
312 * Align base at 16GB boundary
313 */
314 base = ALIGN(base, (1ul << PA_SLICE_SHIFT));
315
316 while (base < end) {
317 slice_to_memnode[PA_2_SLICE(base)] = mnode;
318 base += (1ul << PA_SLICE_SHIFT);
319 }
320 }
321
322 /*
323 * Dynamically detect memory slices in the system by decoding
324 * the cpu memory decoder registers at boot time.
325 */
326 void
327 plat_fill_mc(pnode_t nodeid)
328 {
329 uint64_t mc_addr, mask;
330 uint64_t mc_decode[SG_MAX_BANKS_PER_MC];
331 uint64_t base, size;
332 uint32_t regs[4];
333 int len;
334 int local_mc;
335 int portid;
336 int boardid;
337 int i;
338
339 if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
340 (portid == -1))
341 return;
342
343 /*
344 * Decode the board number from the MC portid
345 */
346 boardid = SG_PORTID_TO_BOARD_NUM(portid);
347
348 /*
349 * The "reg" property returns 4 32-bit values. The first two are
350 * combined to form a 64-bit address. The second two are for a
351 * 64-bit size, but we don't actually need to look at that value.
352 */
353 len = prom_getproplen(nodeid, "reg");
354 if (len != (sizeof (uint32_t) * 4)) {
355 prom_printf("Warning: malformed 'reg' property\n");
356 return;
357 }
358 if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
359 return;
360 mc_addr = ((uint64_t)regs[0]) << 32;
361 mc_addr |= (uint64_t)regs[1];
362
363 /*
364 * Figure out whether the memory controller we are examining
365 * belongs to this CPU or a different one.
366 */
367 if (portid == cpunodes[CPU->cpu_id].portid)
368 local_mc = 1;
369 else
370 local_mc = 0;
371
372 for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
373 mask = SG_REG_2_OFFSET(i);
374
375 /*
376 * If the memory controller is local to this CPU, we use
377 * the special ASI to read the decode registers.
378 * Otherwise, we load the values from a magic address in
379 * I/O space.
380 */
381 if (local_mc)
382 mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
383 else
384 mc_decode[i] = lddphysio((mc_addr | mask));
385
386 if (mc_decode[i] >> MC_VALID_SHIFT) {
387 /*
388 * The memory decode register is a bitmask field,
389 * so we can decode that into both a base and
390 * a span.
391 */
392 base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
393 size = MC_UK2SPAN(mc_decode[i]);
394 update_mem_bounds(boardid, base, size);
395 }
396 }
397 }
398
399 /*
400 * This routine is run midway through the boot process. By the time we get
401 * here, we know about all the active CPU boards in the system, and we have
402 * extracted information about each board's memory from the memory
403 * controllers. We have also figured out which ranges of memory will be
404 * assigned to which memnodes, so we walk the slice table to build the table
405 * of memnodes.
406 */
407 /* ARGSUSED */
408 void
409 plat_build_mem_nodes(prom_memlist_t *list, size_t nelems)
410 {
411 int slice;
412 pfn_t basepfn;
413 pgcnt_t npgs;
414
415 mem_node_pfn_shift = PFN_SLICE_SHIFT;
416 mem_node_physalign = (1ull << PA_SLICE_SHIFT);
417
418 for (slice = 0; slice < SG_MAX_SLICE; slice++) {
419 if (slice_to_memnode[slice] == -1)
420 continue;
421 basepfn = (uint64_t)slice << PFN_SLICE_SHIFT;
422 npgs = 1ull << PFN_SLICE_SHIFT;
423 mem_node_add_slice(basepfn, basepfn + npgs - 1);
424 }
425 }
426
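/*
 * Map a pfn to its memnode using the slice table built at boot.
 */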
427 int
428 plat_pfn_to_mem_node(pfn_t pfn)
429 {
430 int node;
431
432 node = slice_to_memnode[PFN_2_SLICE(pfn)];
433
434 return (node);
435 }
436
437 /*
438 * Serengeti support for lgroups.
439 *
440 * On Serengeti, an lgroup platform handle == board number.
441 *
442 * Mappings between lgroup handles and memnodes are managed
443 * in addition to mappings between memory slices and memnodes
444 * to support cross-board interleaving as well as multiple
445 * slices per board (e.g. >1GB DIMMs). The initial mapping
446 * of memnodes to lgroup handles is determined at boot time.
447 * A DR addition of memory adds a new mapping. A DR copy-rename
448 * swaps mappings.
449 */
450
451 /*
452 * Macro for extracting the board number from the CPU id
453 */
454 #define CPUID_TO_BOARD(id) (((id) >> 2) & 0x7)
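/* e.g., CPU id 22 decodes to board ((22 >> 2) & 0x7) == 5 */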
455
456 /*
457 * Return the platform handle for the lgroup containing the given CPU
458 *
459 * For Serengeti, lgroup platform handle == board number
460 */
461 lgrp_handle_t
462 plat_lgrp_cpu_to_hand(processorid_t id)
463 {
464 return (CPUID_TO_BOARD(id));
465 }
466
467 /*
468 * Platform specific lgroup initialization
469 */
470 void
471 plat_lgrp_init(void)
472 {
473 int i;
474 extern uint32_t lgrp_expand_proc_thresh;
475 extern uint32_t lgrp_expand_proc_diff;
476
477 /*
478 * Initialize lookup tables to invalid values so we catch
479 * any illegal use of them.
480 */
481 for (i = 0; i < SG_MAX_SLICE; i++) {
482 slice_to_memnode[i] = -1;
483 }
484
485 /*
486 * Set tuneables for Serengeti architecture
487 *
488 * lgrp_expand_proc_thresh is the minimum load on the lgroups
489 * this process is currently running on before considering
490 * expanding threads to another lgroup.
491 *
492 * lgrp_expand_proc_diff determines how much less the remote lgroup
493 * must be loaded before expanding to it.
494 *
495 * Bandwidth is maximized on Serengeti by spreading load across
496 * the machine. The impact to inter-thread communication isn't
497 * too costly since remote latencies are relatively low. These
498 * values equate to one CPU's load and so attempt to spread the
499 * load out across as many lgroups as possible one CPU at a time.
500 */
501 lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX;
502 lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
503 }
504
505 /*
506 * Platform notification of lgroup (re)configuration changes
507 */
508 /*ARGSUSED*/
509 void
510 plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
511 {
512 update_membounds_t *umb;
513 lgrp_config_mem_rename_t lmr;
514 lgrp_handle_t shand, thand;
515 int snode, tnode;
516
517 switch (evt) {
518
519 case LGRP_CONFIG_MEM_ADD:
520 umb = (update_membounds_t *)arg;
521 update_mem_bounds(umb->u_board, umb->u_base, umb->u_len);
522
523 break;
524
525 case LGRP_CONFIG_MEM_DEL:
526 /* We don't have to do anything */
527 break;
528
529 case LGRP_CONFIG_MEM_RENAME:
530 /*
531 * During a DR copy-rename operation, all of the memory
532 * on one board is moved to another board -- but the
533 * addresses/pfns and memnodes don't change. This means
534 * the memory has changed locations without changing identity.
535 *
536 * Source is where we are copying from and target is where we
537 * are copying to. After source memnode is copied to target
538 * memnode, the physical addresses of the target memnode are
539 * renamed to match what the source memnode had. Then target
540 * memnode can be removed and source memnode can take its
541 * place.
542 *
543 * To do this, swap the lgroup handle to memnode mappings for
544 * the boards, so target lgroup will have source memnode and
545 * source lgroup will have empty target memnode which is where
546 * its memory will go (if any is added to it later).
547 *
548 * Then source memnode needs to be removed from its lgroup
549 * and added to the target lgroup where the memory was living
550 * but under a different name/memnode. The memory was in the
551 * target memnode and now lives in the source memnode with
552 * different physical addresses even though it is the same
553 * memory.
554 */
555 shand = arg & 0xffff;
556 thand = (arg & 0xffff0000) >> 16;
557 snode = plat_lgrphand_to_mem_node(shand);
558 tnode = plat_lgrphand_to_mem_node(thand);
559
560 plat_assign_lgrphand_to_mem_node(thand, snode);
561 plat_assign_lgrphand_to_mem_node(shand, tnode);
562
563 /*
564 * Remove source memnode of copy rename from its lgroup
565 * and add it to its new target lgroup
566 */
567 lmr.lmem_rename_from = shand;
568 lmr.lmem_rename_to = thand;
569
570 lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
571 (uintptr_t)&lmr);
572
573 break;
574
575 default:
576 break;
577 }
578 }
579
580 /*
581 * Return latency between "from" and "to" lgroups
582 *
583 * This latency number can only be used for relative comparison
584 * between lgroups on the running system, cannot be used across platforms,
585 * and may not reflect the actual latency. It is platform and implementation
586 * specific, so platform gets to decide its value. It would be nice if the
587 * number was at least proportional to make comparisons more meaningful though.
588 * NOTE: The numbers below are supposed to be load latencies for uncached
589 * memory divided by 10.
590 */
591 int
592 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
593 {
594 /*
595 * Return min remote latency when there are more than two lgroups
596 * (root and child) and getting latency between two different lgroups
597 * or root is involved
598 */
599 if (lgrp_optimizations() && (from != to ||
600 from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
601 return (28);
602 else
603 return (23);
604 }
605
606 /* ARGSUSED */
607 void
608 plat_freelist_process(int mnode)
609 {
610 }
611
612 /*
613 * Find dip for chosen IOSRAM
614 */
615 dev_info_t *
616 find_chosen_dip(void)
617 {
618 dev_info_t *dip;
619 char master_sbbc[MAXNAMELEN];
620 pnode_t nodeid;
621 uint_t tunnel;
622
623 /*
624 * find the /chosen SBBC node, prom interface will handle errors
625 */
626 nodeid = prom_chosennode();
627
628 /*
629 * get the 'iosram' property from the /chosen node
630 */
631 if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
632 SBBC_ERR(CE_PANIC, "No iosram property found! \n");
633 }
634
635 if (prom_phandle_to_path((phandle_t)tunnel, master_sbbc,
636 sizeof (master_sbbc)) < 0) {
637 SBBC_ERR1(CE_PANIC, "prom_phandle_to_path(%d) failed\n",
638 tunnel);
639 }
640
641 chosen_nodeid = nodeid;
642
643 /*
644 * load and attach the sgsbbc driver.
645 * This will also attach all the sgsbbc driver instances
646 */
647 if (i_ddi_attach_hw_nodes("sgsbbc") != DDI_SUCCESS) {
648 cmn_err(CE_WARN, "sgsbbc failed to load\n");
649 }
650
651 /* translate a path name to a dev_info_t */
652 dip = e_ddi_hold_devi_by_path(master_sbbc, 0);
653 if ((dip == NULL) || (ddi_get_nodeid(dip) != tunnel)) {
654 cmn_err(CE_PANIC, "i_ddi_path_to_devi(%x) failed for SBBC\n",
655 tunnel);
656 }
657
658 /* make sure devi_ref is ZERO */
659 ndi_rele_devi(dip);
660
661 DCMNERR(CE_CONT, "Chosen IOSRAM is at %s \n", master_sbbc);
662
663 return (dip);
664 }
665
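/*
 * Load the drivers this platform depends on (mc-us3, sgsbbc/IOSRAM,
 * pcisch), look up the IOSRAM access routines, switch the TOD code over
 * to the SC, and arm the hardware watchdog if it has been enabled.
 */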
666 void
667 load_platform_drivers(void)
668 {
669 	int ret = 0;	/* stays 0 if the watchdog is not enabled below */
670
671 /*
672 * Load and attach the mc-us3 memory driver.
673 */
674 if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
675 cmn_err(CE_WARN, "mc-us3 failed to load");
676 else
677 (void) ddi_hold_driver(ddi_name_to_major("mc-us3"));
678
679 /*
680 * Initialize the chosen IOSRAM before its clients
681 * are loaded.
682 */
683 (void) find_chosen_dip();
684
685 /*
686 * Ideally, we'd do this in set_platform_defaults(), but
687 * at that point it's too early to look up symbols.
688 */
689 iosram_write_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
690 modgetsymvalue("iosram_write", 0);
691
692 if (iosram_write_ptr == NULL) {
693 		DCMNERR(CE_WARN, "load_platform_drivers: iosram_write()"
694 " not found; signatures will not be updated\n");
695 } else {
696 /*
697 * The iosram read ptr is only needed if we can actually
698 * write CPU signatures, so only bother setting it if we
699 * set a valid write pointer, above.
700 */
701 iosram_read_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
702 modgetsymvalue("iosram_read", 0);
703
704 if (iosram_read_ptr == NULL)
705 			DCMNERR(CE_WARN, "load_platform_drivers: iosram_read()"
706 " not found\n");
707 }
708
709 /*
710 * Set todsg_use_sc to TRUE so that we will be getting date
711 * from the SC.
712 */
713 todsg_use_sc = TRUE;
714
715 /*
716 * Now is a good time to activate hardware watchdog (if one exists).
717 */
718 mutex_enter(&tod_lock);
719 if (watchdog_enable)
720 ret = tod_ops.tod_set_watchdog_timer(watchdog_timeout_seconds);
721 mutex_exit(&tod_lock);
722 if (ret != 0)
723 printf("Hardware watchdog enabled\n");
724
725 /*
726 * Load and attach the schizo pci bus nexus driver.
727 */
728 if (i_ddi_attach_hw_nodes("pcisch") != DDI_SUCCESS)
729 cmn_err(CE_WARN, "pcisch failed to load");
730
731 plat_ecc_init();
732 }
733
734 /*
735 * No platform drivers on this platform
736 */
737 char *platform_module_list[] = {
738 (char *)0
739 };
740
741 /*ARGSUSED*/
742 void
743 plat_tod_fault(enum tod_fault_type tod_bad)
744 {
745 }
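/*
 * Per-board and per-domain resource limits reported to the DR framework;
 * the values are constants defined in the Serengeti platform headers.
 */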
746 int
747 plat_max_boards()
748 {
749 return (SG_MAX_BDS);
750 }
751 int
752 plat_max_io_units_per_board()
753 {
754 return (SG_MAX_IO_PER_BD);
755 }
756 int
757 plat_max_cmp_units_per_board()
758 {
759 return (SG_MAX_CMPS_PER_BD);
760 }
761 int
762 plat_max_cpu_units_per_board()
763 {
764 return (SG_MAX_CPUS_PER_BD);
765 }
766
767 int
768 plat_max_mc_units_per_board()
769 {
770 return (SG_MAX_CMPS_PER_BD); /* each CPU die has a memory controller */
771 }
772
773 int
774 plat_max_mem_units_per_board()
775 {
776 return (SG_MAX_MEM_PER_BD);
777 }
778
779 int
780 plat_max_cpumem_boards(void)
781 {
782 return (SG_MAX_CPU_BDS);
783 }
784
785 int
786 set_platform_max_ncpus(void)
787 {
788 return (sg_max_ncpus);
789 }
790
791 void
792 plat_dmv_params(uint_t *hwint, uint_t *swint)
793 {
794 *hwint = MAX_UPA;
795 *swint = 0;
796 }
797
798 /*
799 * Our nodename has been set, pass it along to the SC.
800 */
801 void
802 plat_nodename_set(void)
803 {
804 sbbc_msg_t req; /* request */
805 sbbc_msg_t resp; /* response */
806 int rv; /* return value from call to mbox */
807 struct nodename_info {
808 int32_t namelen;
809 char nodename[_SYS_NMLN];
810 } nni;
811 int (*sg_mbox)(sbbc_msg_t *, sbbc_msg_t *, time_t) = NULL;
812
813 /*
814 * find the symbol for the mailbox routine
815 */
816 sg_mbox = (int (*)(sbbc_msg_t *, sbbc_msg_t *, time_t))
817 modgetsymvalue("sbbc_mbox_request_response", 0);
818
819 if (sg_mbox == NULL) {
820 cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox not found\n");
821 return;
822 }
823
824 /*
825 * construct the message telling the SC our nodename
826 */
827 (void) strcpy(nni.nodename, utsname.nodename);
828 nni.namelen = (int32_t)strlen(nni.nodename);
829
830 req.msg_type.type = INFO_MBOX;
831 req.msg_type.sub_type = INFO_MBOX_NODENAME;
832 req.msg_status = 0;
833 req.msg_len = (int)(nni.namelen + sizeof (nni.namelen));
834 req.msg_bytes = 0;
835 req.msg_buf = (caddr_t)&nni;
836 req.msg_data[0] = 0;
837 req.msg_data[1] = 0;
838
839 /*
840 * initialize the response back from the SC
841 */
842 resp.msg_type.type = INFO_MBOX;
843 resp.msg_type.sub_type = INFO_MBOX_NODENAME;
844 resp.msg_status = 0;
845 resp.msg_len = 0;
846 resp.msg_bytes = 0;
847 resp.msg_buf = (caddr_t)0;
848 resp.msg_data[0] = 0;
849 resp.msg_data[1] = 0;
850
851 /*
852 * ship it and check for success
853 */
854 rv = (sg_mbox)(&req, &resp, sbbc_mbox_default_timeout);
855
856 if (rv != 0) {
857 cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox retval %d\n", rv);
858 } else if (resp.msg_status != 0) {
859 cmn_err(CE_NOTE, "!plat_nodename_set: msg_status %d\n",
860 resp.msg_status);
861 } else {
862 DCMNERR(CE_NOTE, "!plat_nodename_set was successful\n");
863
864 /*
865 * It is necessary to exchange the capability bitmap
866 		 * with the SC before sending any ECC error information and
867 		 * indictments. We call plat_ecc_capability_send()
868 * here just after sending the nodename successfully.
869 */
870 rv = plat_ecc_capability_send();
871 if (rv == 0) {
872 DCMNERR(CE_NOTE, "!plat_ecc_capability_send was"
873 " successful\n");
874 }
875 }
876 }
877
878 /*
879 * flag to allow users switch between using OBP's
880  * flag to allow users to switch between using OBP's
881 * (for main memory errors only).
882 */
883 int sg_use_prom_get_unum = 0;
884
885 /*
886 * Debugging flag: set to 1 to call into obp for get_unum, or set it to 0
887 * to call into the unum cache system. This is the E$ equivalent of
888 * sg_use_prom_get_unum.
889 */
890 int sg_use_prom_ecache_unum = 0;
891
892 /* used for logging ECC errors to the SC */
893 #define SG_MEMORY_ECC 1
894 #define SG_ECACHE_ECC 2
895 #define SG_UNKNOWN_ECC (-1)
896
897 /*
898 * plat_get_mem_unum() generates a string identifying either the
899 * memory or E$ DIMM(s) during error logging. Depending on whether
900 * the error is E$ or memory related, the appropriate support
901 * routine is called to assist in the string generation.
902 *
903  * - For main memory errors we can use the mc-us3 driver's p2get_mem_unum()
904 * (or prom_get_unum() for debugging purposes).
905 *
906 * - For E$ errors we call sg_get_ecacheunum() to generate the unum (or
907 * prom_serengeti_get_ecacheunum() for debugging purposes).
908 */
909
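/*
 * Thin wrapper around prom_get_unum() that maps failures and empty
 * results onto errno values.
 */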
910 static int
911 sg_prom_get_unum(int synd_code, uint64_t paddr, char *buf, int buflen,
912 int *lenp)
913 {
914 if ((prom_get_unum(synd_code, (unsigned long long)paddr,
915 buf, buflen, lenp)) != 0)
916 return (EIO);
917 else if (*lenp <= 1)
918 return (EINVAL);
919 else
920 return (0);
921 }
922
923 /*ARGSUSED*/
924 int
925 plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
926 int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
927 {
928 /*
929 	 * unum_func will either point to the memory driver's p2get_mem_unum()
930 * or to prom_get_unum() for memory errors.
931 */
932 int (*unum_func)(int synd_code, uint64_t paddr, char *buf,
933 int buflen, int *lenp) = p2get_mem_unum;
934
935 /*
936 * check if it's a Memory or an Ecache error.
937 */
938 if (flt_in_memory) {
939 /*
940 * It's a main memory error.
941 *
942 * For debugging we allow the user to switch between
943 * using OBP's get_unum and the memory driver's get_unum
944 * so we create a pointer to the functions and switch
945 * depending on the sg_use_prom_get_unum flag.
946 */
947 if (sg_use_prom_get_unum) {
948 DCMNERR(CE_NOTE, "Using prom_get_unum from OBP");
949 return (sg_prom_get_unum(synd_code,
950 P2ALIGN(flt_addr, 8), buf, buflen, lenp));
951 } else if (unum_func != NULL) {
952 return (unum_func(synd_code, P2ALIGN(flt_addr, 8),
953 buf, buflen, lenp));
954 } else {
955 return (ENOTSUP);
956 }
957 } else if (flt_status & ECC_ECACHE) {
958 /*
959 * It's an E$ error.
960 */
961 if (sg_use_prom_ecache_unum) {
962 /*
963 * We call to OBP to handle this.
964 */
965 DCMNERR(CE_NOTE,
966 "Using prom_serengeti_get_ecacheunum from OBP");
967 if (prom_serengeti_get_ecacheunum(flt_bus_id,
968 P2ALIGN(flt_addr, 8), buf, buflen, lenp) != 0) {
969 return (EIO);
970 }
971 } else {
972 return (sg_get_ecacheunum(flt_bus_id, flt_addr,
973 buf, buflen, lenp));
974 }
975 } else {
976 return (ENOTSUP);
977 }
978
979 return (0);
980 }
981
982 /*
983 * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
984 * driver giving each platform the opportunity to add platform
985 * specific label information to the unum for ECC error logging purposes.
986 */
987 void
988 plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
989 {
990 char new_unum[UNUM_NAMLEN] = "";
991 int node = SG_PORTID_TO_NODEID(mcid);
992 int board = SG_CPU_BD_PORTID_TO_BD_NUM(mcid);
993 int position = SG_PORTID_TO_CPU_POSN(mcid);
994
995 /*
996 * The mc-us3 driver deals with logical banks but for unum
997 * purposes we need to use physical banks so that the correct
998 * dimm can be physically located. Logical banks 0 and 2
999 * make up physical bank 0. Logical banks 1 and 3 make up
1000 * physical bank 1. Here we do the necessary conversion.
1001 */
1002 bank = (bank % 2);
1003
1004 if (dimm == -1) {
1005 SG_SET_FRU_NAME_NODE(new_unum, node);
1006 SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1007 SG_SET_FRU_NAME_MODULE(new_unum, position);
1008 SG_SET_FRU_NAME_BANK(new_unum, bank);
1009
1010 } else {
1011 SG_SET_FRU_NAME_NODE(new_unum, node);
1012 SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1013 SG_SET_FRU_NAME_MODULE(new_unum, position);
1014 SG_SET_FRU_NAME_BANK(new_unum, bank);
1015 SG_SET_FRU_NAME_DIMM(new_unum, dimm);
1016
1017 (void) strcat(new_unum, " ");
1018 (void) strcat(new_unum, unum);
1019 }
1020
1021 (void) strcpy(unum, new_unum);
1022 }
1023
1024 int
1025 plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
1026 {
1027 int node = SG_PORTID_TO_NODEID(cpuid);
1028 int board = SG_CPU_BD_PORTID_TO_BD_NUM(cpuid);
1029
1030 if (snprintf(buf, buflen, "/N%d/%s%d", node,
1031 SG_HPU_TYPE_CPU_BOARD_ID, board) >= buflen) {
1032 return (ENOSPC);
1033 } else {
1034 *lenp = strlen(buf);
1035 return (0);
1036 }
1037 }
1038
1039 /*
1040 * We log all ECC events to the SC so we send a mailbox
1041 * message to the SC passing it the relevant data.
1042 * ECC mailbox messages are sent via a taskq mechanism to
1043 * prevent impaired system performance during ECC floods.
1044 * Indictments have already passed through a taskq, so they
1045 * are not queued here.
1046 */
1047 int
1048 plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1049 {
1050 sbbc_ecc_mbox_t *msgp;
1051 size_t msg_size;
1052 uint16_t msg_subtype;
1053 int sleep_flag, log_error;
1054
1055 if (sg_ecc_taskq_func == NULL) {
1056 sg_ecc_taskq_func = (void (*)(sbbc_ecc_mbox_t *))
1057 modgetsymvalue("sbbc_mbox_queue_ecc_event", 0);
1058 if (sg_ecc_taskq_func == NULL) {
1059 cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1060 "sbbc_mbox_queue_ecc_event not found");
1061 return (ENODEV);
1062 }
1063 }
1064 if (sg_ecc_mbox_func == NULL) {
1065 sg_ecc_mbox_func = (int (*)(sbbc_ecc_mbox_t *))
1066 modgetsymvalue("sbbc_mbox_ecc_output", 0);
1067 if (sg_ecc_mbox_func == NULL) {
1068 cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1069 "sbbc_mbox_ecc_output not found");
1070 return (ENODEV);
1071 }
1072 }
1073
1074 /*
1075 * Initialize the request and response structures
1076 */
1077 switch (msg_type) {
1078 case PLAT_ECC_ERROR_MESSAGE:
1079 msg_subtype = INFO_MBOX_ERROR_ECC;
1080 msg_size = sizeof (plat_ecc_error_data_t);
1081 sleep_flag = KM_NOSLEEP;
1082 log_error = 1;
1083 break;
1084 case PLAT_ECC_ERROR2_MESSAGE:
1085 msg_subtype = INFO_MBOX_ECC;
1086 msg_size = sizeof (plat_ecc_error2_data_t);
1087 sleep_flag = KM_NOSLEEP;
1088 log_error = 1;
1089 break;
1090 case PLAT_ECC_INDICTMENT_MESSAGE:
1091 msg_subtype = INFO_MBOX_ERROR_INDICT;
1092 msg_size = sizeof (plat_ecc_indictment_data_t);
1093 sleep_flag = KM_SLEEP;
1094 log_error = 0;
1095 break;
1096 case PLAT_ECC_INDICTMENT2_MESSAGE:
1097 msg_subtype = INFO_MBOX_ECC;
1098 msg_size = sizeof (plat_ecc_indictment2_data_t);
1099 sleep_flag = KM_SLEEP;
1100 log_error = 0;
1101 break;
1102 case PLAT_ECC_CAPABILITY_MESSAGE:
1103 msg_subtype = INFO_MBOX_ECC_CAP;
1104 msg_size = sizeof (plat_capability_data_t) +
1105 strlen(utsname.release) + strlen(utsname.version) + 2;
1106 sleep_flag = KM_SLEEP;
1107 log_error = 0;
1108 break;
1109 case PLAT_ECC_DIMM_SID_MESSAGE:
1110 msg_subtype = INFO_MBOX_ECC;
1111 msg_size = sizeof (plat_dimm_sid_request_data_t);
1112 sleep_flag = KM_SLEEP;
1113 log_error = 0;
1114 break;
1115 default:
1116 return (EINVAL);
1117 }
1118
1119 msgp = (sbbc_ecc_mbox_t *)kmem_zalloc(sizeof (sbbc_ecc_mbox_t),
1120 sleep_flag);
1121 if (msgp == NULL) {
1122 cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1123 "unable to allocate sbbc_ecc_mbox");
1124 return (ENOMEM);
1125 }
1126
1127 msgp->ecc_log_error = log_error;
1128
1129 msgp->ecc_req.msg_type.type = INFO_MBOX;
1130 msgp->ecc_req.msg_type.sub_type = msg_subtype;
1131 msgp->ecc_req.msg_status = 0;
1132 msgp->ecc_req.msg_len = (int)msg_size;
1133 msgp->ecc_req.msg_bytes = 0;
1134 msgp->ecc_req.msg_buf = (caddr_t)kmem_zalloc(msg_size, sleep_flag);
1135 msgp->ecc_req.msg_data[0] = 0;
1136 msgp->ecc_req.msg_data[1] = 0;
1137
1138 if (msgp->ecc_req.msg_buf == NULL) {
1139 cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1140 "unable to allocate request msg_buf");
1141 kmem_free((void *)msgp, sizeof (sbbc_ecc_mbox_t));
1142 return (ENOMEM);
1143 }
1144 bcopy(datap, (void *)msgp->ecc_req.msg_buf, msg_size);
1145
1146 /*
1147 * initialize the response back from the SC
1148 */
1149 msgp->ecc_resp.msg_type.type = INFO_MBOX;
1150 msgp->ecc_resp.msg_type.sub_type = msg_subtype;
1151 msgp->ecc_resp.msg_status = 0;
1152 msgp->ecc_resp.msg_len = 0;
1153 msgp->ecc_resp.msg_bytes = 0;
1154 msgp->ecc_resp.msg_buf = NULL;
1155 msgp->ecc_resp.msg_data[0] = 0;
1156 msgp->ecc_resp.msg_data[1] = 0;
1157
1158 switch (msg_type) {
1159 case PLAT_ECC_ERROR_MESSAGE:
1160 case PLAT_ECC_ERROR2_MESSAGE:
1161 /*
1162 * For Error Messages, we go through a taskq.
1163 * Queue up the message for processing
1164 */
1165 (*sg_ecc_taskq_func)(msgp);
1166 return (0);
1167
1168 case PLAT_ECC_CAPABILITY_MESSAGE:
1169 /*
1170 * For indictment and capability messages, we've already gone
1171 * through the taskq, so we can call the mailbox routine
1172 * directly. Find the symbol for the routine that sends
1173 * the mailbox msg
1174 */
1175 msgp->ecc_resp.msg_len = (int)msg_size;
1176 msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(msg_size,
1177 sleep_flag);
1178 /* FALLTHRU */
1179
1180 case PLAT_ECC_INDICTMENT_MESSAGE:
1181 case PLAT_ECC_INDICTMENT2_MESSAGE:
1182 return ((*sg_ecc_mbox_func)(msgp));
1183
1184 case PLAT_ECC_DIMM_SID_MESSAGE:
1185 msgp->ecc_resp.msg_len = sizeof (plat_dimm_sid_board_data_t);
1186 msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(
1187 sizeof (plat_dimm_sid_board_data_t), sleep_flag);
1188 return ((*sg_ecc_mbox_func)(msgp));
1189
1190 default:
1191 ASSERT(0);
1192 return (EINVAL);
1193 }
1194 }
1195
1196 /*
1197 * m is redundant on serengeti as the multiplier is always 4
1198 */
1199 /*ARGSUSED*/
1200 int
1201 plat_make_fru_cpuid(int sb, int m, int proc)
1202 {
1203 return (MAKE_CPUID(sb, proc));
1204 }
1205
1206 /*
1207 * board number for a given proc
1208 */
1209 int
1210 plat_make_fru_boardnum(int proc)
1211 {
1212 return (SG_CPU_BD_PORTID_TO_BD_NUM(proc));
1213 }
1214
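/*
 * Write a CPU signature (state/sub-state) into the IOSRAM signature
 * block, either for a single CPU (cpuid >= 0) or for every existing
 * CPU plus the domain signature (cpuid == -1).
 */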
1215 static
1216 void
1217 cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
1218 {
1219 uint32_t signature = CPU_SIG_BLD(sig, state, sub_state);
1220 sig_state_t current_sgn;
1221 int i;
1222
1223 if (iosram_write_ptr == NULL) {
1224 /*
1225 * If the IOSRAM write pointer isn't set, we won't be able
1226 * to write signatures to ANYTHING, so we may as well just
1227 * write out an error message (if desired) and exit this
1228 * routine now...
1229 */
1230 DCMNERR(CE_WARN,
1231 "cpu_sgn_update: iosram_write() not found;"
1232 " cannot write signature 0x%x for CPU(s) or domain\n",
1233 signature);
1234 return;
1235 }
1236
1237
1238 /*
1239 * Differentiate a panic reboot from a non-panic reboot in the
1240 * setting of the substate of the signature.
1241 *
1242 * If the new substate is REBOOT and we're rebooting due to a panic,
1243 * then set the new substate to a special value indicating a panic
1244 * reboot, SIGSUBST_PANIC_REBOOT.
1245 *
1246 * A panic reboot is detected by a current (previous) domain signature
1247 * state of SIGST_EXIT, and a new signature substate of SIGSUBST_REBOOT.
1248 * The domain signature state SIGST_EXIT is used as the panic flow
1249 * progresses.
1250 *
1251 	 * At the end of the panic flow, the reboot occurs, but we should note
1252 	 * that it was involuntary, something that may be quite useful to know
1253 * at OBP level.
1254 */
1255 if (sub_state == SIGSUBST_REBOOT) {
1256 if (iosram_read_ptr == NULL) {
1257 DCMNERR(CE_WARN,
1258 "cpu_sgn_update: iosram_read() not found;"
1259 " could not check current domain signature\n");
1260 } else {
1261 (void) (*iosram_read_ptr)(SBBC_SIGBLCK_KEY,
1262 SG_SGNBLK_DOMAINSIG_OFFSET,
1263 			    (char *)&current_sgn, sizeof (current_sgn));
1264 if (current_sgn.state_t.state == SIGST_EXIT)
1265 signature = CPU_SIG_BLD(sig, state,
1266 SIGSUBST_PANIC_REBOOT);
1267 }
1268 }
1269
1270 /*
1271 * cpuid == -1 indicates that the operation applies to all cpus.
1272 */
1273 if (cpuid >= 0) {
1274 (void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1275 SG_SGNBLK_CPUSIG_OFFSET(cpuid), (char *)&signature,
1276 sizeof (signature));
1277 } else {
1278 for (i = 0; i < NCPU; i++) {
1279 if (cpu[i] == NULL || !(cpu[i]->cpu_flags &
1280 (CPU_EXISTS|CPU_QUIESCED))) {
1281 continue;
1282 }
1283 (void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1284 SG_SGNBLK_CPUSIG_OFFSET(i), (char *)&signature,
1285 sizeof (signature));
1286 }
1287 }
1288
1289 if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
1290 return;
1291 }
1292
1293 (void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1294 SG_SGNBLK_DOMAINSIG_OFFSET, (char *)&signature,
1295 sizeof (signature));
1296 }
1297
1298 void
1299 startup_platform(void)
1300 {
1301 /* set per-platform constants for mutex backoff */
1302 mutex_backoff_base = 1;
1303 mutex_cap_factor = 32;
1304 }
1305
1306 /*
1307 * A routine to convert a number (represented as a string) to
1308 * the integer value it represents.
1309 */
1310
1311 static int
1312 isdigit(int ch)
1313 {
1314 return (ch >= '0' && ch <= '9');
1315 }
1316
1317 #define isspace(c) ((c) == ' ' || (c) == '\t' || (c) == '\n')
1318
1319 static int
1320 strtoi(char *p, char **pos)
1321 {
1322 int n;
1323 int c, neg = 0;
1324
1325 if (!isdigit(c = *p)) {
1326 while (isspace(c))
1327 c = *++p;
1328 switch (c) {
1329 case '-':
1330 neg++;
1331 /* FALLTHROUGH */
1332 case '+':
1333 c = *++p;
1334 }
1335 if (!isdigit(c)) {
1336 if (pos != NULL)
1337 *pos = p;
1338 return (0);
1339 }
1340 }
1341 for (n = '0' - c; isdigit(c = *++p); ) {
1342 n *= 10; /* two steps to avoid unnecessary overflow */
1343 n += '0' - c; /* accum neg to avoid surprises at MAX */
1344 }
1345 if (pos != NULL)
1346 *pos = p;
1347 return (neg ? n : -n);
1348 }
1349
1350 /*
1351 * Get the three parts of the Serengeti PROM version.
1352 * Used for feature readiness tests.
1353 *
1354 * Return 0 if version extracted successfully, -1 otherwise.
1355 */
1356
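/*
 * The /openprom "version" property is assumed to look something like
 * "OBP 5.20.9 ...": the three dot-separated numeric fields following the
 * "OBP " prefix are returned.
 */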
1357 int
1358 sg_get_prom_version(int *sysp, int *intfp, int *bldp)
1359 {
1360 int plen;
1361 char vers[512];
1362 static pnode_t node;
1363 static char version[] = "version";
1364 char *verp, *ep;
1365
1366 node = prom_finddevice("/openprom");
1367 if (node == OBP_BADNODE)
1368 return (-1);
1369
1370 plen = prom_getproplen(node, version);
1371 if (plen <= 0 || plen >= sizeof (vers))
1372 return (-1);
1373 (void) prom_getprop(node, version, vers);
1374 vers[plen] = '\0';
1375
1376 /* Make sure it's an OBP flashprom */
1377 	if (vers[0] != 'O' || vers[1] != 'B' || vers[2] != 'P') {
1378 cmn_err(CE_WARN, "sg_get_prom_version: "
1379 "unknown <version> string in </openprom>\n");
1380 return (-1);
1381 }
1382 verp = &vers[4];
1383
1384 *sysp = strtoi(verp, &ep);
1385 if (ep == verp || *ep != '.')
1386 return (-1);
1387 verp = ep + 1;
1388
1389 *intfp = strtoi(verp, &ep);
1390 if (ep == verp || *ep != '.')
1391 return (-1);
1392 verp = ep + 1;
1393
1394 *bldp = strtoi(verp, &ep);
1395 if (ep == verp || (*ep != '\0' && !isspace(*ep)))
1396 return (-1);
1397 return (0);
1398 }
1399
1400 /*
1401 * Return 0 if system board Dynamic Reconfiguration
1402 * is supported by the firmware, -1 otherwise.
1403 */
1404 int
1405 sg_prom_sb_dr_check(void)
1406 {
1407 static int prom_res = 1;
1408
1409 if (prom_res == 1) {
1410 int sys, intf, bld;
1411 int rv;
1412
1413 rv = sg_get_prom_version(&sys, &intf, &bld);
1414 if (rv == 0 && sys == 5 &&
1415 (intf >= 12 || (intf == 11 && bld >= 200))) {
1416 prom_res = 0;
1417 } else {
1418 prom_res = -1;
1419 }
1420 }
1421 return (prom_res);
1422 }
1423
1424 /*
1425 * Return 0 if cPCI Dynamic Reconfiguration
1426 * is supported by the firmware, -1 otherwise.
1427 */
1428 int
1429 sg_prom_cpci_dr_check(void)
1430 {
1431 /*
1432 * The version check is currently the same as for
1433 * system boards. Since the two DR sub-systems are
1434 * independent, this could change.
1435 */
1436 return (sg_prom_sb_dr_check());
1437 }
1438
1439 /*
1440 * KDI functions - used by the in-situ kernel debugger (kmdb) to perform
1441 * platform-specific operations. These functions execute when the world is
1442 * stopped, and as such cannot make any blocking calls, hold locks, etc.
1443 * promif functions are a special case, and may be used.
1444 */
1445
1446 /*
1447 * Our implementation of this KDI op updates the CPU signature in the system
1448 * controller. Note that we set the signature to OBP_SIG, rather than DBG_SIG.
1449 * The Forth words we execute will, among other things, transform our OBP_SIG
1450 * into DBG_SIG. They won't function properly if we try to use DBG_SIG.
1451 */
1452 static void
1453 sg_system_claim(void)
1454 {
1455 lbolt_debug_entry();
1456
1457 prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
1458 }
1459
1460 static void
1461 sg_system_release(void)
1462 {
1463 prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
1464
1465 lbolt_debug_return();
1466 }
1467
1468 static void
1469 sg_console_claim(void)
1470 {
1471 (void) prom_serengeti_set_console_input(SGCN_OBP_STR);
1472 }
1473
1474 static void
1475 sg_console_release(void)
1476 {
1477 (void) prom_serengeti_set_console_input(SGCN_CLNT_STR);
1478 }
1479
1480 void
1481 plat_kdi_init(kdi_t *kdi)
1482 {
1483 kdi->pkdi_system_claim = sg_system_claim;
1484 kdi->pkdi_system_release = sg_system_release;
1485 kdi->pkdi_console_claim = sg_console_claim;
1486 kdi->pkdi_console_release = sg_console_release;
1487 }
1488