1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/time.h> 30 #include <sys/cpuvar.h> 31 #include <sys/dditypes.h> 32 #include <sys/ddipropdefs.h> 33 #include <sys/ddi_impldefs.h> 34 #include <sys/sunddi.h> 35 #include <sys/esunddi.h> 36 #include <sys/sunndi.h> 37 #include <sys/platform_module.h> 38 #include <sys/errno.h> 39 #include <sys/conf.h> 40 #include <sys/modctl.h> 41 #include <sys/promif.h> 42 #include <sys/promimpl.h> 43 #include <sys/prom_plat.h> 44 #include <sys/cmn_err.h> 45 #include <sys/sysmacros.h> 46 #include <sys/mem_cage.h> 47 #include <sys/kobj.h> 48 #include <sys/utsname.h> 49 #include <sys/cpu_sgnblk_defs.h> 50 #include <sys/atomic.h> 51 #include <sys/kdi_impl.h> 52 53 #include <sys/sgsbbc.h> 54 #include <sys/sgsbbc_iosram.h> 55 #include <sys/sgsbbc_iosram_priv.h> 56 #include <sys/sgsbbc_mailbox.h> 57 #include <sys/sgsgn.h> 58 #include <sys/sgcn.h> 59 #include <sys/serengeti.h> 60 #include <sys/sgfrutypes.h> 61 #include <sys/machsystm.h> 62 #include <sys/sbd_ioctl.h> 63 #include 
#include <sys/sbd.h>
#include <sys/sbdp_mem.h>

#include <sys/memnode.h>
#include <vm/vm_dep.h>
#include <vm/page.h>

#include <sys/cheetahregs.h>
#include <sys/plat_ecc_unum.h>
#include <sys/plat_ecc_dimm.h>

#include <sys/lgrp.h>

/* Debug-trace switch; DCMNERR output is compiled in only under DEBUG. */
static int sg_debug = 0;

#ifdef DEBUG
#define	DCMNERR if (sg_debug) cmn_err
#else
#define	DCMNERR
#endif

/*
 * Set by the mc-us3 memory driver once it attaches; used by
 * plat_get_mem_unum() to resolve unums for main-memory errors.
 */
int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);

/* local functions */
static void cpu_sgn_update(ushort_t sgn, uchar_t state,
    uchar_t sub_state, int cpuid);


/*
 * Local data.
 *
 * iosram_write_ptr is a pointer to iosram_write().  Because of
 * kernel dynamic linking, we can't get to the function by name,
 * but we can look up its address, and store it in this variable
 * instead.
 *
 * We include the extern for iosram_write() here not because we call
 * it, but to force compilation errors if its prototype doesn't
 * match the prototype of iosram_write_ptr.
 *
 * The same issues apply to iosram_read() and iosram_read_ptr.
 */
/*CSTYLED*/
extern int iosram_write     (int, uint32_t, caddr_t, uint32_t);
static int (*iosram_write_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
/*CSTYLED*/
extern int iosram_read      (int, uint32_t, caddr_t, uint32_t);
static int (*iosram_read_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;


/*
 * Variable to indicate if the date should be obtained from the SC or not.
 * Flipped to TRUE in load_platform_drivers() once the IOSRAM is usable.
 */
int todsg_use_sc = FALSE;	/* set the false at the beginning */

/*
 * Preallocation of spare tsb's for DR
 *
 * We don't allocate spares for Wildcat since TSBs should come
 * out of memory local to the node.
 */
#define	IOMMU_PER_SCHIZO	2
int serengeti_tsb_spares = (SG_MAX_IO_BDS * SG_SCHIZO_PER_IO_BD *
	IOMMU_PER_SCHIZO);

/*
 * sg_max_ncpus is the maximum number of CPUs supported on Serengeti
 * and Wildcat at GA.  We assume that the maximum number of SSM nodes
 * supported at GA is 4.  sg_max_ncpus is set to be smaller than NCPU
 * to reduce the amount of memory the logs take up until we have a
 * dynamic log memory allocation solution.
 */
int sg_max_ncpus = (24 * 4);	/* (CPUs per node * max number of nodes) */

/*
 * variables to control mailbox message timeouts.
 * These can be patched via /etc/system or mdb.
 */
int	sbbc_mbox_default_timeout = MBOX_DEFAULT_TIMEOUT;
int	sbbc_mbox_min_timeout = MBOX_MIN_TIMEOUT;

/* cached 'chosen' node_id */
pnode_t chosen_nodeid = (pnode_t)0;

/*
 * Mailbox entry points resolved lazily from the sgsbbc driver via
 * modgetsymvalue() the first time an ECC message is sent.
 */
static void (*sg_ecc_taskq_func)(sbbc_ecc_mbox_t *) = NULL;
static int (*sg_ecc_mbox_func)(sbbc_ecc_mbox_t *) = NULL;

/*
 * Table that maps memory slices to a specific memnode.
 */
int slice_to_memnode[SG_MAX_SLICE];

plat_dimm_sid_board_t	domain_dimm_sids[SG_MAX_CPU_BDS];


/*
 * Number of spare TSBs to preallocate for DR, capped at MAX_UPA.
 */
int
set_platform_tsb_spares()
{
	return (MIN(serengeti_tsb_spares, MAX_UPA));
}

#pragma weak mmu_init_large_pages

/*
 * Establish platform-wide tunables and callbacks early in boot:
 * watchdog, CPU signature hook, forthdebug, cross-call tick limit
 * and (when linked in) large-page MMU initialization.
 */
void
set_platform_defaults(void)
{
	extern int watchdog_enable;
	extern uint64_t xc_tick_limit_scale;
	extern void mmu_init_large_pages(size_t);

#ifdef DEBUG
	char *todsg_name = "todsg";
	ce_verbose_memory = 2;
	ce_verbose_other = 2;
#endif /* DEBUG */

	watchdog_enable = TRUE;
	watchdog_available = TRUE;

	cpu_sgn_func = cpu_sgn_update;

#ifdef DEBUG
	/* tod_module_name should be set to "todsg" from OBP property */
	if (tod_module_name && (strcmp(tod_module_name, todsg_name) == 0))
		prom_printf("Using todsg driver\n");
	else {
		prom_printf("Force using todsg driver\n");
		tod_module_name = todsg_name;
	}
#endif /* DEBUG */

	/* Serengeti does not support forthdebug */
	forthdebug_supported = 0;


	/*
	 * Some DR operations require the system to be sync paused.
	 * Sync pause on Serengeti could potentially take up to 4
	 * seconds to complete depending on the load on the SC.  To
	 * avoid send_mond panics during such operations, we need to
	 * increase xc_tick_limit to a larger value on Serengeti by
	 * setting xc_tick_limit_scale to 5.
	 */
	xc_tick_limit_scale = 5;

	/*
	 * mmu_init_large_pages is a weak symbol; taking its address
	 * tests whether the routine was actually linked in.
	 */
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
		if (&mmu_init_large_pages)
			mmu_init_large_pages(mmu_ism_pagesize);
	}
}

/*
 * Load the misc modules this platform needs (PCI hotplug support).
 */
void
load_platform_modules(void)
{
	if (modload("misc", "pcihp") < 0) {
		cmn_err(CE_NOTE, "pcihp driver failed to load");
	}
}

/*
 * Power on a CPU through the sbdp driver's sbdp_cpu_poweron(),
 * looked up at call time.  Returns ENOTSUP if sbdp is not loaded.
 */
/*ARGSUSED*/
int
plat_cpu_poweron(struct cpu *cp)
{
	int (*serengeti_cpu_poweron)(struct cpu *) = NULL;

	serengeti_cpu_poweron =
	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweron", 0);

	if (serengeti_cpu_poweron == NULL)
		return (ENOTSUP);
	else
		return ((serengeti_cpu_poweron)(cp));
}

/*
 * Power off a CPU through the sbdp driver's sbdp_cpu_poweroff(),
 * looked up at call time.  Returns ENOTSUP if sbdp is not loaded.
 */
/*ARGSUSED*/
int
plat_cpu_poweroff(struct cpu *cp)
{
	int (*serengeti_cpu_poweroff)(struct cpu *) = NULL;

	serengeti_cpu_poweroff =
	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweroff", 0);

	if (serengeti_cpu_poweroff == NULL)
		return (ENOTSUP);
	else
		return ((serengeti_cpu_poweroff)(cp));
}

#ifdef DEBUG
/* DEBUG-only override for the preferred cage size computed below. */
pgcnt_t serengeti_cage_size_limit;
#endif
/* Preferred minimum cage size (expressed in pages)... for DR */
pgcnt_t serengeti_minimum_cage_size = 0;

/*
 * Size and initialize the kernel cage (the non-relocatable region
 * required for DR).  The cage defaults to 1/256 of total memory but
 * never less than serengeti_minimum_cage_size.
 */
void
set_platform_cage_params(void)
{
	extern pgcnt_t total_pages;
	extern struct memlist *phys_avail;
	int ret;

	if (kernel_cage_enable) {
		pgcnt_t preferred_cage_size;

		preferred_cage_size =
		    MAX(serengeti_minimum_cage_size, total_pages / 256);
#ifdef DEBUG
		if (serengeti_cage_size_limit)
			preferred_cage_size = serengeti_cage_size_limit;
#endif
		kcage_range_lock();
		/*
		 * Post copies obp into the lowest slice. This requires the
		 * cage to grow upwards
		 */
		ret = kcage_range_init(phys_avail, 0);
		if (ret == 0)
			kcage_init(preferred_cage_size);
		kcage_range_unlock();
	}

	/* Only note when the cage is off since it should always be on. */
	if (!kcage_on)
		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
}

/* Round x up to the next multiple of a; a == 0 means no alignment. */
#define	ALIGN(x, a)	((a) == 0 ? (uint64_t)(x) : \
	(((uint64_t)(x) + (uint64_t)(a) - 1l) & ~((uint64_t)(a) - 1l)))

/*
 * Record that physical range [base, base + sz) on board 'brd' belongs
 * to a memnode, creating the board-to-memnode binding if needed, and
 * fill in slice_to_memnode[] for every slice the range touches.
 */
void
update_mem_bounds(int brd, uint64_t base, uint64_t sz)
{
	uint64_t	end;
	int		mnode;

	end = base + sz - 1;

	/*
	 * First see if this board already has a memnode associated
	 * with it.  If not, see if this slice has a memnode.  This
	 * covers the cases where a single slice covers multiple
	 * boards (cross-board interleaving) and where a single
	 * board has multiple slices (1+GB DIMMs).
	 */
	if ((mnode = plat_lgrphand_to_mem_node(brd)) == -1) {
		if ((mnode = slice_to_memnode[PA_2_SLICE(base)]) == -1)
			mnode = mem_node_alloc();
		plat_assign_lgrphand_to_mem_node(brd, mnode);
	}

	/*
	 * Align base at 16GB boundary
	 */
	base = ALIGN(base, (1ul << PA_SLICE_SHIFT));

	while (base < end) {
		slice_to_memnode[PA_2_SLICE(base)] = mnode;
		base += (1ul << PA_SLICE_SHIFT);
	}
}

/*
 * Dynamically detect memory slices in the system by decoding
 * the cpu memory decoder registers at boot time.
 */
void
plat_fill_mc(pnode_t nodeid)
{
	uint64_t	mc_addr, mask;
	uint64_t	mc_decode[SG_MAX_BANKS_PER_MC];
	uint64_t	base, size;
	uint32_t	regs[4];
	int		len;
	int		local_mc;
	int		portid;
	int		boardid;
	int		i;

	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
	    (portid == -1))
		return;

	/*
	 * Decode the board number from the MC portid
	 */
	boardid = SG_PORTID_TO_BOARD_NUM(portid);

	/*
	 * The "reg" property returns 4 32-bit values. The first two are
	 * combined to form a 64-bit address.  The second two are for a
	 * 64-bit size, but we don't actually need to look at that value.
	 */
	len = prom_getproplen(nodeid, "reg");
	if (len != (sizeof (uint32_t) * 4)) {
		prom_printf("Warning: malformed 'reg' property\n");
		return;
	}
	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
		return;
	mc_addr = ((uint64_t)regs[0]) << 32;
	mc_addr |= (uint64_t)regs[1];

	/*
	 * Figure out whether the memory controller we are examining
	 * belongs to this CPU or a different one.
	 */
	if (portid == cpunodes[CPU->cpu_id].portid)
		local_mc = 1;
	else
		local_mc = 0;

	for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
		mask = SG_REG_2_OFFSET(i);

		/*
		 * If the memory controller is local to this CPU, we use
		 * the special ASI to read the decode registers.
		 * Otherwise, we load the values from a magic address in
		 * I/O space.
		 */
		if (local_mc)
			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
		else
			mc_decode[i] = lddphysio((mc_addr | mask));

		if (mc_decode[i] >> MC_VALID_SHIFT) {
			/*
			 * The memory decode register is a bitmask field,
			 * so we can decode that into both a base and
			 * a span.
			 */
			base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
			size = MC_UK2SPAN(mc_decode[i]);
			update_mem_bounds(boardid, base, size);
		}
	}
}

/*
 * This routine is run midway through the boot process.  By the time we get
 * here, we know about all the active CPU boards in the system, and we have
 * extracted information about each board's memory from the memory
 * controllers.  We have also figured out which ranges of memory will be
 * assigned to which memnodes, so we walk the slice table to build the table
 * of memnodes.
 */
/* ARGSUSED */
void
plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
{
	int	slice;
	pfn_t	basepfn;
	pgcnt_t	npgs;

	mem_node_pfn_shift = PFN_SLICE_SHIFT;
	mem_node_physalign = (1ull << PA_SLICE_SHIFT);

	for (slice = 0; slice < SG_MAX_SLICE; slice++) {
		if (slice_to_memnode[slice] == -1)
			continue;
		basepfn = (uint64_t)slice << PFN_SLICE_SHIFT;
		npgs = 1ull << PFN_SLICE_SHIFT;
		mem_node_add_slice(basepfn, basepfn + npgs - 1);
	}
}

/*
 * Map a pfn to its memnode via the slice table built above.
 */
int
plat_pfn_to_mem_node(pfn_t pfn)
{
	int node;

	node = slice_to_memnode[PFN_2_SLICE(pfn)];

	return (node);
}
/*
 * Serengeti support for lgroups.
 *
 * On Serengeti, an lgroup platform handle == board number.
 *
 * Mappings between lgroup handles and memnodes are managed
 * in addition to mappings between memory slices and memnodes
 * to support cross-board interleaving as well as multiple
 * slices per board (e.g. >1GB DIMMs).  The initial mapping
 * of memnodes to lgroup handles is determined at boot time.
 * A DR addition of memory adds a new mapping.  A DR copy-rename
 * swaps mappings.
 */

/*
 * Macro for extracting the board number from the CPU id
 */
#define	CPUID_TO_BOARD(id)	(((id) >> 2) & 0x7)

/*
 * Return the platform handle for the lgroup containing the given CPU
 *
 * For Serengeti, lgroup platform handle == board number
 */
lgrp_handle_t
plat_lgrp_cpu_to_hand(processorid_t id)
{
	return (CPUID_TO_BOARD(id));
}

/*
 * Platform specific lgroup initialization
 */
void
plat_lgrp_init(void)
{
	int i;
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;

	/*
	 * Initialize lookup tables to invalid values so we catch
	 * any illegal use of them.
	 */
	for (i = 0; i < SG_MAX_SLICE; i++) {
		slice_to_memnode[i] = -1;
	}

	/*
	 * Set tuneables for Serengeti architecture
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less the remote lgroup
	 * must be loaded before expanding to it.
	 *
	 * Bandwidth is maximized on Serengeti by spreading load across
	 * the machine.  The impact to inter-thread communication isn't
	 * too costly since remote latencies are relatively low.  These
	 * values equate to one CPU's load and so attempt to spread the
	 * load out across as many lgroups as possible one CPU at a time.
	 */
	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX;
	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
}

/*
 * Platform notification of lgroup (re)configuration changes
 */
/*ARGSUSED*/
void
plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
{
	update_membounds_t	*umb;
	lgrp_config_mem_rename_t lmr;
	lgrp_handle_t		shand, thand;
	int			snode, tnode;

	switch (evt) {

	case LGRP_CONFIG_MEM_ADD:
		umb = (update_membounds_t *)arg;
		update_mem_bounds(umb->u_board, umb->u_base, umb->u_len);

		break;

	case LGRP_CONFIG_MEM_DEL:
		/* We don't have to do anything */
		break;

	case LGRP_CONFIG_MEM_RENAME:
		/*
		 * During a DR copy-rename operation, all of the memory
		 * on one board is moved to another board -- but the
		 * addresses/pfns and memnodes don't change. This means
		 * the memory has changed locations without changing identity.
		 *
		 * Source is where we are copying from and target is where we
		 * are copying to.  After source memnode is copied to target
		 * memnode, the physical addresses of the target memnode are
		 * renamed to match what the source memnode had.  Then target
		 * memnode can be removed and source memnode can take its
		 * place.
		 *
		 * To do this, swap the lgroup handle to memnode mappings for
		 * the boards, so target lgroup will have source memnode and
		 * source lgroup will have empty target memnode which is where
		 * its memory will go (if any is added to it later).
		 *
		 * Then source memnode needs to be removed from its lgroup
		 * and added to the target lgroup where the memory was living
		 * but under a different name/memnode.  The memory was in the
		 * target memnode and now lives in the source memnode with
		 * different physical addresses even though it is the same
		 * memory.
		 */
		/* Handles are packed into arg: source low 16, target next. */
		shand = arg & 0xffff;
		thand = (arg & 0xffff0000) >> 16;
		snode = plat_lgrphand_to_mem_node(shand);
		tnode = plat_lgrphand_to_mem_node(thand);

		plat_assign_lgrphand_to_mem_node(thand, snode);
		plat_assign_lgrphand_to_mem_node(shand, tnode);

		/*
		 * Remove source memnode of copy rename from its lgroup
		 * and add it to its new target lgroup
		 */
		lmr.lmem_rename_from = shand;
		lmr.lmem_rename_to = thand;

		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
		    (uintptr_t)&lmr);

		break;

	default:
		break;
	}
}
595 */ 596 int 597 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to) 598 { 599 /* 600 * Return min remote latency when there are more than two lgroups 601 * (root and child) and getting latency between two different lgroups 602 * or root is involved 603 */ 604 if (lgrp_optimizations() && (from != to || 605 from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE)) 606 return (28); 607 else 608 return (23); 609 } 610 611 /* ARGSUSED */ 612 void 613 plat_freelist_process(int mnode) 614 { 615 } 616 617 /* 618 * Find dip for chosen IOSRAM 619 */ 620 dev_info_t * 621 find_chosen_dip(void) 622 { 623 dev_info_t *dip; 624 char master_sbbc[MAXNAMELEN]; 625 pnode_t nodeid; 626 uint_t tunnel; 627 628 /* 629 * find the /chosen SBBC node, prom interface will handle errors 630 */ 631 nodeid = prom_chosennode(); 632 633 /* 634 * get the 'iosram' property from the /chosen node 635 */ 636 if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) { 637 SBBC_ERR(CE_PANIC, "No iosram property found! \n"); 638 } 639 640 if (prom_phandle_to_path((phandle_t)tunnel, master_sbbc, 641 sizeof (master_sbbc)) < 0) { 642 SBBC_ERR1(CE_PANIC, "prom_phandle_to_path(%d) failed\n", 643 tunnel); 644 } 645 646 chosen_nodeid = nodeid; 647 648 /* 649 * load and attach the sgsbbc driver. 650 * This will also attach all the sgsbbc driver instances 651 */ 652 if (i_ddi_attach_hw_nodes("sgsbbc") != DDI_SUCCESS) { 653 cmn_err(CE_WARN, "sgsbbc failed to load\n"); 654 } 655 656 /* translate a path name to a dev_info_t */ 657 dip = e_ddi_hold_devi_by_path(master_sbbc, 0); 658 if ((dip == NULL) || (ddi_get_nodeid(dip) != tunnel)) { 659 cmn_err(CE_PANIC, "i_ddi_path_to_devi(%x) failed for SBBC\n", 660 tunnel); 661 } 662 663 /* make sure devi_ref is ZERO */ 664 ndi_rele_devi(dip); 665 666 DCMNERR(CE_CONT, "Chosen IOSRAM is at %s \n", master_sbbc); 667 668 return (dip); 669 } 670 671 void 672 load_platform_drivers(void) 673 { 674 int ret; 675 676 /* 677 * Load and attach the mc-us3 memory driver. 
678 */ 679 if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS) 680 cmn_err(CE_WARN, "mc-us3 failed to load"); 681 else 682 (void) ddi_hold_driver(ddi_name_to_major("mc-us3")); 683 684 /* 685 * Initialize the chosen IOSRAM before its clients 686 * are loaded. 687 */ 688 (void) find_chosen_dip(); 689 690 /* 691 * Ideally, we'd do this in set_platform_defaults(), but 692 * at that point it's too early to look up symbols. 693 */ 694 iosram_write_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t)) 695 modgetsymvalue("iosram_write", 0); 696 697 if (iosram_write_ptr == NULL) { 698 DCMNERR(CE_WARN, "load_platform_defaults: iosram_write()" 699 " not found; signatures will not be updated\n"); 700 } else { 701 /* 702 * The iosram read ptr is only needed if we can actually 703 * write CPU signatures, so only bother setting it if we 704 * set a valid write pointer, above. 705 */ 706 iosram_read_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t)) 707 modgetsymvalue("iosram_read", 0); 708 709 if (iosram_read_ptr == NULL) 710 DCMNERR(CE_WARN, "load_platform_defaults: iosram_read()" 711 " not found\n"); 712 } 713 714 /* 715 * Set todsg_use_sc to TRUE so that we will be getting date 716 * from the SC. 717 */ 718 todsg_use_sc = TRUE; 719 720 /* 721 * Now is a good time to activate hardware watchdog (if one exists). 722 */ 723 mutex_enter(&tod_lock); 724 if (watchdog_enable) 725 ret = tod_ops.tod_set_watchdog_timer(watchdog_timeout_seconds); 726 mutex_exit(&tod_lock); 727 if (ret != 0) 728 printf("Hardware watchdog enabled\n"); 729 730 /* 731 * Load and attach the schizo pci bus nexus driver. 
732 */ 733 if (i_ddi_attach_hw_nodes("pcisch") != DDI_SUCCESS) 734 cmn_err(CE_WARN, "pcisch failed to load"); 735 736 plat_ecc_init(); 737 } 738 739 /* 740 * No platform drivers on this platform 741 */ 742 char *platform_module_list[] = { 743 (char *)0 744 }; 745 746 /*ARGSUSED*/ 747 void 748 plat_tod_fault(enum tod_fault_type tod_bad) 749 { 750 } 751 int 752 plat_max_boards() 753 { 754 return (SG_MAX_BDS); 755 } 756 int 757 plat_max_io_units_per_board() 758 { 759 return (SG_MAX_IO_PER_BD); 760 } 761 int 762 plat_max_cmp_units_per_board() 763 { 764 return (SG_MAX_CMPS_PER_BD); 765 } 766 int 767 plat_max_cpu_units_per_board() 768 { 769 return (SG_MAX_CPUS_PER_BD); 770 } 771 772 int 773 plat_max_mc_units_per_board() 774 { 775 return (SG_MAX_CMPS_PER_BD); /* each CPU die has a memory controller */ 776 } 777 778 int 779 plat_max_mem_units_per_board() 780 { 781 return (SG_MAX_MEM_PER_BD); 782 } 783 784 int 785 plat_max_cpumem_boards(void) 786 { 787 return (SG_MAX_CPU_BDS); 788 } 789 790 int 791 set_platform_max_ncpus(void) 792 { 793 return (sg_max_ncpus); 794 } 795 796 void 797 plat_dmv_params(uint_t *hwint, uint_t *swint) 798 { 799 *hwint = MAX_UPA; 800 *swint = 0; 801 } 802 803 /* 804 * Our nodename has been set, pass it along to the SC. 
/*
 * Our nodename has been set, pass it along to the SC.
 */
void
plat_nodename_set(void)
{
	sbbc_msg_t	req;	/* request */
	sbbc_msg_t	resp;	/* response */
	int		rv;	/* return value from call to mbox */
	struct nodename_info {
		int32_t	namelen;
		char	nodename[_SYS_NMLN];
	} nni;
	int (*sg_mbox)(sbbc_msg_t *, sbbc_msg_t *, time_t) = NULL;

	/*
	 * find the symbol for the mailbox routine
	 */
	sg_mbox = (int (*)(sbbc_msg_t *, sbbc_msg_t *, time_t))
	    modgetsymvalue("sbbc_mbox_request_response", 0);

	if (sg_mbox == NULL) {
		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox not found\n");
		return;
	}

	/*
	 * construct the message telling the SC our nodename
	 */
	(void) strcpy(nni.nodename, utsname.nodename);
	nni.namelen = (int32_t)strlen(nni.nodename);

	req.msg_type.type = INFO_MBOX;
	req.msg_type.sub_type = INFO_MBOX_NODENAME;
	req.msg_status = 0;
	req.msg_len = (int)(nni.namelen + sizeof (nni.namelen));
	req.msg_bytes = 0;
	req.msg_buf = (caddr_t)&nni;
	req.msg_data[0] = 0;
	req.msg_data[1] = 0;

	/*
	 * initialize the response back from the SC
	 */
	resp.msg_type.type = INFO_MBOX;
	resp.msg_type.sub_type = INFO_MBOX_NODENAME;
	resp.msg_status = 0;
	resp.msg_len = 0;
	resp.msg_bytes = 0;
	resp.msg_buf = (caddr_t)0;
	resp.msg_data[0] = 0;
	resp.msg_data[1] = 0;

	/*
	 * ship it and check for success
	 */
	rv = (sg_mbox)(&req, &resp, sbbc_mbox_default_timeout);

	if (rv != 0) {
		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox retval %d\n", rv);
	} else if (resp.msg_status != 0) {
		cmn_err(CE_NOTE, "!plat_nodename_set: msg_status %d\n",
		    resp.msg_status);
	} else {
		DCMNERR(CE_NOTE, "!plat_nodename_set was successful\n");

		/*
		 * It is necessary to exchange the capability bitmap
		 * with SC before sending any ecc error information and
		 * indictment.  We are calling the plat_ecc_capability_send()
		 * here just after sending the nodename successfully.
		 */
		rv = plat_ecc_capability_send();
		if (rv == 0) {
			DCMNERR(CE_NOTE, "!plat_ecc_capability_send was"
			    " successful\n");
		}
	}
}

/*
 * flag to allow users switch between using OBP's
 * prom_get_unum() and mc-us3 driver's p2get_mem_unum()
 * (for main memory errors only).
 */
int sg_use_prom_get_unum = 0;

/*
 * Debugging flag: set to 1 to call into obp for get_unum, or set it to 0
 * to call into the unum cache system.  This is the E$ equivalent of
 * sg_use_prom_get_unum.
 */
int sg_use_prom_ecache_unum = 0;

/* used for logging ECC errors to the SC */
#define	SG_MEMORY_ECC	1
#define	SG_ECACHE_ECC	2
#define	SG_UNKNOWN_ECC	(-1)

/*
 * plat_get_mem_unum() generates a string identifying either the
 * memory or E$ DIMM(s) during error logging.  Depending on whether
 * the error is E$ or memory related, the appropriate support
 * routine is called to assist in the string generation.
 *
 * - For main memory errors we can use the mc-us3 drivers p2getunum()
 *   (or prom_get_unum() for debugging purposes).
 *
 * - For E$ errors we call sg_get_ecacheunum() to generate the unum (or
 *   prom_serengeti_get_ecacheunum() for debugging purposes).
 */

/*
 * Wrapper around prom_get_unum() that maps its failure modes onto
 * errno values: EIO for a prom failure, EINVAL for an empty result.
 */
static int
sg_prom_get_unum(int synd_code, uint64_t paddr, char *buf, int buflen,
    int *lenp)
{
	if ((prom_get_unum(synd_code, (unsigned long long)paddr,
	    buf, buflen, lenp)) != 0)
		return (EIO);
	else if (*lenp <= 1)
		return (EINVAL);
	else
		return (0);
}

/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
    int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
{
	/*
	 * unum_func will either point to the memory drivers p2get_mem_unum()
	 * or to prom_get_unum() for memory errors.
	 */
	int (*unum_func)(int synd_code, uint64_t paddr, char *buf,
	    int buflen, int *lenp) = p2get_mem_unum;

	/*
	 * check if it's a Memory or an Ecache error.
	 */
	if (flt_in_memory) {
		/*
		 * It's a main memory error.
		 *
		 * For debugging we allow the user to switch between
		 * using OBP's get_unum and the memory driver's get_unum
		 * so we create a pointer to the functions and switch
		 * depending on the sg_use_prom_get_unum flag.
		 */
		if (sg_use_prom_get_unum) {
			DCMNERR(CE_NOTE, "Using prom_get_unum from OBP");
			return (sg_prom_get_unum(synd_code,
			    P2ALIGN(flt_addr, 8), buf, buflen, lenp));
		} else if (unum_func != NULL) {
			return (unum_func(synd_code, P2ALIGN(flt_addr, 8),
			    buf, buflen, lenp));
		} else {
			/* mc-us3 has not registered p2get_mem_unum yet */
			return (ENOTSUP);
		}
	} else if (flt_status & ECC_ECACHE) {
		/*
		 * It's an E$ error.
		 */
		if (sg_use_prom_ecache_unum) {
			/*
			 * We call to OBP to handle this.
			 */
			DCMNERR(CE_NOTE,
			    "Using prom_serengeti_get_ecacheunum from OBP");
			if (prom_serengeti_get_ecacheunum(flt_bus_id,
			    P2ALIGN(flt_addr, 8), buf, buflen, lenp) != 0) {
				return (EIO);
			}
		} else {
			return (sg_get_ecacheunum(flt_bus_id, flt_addr,
			    buf, buflen, lenp));
		}
	} else {
		return (ENOTSUP);
	}

	return (0);
}

/*
 * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
 * driver giving each platform the opportunity to add platform
 * specific label information to the unum for ECC error logging purposes.
 */
void
plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
{
	/*
	 * NOTE(review): assumes the caller's 'unum' buffer is at least
	 * UNUM_NAMLEN bytes and that prefix + original unum fit --
	 * confirm against mc_add_mem_unum_label() in mc-us3.
	 */
	char	new_unum[UNUM_NAMLEN] = "";
	int	node = SG_PORTID_TO_NODEID(mcid);
	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(mcid);
	int	position = SG_PORTID_TO_CPU_POSN(mcid);

	/*
	 * The mc-us3 driver deals with logical banks but for unum
	 * purposes we need to use physical banks so that the correct
	 * dimm can be physically located.  Logical banks 0 and 2
	 * make up physical bank 0.  Logical banks 1 and 3 make up
	 * physical bank 1.  Here we do the necessary conversion.
	 */
	bank = (bank % 2);

	if (dimm == -1) {
		SG_SET_FRU_NAME_NODE(new_unum, node);
		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
		SG_SET_FRU_NAME_MODULE(new_unum, position);
		SG_SET_FRU_NAME_BANK(new_unum, bank);

	} else {
		SG_SET_FRU_NAME_NODE(new_unum, node);
		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
		SG_SET_FRU_NAME_MODULE(new_unum, position);
		SG_SET_FRU_NAME_BANK(new_unum, bank);
		SG_SET_FRU_NAME_DIMM(new_unum, dimm);

		strcat(new_unum, " ");
		strcat(new_unum, unum);
	}

	strcpy(unum, new_unum);
}

/*
 * Format the FRU name ("/N<node>/<board-type><board>") for a CPU id
 * into buf.  Returns 0 and sets *lenp on success, ENOSPC if buf is
 * too small.
 */
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	int	node = SG_PORTID_TO_NODEID(cpuid);
	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(cpuid);

	if (snprintf(buf, buflen, "/N%d/%s%d", node,
	    SG_HPU_TYPE_CPU_BOARD_ID, board) >= buflen) {
		return (ENOSPC);
	} else {
		*lenp = strlen(buf);
		return (0);
	}
}

/*
 * We log all ECC events to the SC so we send a mailbox
 * message to the SC passing it the relevant data.
 * ECC mailbox messages are sent via a taskq mechanism to
 * prevent impaired system performance during ECC floods.
 * Indictments have already passed through a taskq, so they
 * are not queued here.
 */
 */
int
plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
{
	sbbc_ecc_mbox_t	*msgp;
	size_t		msg_size;
	uint16_t	msg_subtype;
	int		sleep_flag, log_error;

	/*
	 * The SBBC mailbox routines live in a separately loaded module,
	 * so their addresses are looked up by symbol name on first use
	 * rather than being linked against directly.
	 */
	if (sg_ecc_taskq_func == NULL) {
		sg_ecc_taskq_func = (void (*)(sbbc_ecc_mbox_t *))
		    modgetsymvalue("sbbc_mbox_queue_ecc_event", 0);
		if (sg_ecc_taskq_func == NULL) {
			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
			    "sbbc_mbox_queue_ecc_event not found");
			return (ENODEV);
		}
	}
	if (sg_ecc_mbox_func == NULL) {
		sg_ecc_mbox_func = (int (*)(sbbc_ecc_mbox_t *))
		    modgetsymvalue("sbbc_mbox_ecc_output", 0);
		if (sg_ecc_mbox_func == NULL) {
			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
			    "sbbc_mbox_ecc_output not found");
			return (ENODEV);
		}
	}

	/*
	 * Initialize the request and response structures.
	 *
	 * Per-type selection of mailbox subtype, payload size, allocation
	 * policy (error messages use KM_NOSLEEP; the others may sleep)
	 * and whether allocation failures should be logged by the
	 * mailbox layer.
	 */
	switch (msg_type) {
	case PLAT_ECC_ERROR_MESSAGE:
		msg_subtype = INFO_MBOX_ERROR_ECC;
		msg_size = sizeof (plat_ecc_error_data_t);
		sleep_flag = KM_NOSLEEP;
		log_error = 1;
		break;
	case PLAT_ECC_ERROR2_MESSAGE:
		msg_subtype = INFO_MBOX_ECC;
		msg_size = sizeof (plat_ecc_error2_data_t);
		sleep_flag = KM_NOSLEEP;
		log_error = 1;
		break;
	case PLAT_ECC_INDICTMENT_MESSAGE:
		msg_subtype = INFO_MBOX_ERROR_INDICT;
		msg_size = sizeof (plat_ecc_indictment_data_t);
		sleep_flag = KM_SLEEP;
		log_error = 0;
		break;
	case PLAT_ECC_INDICTMENT2_MESSAGE:
		msg_subtype = INFO_MBOX_ECC;
		msg_size = sizeof (plat_ecc_indictment2_data_t);
		sleep_flag = KM_SLEEP;
		log_error = 0;
		break;
	case PLAT_ECC_CAPABILITY_MESSAGE:
		msg_subtype = INFO_MBOX_ECC_CAP;
		/*
		 * The capability payload carries both the OS release and
		 * version strings (plus their two NUL terminators).
		 */
		msg_size = sizeof (plat_capability_data_t) +
		    strlen(utsname.release) + strlen(utsname.version) + 2;
		sleep_flag = KM_SLEEP;
		log_error = 0;
		break;
	case PLAT_ECC_DIMM_SID_MESSAGE:
		msg_subtype = INFO_MBOX_ECC;
		msg_size = sizeof (plat_dimm_sid_request_data_t);
		sleep_flag = KM_SLEEP;
		log_error = 0;
		break;
	default:
		return (EINVAL);
	}

	msgp = (sbbc_ecc_mbox_t *)kmem_zalloc(sizeof (sbbc_ecc_mbox_t),
	    sleep_flag);
	if (msgp == NULL) {
		/* only possible for the KM_NOSLEEP (error message) cases */
		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
		    "unable to allocate sbbc_ecc_mbox");
		return (ENOMEM);
	}

	msgp->ecc_log_error = log_error;

	/*
	 * Fill in the request to the SC; the caller's payload is copied
	 * into a freshly allocated msg_buf.
	 */
	msgp->ecc_req.msg_type.type = INFO_MBOX;
	msgp->ecc_req.msg_type.sub_type = msg_subtype;
	msgp->ecc_req.msg_status = 0;
	msgp->ecc_req.msg_len = (int)msg_size;
	msgp->ecc_req.msg_bytes = 0;
	msgp->ecc_req.msg_buf = (caddr_t)kmem_zalloc(msg_size, sleep_flag);
	msgp->ecc_req.msg_data[0] = 0;
	msgp->ecc_req.msg_data[1] = 0;

	if (msgp->ecc_req.msg_buf == NULL) {
		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
		    "unable to allocate request msg_buf");
		kmem_free((void *)msgp, sizeof (sbbc_ecc_mbox_t));
		return (ENOMEM);
	}
	bcopy(datap, (void *)msgp->ecc_req.msg_buf, msg_size);

	/*
	 * initialize the response back from the SC
	 */
	msgp->ecc_resp.msg_type.type = INFO_MBOX;
	msgp->ecc_resp.msg_type.sub_type = msg_subtype;
	msgp->ecc_resp.msg_status = 0;
	msgp->ecc_resp.msg_len = 0;
	msgp->ecc_resp.msg_bytes = 0;
	msgp->ecc_resp.msg_buf = NULL;
	msgp->ecc_resp.msg_data[0] = 0;
	msgp->ecc_resp.msg_data[1] = 0;

	switch (msg_type) {
	case PLAT_ECC_ERROR_MESSAGE:
	case PLAT_ECC_ERROR2_MESSAGE:
		/*
		 * For Error Messages, we go through a taskq.
		 * Queue up the message for processing
		 */
		(*sg_ecc_taskq_func)(msgp);
		return (0);

	case PLAT_ECC_CAPABILITY_MESSAGE:
		/*
		 * For indictment and capability messages, we've already gone
		 * through the taskq, so we can call the mailbox routine
		 * directly.  Capability messages additionally expect a
		 * response payload of the same size as the request.
		 */
		msgp->ecc_resp.msg_len = (int)msg_size;
		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(msg_size,
		    sleep_flag);
		/* FALLTHRU */

	case PLAT_ECC_INDICTMENT_MESSAGE:
	case PLAT_ECC_INDICTMENT2_MESSAGE:
		return ((*sg_ecc_mbox_func)(msgp));

	case PLAT_ECC_DIMM_SID_MESSAGE:
		/* response carries the per-board DIMM serial-id data */
		msgp->ecc_resp.msg_len = sizeof (plat_dimm_sid_board_data_t);
		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(
		    sizeof (plat_dimm_sid_board_data_t), sleep_flag);
		return ((*sg_ecc_mbox_func)(msgp));

	default:
		/* unreachable: already filtered by the first switch */
		ASSERT(0);
		return (EINVAL);
	}
}

/*
 * m is redundant on serengeti as the multiplier is always 4
 */
/*ARGSUSED*/
int
plat_make_fru_cpuid(int sb, int m, int proc)
{
	return (MAKE_CPUID(sb, proc));
}

/*
 * board number for a given proc
 */
int
plat_make_fru_boardnum(int proc)
{
	return (SG_CPU_BD_PORTID_TO_BD_NUM(proc));
}

/*
 * Write the signature built from (sig, state, sub_state) into the SBBC
 * IOSRAM signature block, for one CPU (cpuid >= 0) or for all existing,
 * non-quiesced CPUs plus the domain (cpuid == -1).
 */
static
void
cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
{
	uint32_t signature = CPU_SIG_BLD(sig, state, sub_state);
	sig_state_t current_sgn;
	int i;

	if (iosram_write_ptr == NULL) {
		/*
		 * If the IOSRAM write pointer isn't set, we won't be able
		 * to write signatures to ANYTHING, so we may as well just
		 * write out an error message (if desired) and exit this
		 * routine now...
		 */
		DCMNERR(CE_WARN,
		    "cpu_sgn_update: iosram_write() not found;"
		    " cannot write signature 0x%x for CPU(s) or domain\n",
		    signature);
		return;
	}


	/*
	 * Differentiate a panic reboot from a non-panic reboot in the
	 * setting of the substate of the signature.
	 *
	 * If the new substate is REBOOT and we're rebooting due to a panic,
	 * then set the new substate to a special value indicating a panic
	 * reboot, SIGSUBST_PANIC_REBOOT.
	 *
	 * A panic reboot is detected by a current (previous) domain signature
	 * state of SIGST_EXIT, and a new signature substate of SIGSUBST_REBOOT.
	 * The domain signature state SIGST_EXIT is used as the panic flow
	 * progresses.
	 *
	 * At the end of the panic flow, the reboot occurs but we record
	 * that it was an involuntary one, something that may be quite
	 * useful to know at OBP level.
	 */
	if (sub_state == SIGSUBST_REBOOT) {
		if (iosram_read_ptr == NULL) {
			DCMNERR(CE_WARN,
			    "cpu_sgn_update: iosram_read() not found;"
			    " could not check current domain signature\n");
		} else {
			(void) (*iosram_read_ptr)(SBBC_SIGBLCK_KEY,
			    SG_SGNBLK_DOMAINSIG_OFFSET,
			    (char *)&current_sgn, sizeof (current_sgn));
			if (current_sgn.state_t.state == SIGST_EXIT)
				signature = CPU_SIG_BLD(sig, state,
				    SIGSUBST_PANIC_REBOOT);
		}
	}

	/*
	 * cpuid == -1 indicates that the operation applies to all cpus.
	 */
	if (cpuid >= 0) {
		(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
		    SG_SGNBLK_CPUSIG_OFFSET(cpuid), (char *)&signature,
		    sizeof (signature));
	} else {
		for (i = 0; i < NCPU; i++) {
			/* skip absent CPUs and those neither present nor quiesced */
			if (cpu[i] == NULL || !(cpu[i]->cpu_flags &
			    (CPU_EXISTS|CPU_QUIESCED))) {
				continue;
			}
			(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
			    SG_SGNBLK_CPUSIG_OFFSET(i), (char *)&signature,
			    sizeof (signature));
		}
	}

	/*
	 * For OFFLINE/DETACHED transitions only the per-CPU signatures
	 * are updated; leave the domain signature alone.
	 */
	if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
		return;
	}

	(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
	    SG_SGNBLK_DOMAINSIG_OFFSET, (char *)&signature,
	    sizeof (signature));
}

/* Nothing to do at platform startup on Serengeti. */
void
startup_platform(void)
{
}

/*
 * A routine to convert a number (represented as a string) to
 * the integer value it represents.
 */

/* Local ctype-style helpers used by strtoi() below. */
static int
isdigit(int ch)
{
	return (ch >= '0' && ch <= '9');
}

#define	isspace(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')

/*
 * Parse a decimal integer (optional leading whitespace and sign) from *p.
 * If pos is non-NULL, *pos is set to the first unconsumed character.
 * Returns 0 (with *pos == p advanced past whitespace/sign) if no digits
 * are found.
 */
static int
strtoi(char *p, char **pos)
{
	int n;
	int c, neg = 0;

	if (!isdigit(c = *p)) {
		while (isspace(c))
			c = *++p;
		switch (c) {
		case '-':
			neg++;
			/* FALLTHROUGH */
		case '+':
			c = *++p;
		}
		if (!isdigit(c)) {
			if (pos != NULL)
				*pos = p;
			return (0);
		}
	}
	for (n = '0' - c; isdigit(c = *++p); ) {
		n *= 10; /* two steps to avoid unnecessary overflow */
		n += '0' - c; /* accum neg to avoid surprises at MAX */
	}
	if (pos != NULL)
		*pos = p;
	/* n was accumulated negatively; flip unless a '-' was seen */
	return (neg ? n : -n);
}

/*
 * Get the three parts of the Serengeti PROM version.
 * Used for feature readiness tests.
 *
 * Return 0 if version extracted successfully, -1 otherwise.
1357 */ 1358 1359 int 1360 sg_get_prom_version(int *sysp, int *intfp, int *bldp) 1361 { 1362 int plen; 1363 char vers[512]; 1364 static pnode_t node; 1365 static char version[] = "version"; 1366 char *verp, *ep; 1367 1368 node = prom_finddevice("/openprom"); 1369 if (node == OBP_BADNODE) 1370 return (-1); 1371 1372 plen = prom_getproplen(node, version); 1373 if (plen <= 0 || plen >= sizeof (vers)) 1374 return (-1); 1375 (void) prom_getprop(node, version, vers); 1376 vers[plen] = '\0'; 1377 1378 /* Make sure it's an OBP flashprom */ 1379 if (vers[0] != 'O' && vers[1] != 'B' && vers[2] != 'P') { 1380 cmn_err(CE_WARN, "sg_get_prom_version: " 1381 "unknown <version> string in </openprom>\n"); 1382 return (-1); 1383 } 1384 verp = &vers[4]; 1385 1386 *sysp = strtoi(verp, &ep); 1387 if (ep == verp || *ep != '.') 1388 return (-1); 1389 verp = ep + 1; 1390 1391 *intfp = strtoi(verp, &ep); 1392 if (ep == verp || *ep != '.') 1393 return (-1); 1394 verp = ep + 1; 1395 1396 *bldp = strtoi(verp, &ep); 1397 if (ep == verp || (*ep != '\0' && !isspace(*ep))) 1398 return (-1); 1399 return (0); 1400 } 1401 1402 /* 1403 * Return 0 if system board Dynamic Reconfiguration 1404 * is supported by the firmware, -1 otherwise. 1405 */ 1406 int 1407 sg_prom_sb_dr_check(void) 1408 { 1409 static int prom_res = 1; 1410 1411 if (prom_res == 1) { 1412 int sys, intf, bld; 1413 int rv; 1414 1415 rv = sg_get_prom_version(&sys, &intf, &bld); 1416 if (rv == 0 && sys == 5 && 1417 (intf >= 12 || (intf == 11 && bld >= 200))) { 1418 prom_res = 0; 1419 } else { 1420 prom_res = -1; 1421 } 1422 } 1423 return (prom_res); 1424 } 1425 1426 /* 1427 * Return 0 if cPCI Dynamic Reconfiguration 1428 * is supported by the firmware, -1 otherwise. 1429 */ 1430 int 1431 sg_prom_cpci_dr_check(void) 1432 { 1433 /* 1434 * The version check is currently the same as for 1435 * system boards. Since the two DR sub-systems are 1436 * independent, this could change. 
1437 */ 1438 return (sg_prom_sb_dr_check()); 1439 } 1440 1441 /* 1442 * KDI functions - used by the in-situ kernel debugger (kmdb) to perform 1443 * platform-specific operations. These functions execute when the world is 1444 * stopped, and as such cannot make any blocking calls, hold locks, etc. 1445 * promif functions are a special case, and may be used. 1446 */ 1447 1448 /* 1449 * Our implementation of this KDI op updates the CPU signature in the system 1450 * controller. Note that we set the signature to OBP_SIG, rather than DBG_SIG. 1451 * The Forth words we execute will, among other things, transform our OBP_SIG 1452 * into DBG_SIG. They won't function properly if we try to use DBG_SIG. 1453 */ 1454 static void 1455 sg_system_claim(void) 1456 { 1457 prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0); 1458 } 1459 1460 static void 1461 sg_system_release(void) 1462 { 1463 prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0); 1464 } 1465 1466 static void 1467 sg_console_claim(void) 1468 { 1469 prom_serengeti_set_console_input(SGCN_OBP_STR); 1470 } 1471 1472 static void 1473 sg_console_release(void) 1474 { 1475 prom_serengeti_set_console_input(SGCN_CLNT_STR); 1476 } 1477 1478 void 1479 plat_kdi_init(kdi_t *kdi) 1480 { 1481 kdi->pkdi_system_claim = sg_system_claim; 1482 kdi->pkdi_system_release = sg_system_release; 1483 kdi->pkdi_console_claim = sg_console_claim; 1484 kdi->pkdi_console_release = sg_console_release; 1485 } 1486