/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/promif.h>
#include <sys/platform_module.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/machsystm.h>
#include <sys/bootconf.h>
#include <sys/nvpair.h>
#include <sys/kobj.h>
#include <sys/mem_cage.h>
#include <sys/opl.h>
#include <sys/scfd/scfostoescf.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/utsname.h>
#include <sys/ddi.h>
#include <sys/sunndi.h>
#include <sys/lgrp.h>
#include <sys/memnode.h>
#include <sys/sysmacros.h>
#include <sys/time.h>
#include <sys/cpu.h>
#include <vm/vm_dep.h>

int (*opl_get_mem_unum)(int, uint64_t, char *, int, int *);
int (*opl_get_mem_sid)(char *unum, char *buf, int buflen, int *lenp);
int (*opl_get_mem_offset)(uint64_t paddr, uint64_t *offp);
int (*opl_get_mem_addr)(char *unum, char *sid,
    uint64_t offset, uint64_t *paddr);

/* Memory for fcode claims.  16k times # maximum possible IO units */
#define	EFCODE_SIZE	(OPL_MAX_BOARDS * OPL_MAX_IO_UNITS_PER_BOARD * 0x4000)
int efcode_size = EFCODE_SIZE;
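/*
 * Illustrative sizing example (the board and IO-unit counts here are
 * assumptions, not values taken from opl.h): 0x4000 is 16 KB of fcode
 * claim space per possible IO unit, so with, say, 16 boards and 4 IO
 * units per board, EFCODE_SIZE works out to 16 * 4 * 16 KB = 1 MB.
 */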

#define	OPL_MC_MEMBOARD_SHIFT 38	/* Boards on 256GB boundary */

/* Set the maximum number of boards for DR */
int opl_boards = OPL_MAX_BOARDS;

void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);

extern int tsb_lgrp_affinity;

int opl_tsb_spares = (OPL_MAX_BOARDS) * (OPL_MAX_PCICH_UNITS_PER_BOARD) *
	(OPL_MAX_TSBS_PER_PCICH);

pgcnt_t opl_startup_cage_size = 0;

static opl_model_info_t opl_models[] = {
	{ "FF1", OPL_MAX_BOARDS_FF1, FF1, STD_DISPATCH_TABLE },
	{ "FF2", OPL_MAX_BOARDS_FF2, FF2, STD_DISPATCH_TABLE },
	{ "DC1", OPL_MAX_BOARDS_DC1, DC1, STD_DISPATCH_TABLE },
	{ "DC2", OPL_MAX_BOARDS_DC2, DC2, EXT_DISPATCH_TABLE },
	{ "DC3", OPL_MAX_BOARDS_DC3, DC3, EXT_DISPATCH_TABLE },
};
static int opl_num_models = sizeof (opl_models)/sizeof (opl_model_info_t);

/*
 * opl_cur_model
 */
static opl_model_info_t *opl_cur_model = NULL;

static struct memlist *opl_memlist_per_board(struct memlist *ml);

/*
 * Note: the FF/DC out-of-order instruction engine takes only a single
 * cycle to execute each spin loop; for comparison, Panther takes 6 cycles
 * for the same loop.  The OPL sleep instruction takes approximately
 * 1500 nsec.  If the spin count is OPL_BOFF_SLEEP * OPL_BOFF_SPIN, then
 * the spin time should equal OPL_BOFF_TM nsecs.  The listed values are
 * tuned for 2.15GHz to 2.4GHz systems and may change for future systems.
 */
#define	OPL_BOFF_SPIN 720
#define	OPL_BOFF_BASE 1
#define	OPL_BOFF_SLEEP 5
#define	OPL_BOFF_CAP1 20
#define	OPL_BOFF_CAP2 60
#define	OPL_BOFF_MAX (40 * OPL_BOFF_SLEEP)
#define	OPL_BOFF_TM 1500

int
set_platform_max_ncpus(void)
{
	return (OPL_MAX_CPU_PER_BOARD * OPL_MAX_BOARDS);
}

int
set_platform_tsb_spares(void)
{
	return (MIN(opl_tsb_spares, MAX_UPA));
}

static void
set_model_info()
{
	extern int ts_dispatch_extended;
	char	name[MAXSYSNAME];
	int	i;

	/*
	 * Get the model name from the root node.
	 *
	 * We are using the prom device tree since, at this point,
	 * the Solaris device tree is not yet set up.
	 */
	(void) prom_getprop(prom_rootnode(), "model", (caddr_t)name);

	for (i = 0; i < opl_num_models; i++) {
		if (strncmp(name, opl_models[i].model_name, MAXSYSNAME) == 0) {
			opl_cur_model = &opl_models[i];
			break;
		}
	}

	if (i == opl_num_models)
		halt("No valid OPL model is found!");

	if ((opl_cur_model->model_cmds & EXT_DISPATCH_TABLE) &&
	    (ts_dispatch_extended == -1)) {
		/*
		 * Select a dispatch table based on the platform model.
		 * Only DC2 and DC3 systems use the alternate/extended
		 * TS dispatch table; FF1, FF2 and DC1 systems use the
		 * standard dispatch table.
		 */
		ts_dispatch_extended = 1;
	}

}

static void
set_max_mmu_ctxdoms()
{
	extern uint_t	max_mmu_ctxdoms;
	int		max_boards;

	/*
	 * From the model, get the maximum number of boards
	 * supported and set the value accordingly.  If the model
	 * could not be determined or recognized, assume the maximum value.
	 */
	if (opl_cur_model == NULL)
		max_boards = OPL_MAX_BOARDS;
	else
		max_boards = opl_cur_model->model_max_boards;

	/*
	 * On OPL, cores and MMUs are one-to-one.
	 */
	max_mmu_ctxdoms = OPL_MAX_CORE_UNITS_PER_BOARD * max_boards;
}
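/*
 * Illustrative example (the counts below are assumptions, not values taken
 * from opl.h): on a machine allowing 16 boards with 8 cores per board,
 * set_max_mmu_ctxdoms() above would set max_mmu_ctxdoms to 16 * 8 = 128,
 * i.e. one MMU context domain per possible core.
 */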

#pragma weak mmu_init_large_pages

void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
	extern void mmu_init_large_pages(size_t);

	/* Set the CPU signature function pointer */
	cpu_sgn_func = cpu_sgn_update;

	/* Set the appropriate tod module for the OPL platform */
	ASSERT(tod_module_name == NULL);
	tod_module_name = "todopl";

	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
		if (&mmu_init_large_pages)
			mmu_init_large_pages(mmu_ism_pagesize);
	}

	tsb_lgrp_affinity = 1;

	set_max_mmu_ctxdoms();
}

/*
 * Convert a logical board number to a physical one.
 */

#define	LSBPROP		"board#"
#define	PSBPROP		"physical-board#"

int
opl_get_physical_board(int id)
{
	dev_info_t	*root_dip, *dip = NULL;
	char		*dname = NULL;
	int		circ;

	pnode_t		pnode;
	char		pname[MAXSYSNAME] = {0};

	int		lsb_id;	/* Logical System Board ID */
	int		psb_id;	/* Physical System Board ID */


	/*
	 * This function is called at an early stage of bootup when the
	 * kernel device tree is not initialized yet, and also
	 * later on when the device tree is up.  We want to try
	 * the fast track first.
	 */
	root_dip = ddi_root_node();
	if (root_dip) {
		/* Get from devinfo node */
		ndi_devi_enter(root_dip, &circ);
		for (dip = ddi_get_child(root_dip); dip;
		    dip = ddi_get_next_sibling(dip)) {

			dname = ddi_node_name(dip);
			if (strncmp(dname, "pseudo-mc", 9) != 0)
				continue;

			if ((lsb_id = (int)ddi_getprop(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, LSBPROP, -1)) == -1)
				continue;

			if (id == lsb_id) {
				if ((psb_id = (int)ddi_getprop(DDI_DEV_T_ANY,
				    dip, DDI_PROP_DONTPASS, PSBPROP, -1))
				    == -1) {
					ndi_devi_exit(root_dip, circ);
					return (-1);
				} else {
					ndi_devi_exit(root_dip, circ);
					return (psb_id);
				}
			}
		}
		ndi_devi_exit(root_dip, circ);
	}

	/*
	 * We do not have the kernel device tree, or we did not
	 * find the node for some reason (say, the kernel device
	 * tree was modified), so try the OBP tree.
	 */
	pnode = prom_rootnode();
	for (pnode = prom_childnode(pnode); pnode;
	    pnode = prom_nextnode(pnode)) {

		if ((prom_getprop(pnode, "name", (caddr_t)pname) == -1) ||
		    (strncmp(pname, "pseudo-mc", 9) != 0))
			continue;

		if (prom_getprop(pnode, LSBPROP, (caddr_t)&lsb_id) == -1)
			continue;

		if (id == lsb_id) {
			if (prom_getprop(pnode, PSBPROP,
			    (caddr_t)&psb_id) == -1) {
				return (-1);
			} else {
				return (psb_id);
			}
		}
	}

	return (-1);
}

/*
 * For OPL it's possible that memory from two or more successive boards
 * will be contiguous across the boards, and therefore represented as a
 * single chunk.
 * This function splits such chunks down the board boundaries.
 */
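/*
 * Illustrative example (values assumed for the sake of the example): with
 * OPL_MC_MEMBOARD_SHIFT of 38 the board boundary is 256 GB, so a single
 * contiguous memlist entry covering, say, [0, 512 GB) would be split by
 * opl_memlist_per_board() into two entries, [0, 256 GB) and
 * [256 GB, 512 GB), one per board.
 */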
static struct memlist *
opl_memlist_per_board(struct memlist *ml)
{
	uint64_t ssize, low, high, boundary;
	struct memlist *head, *tail, *new;

	ssize = (1ull << OPL_MC_MEMBOARD_SHIFT);

	head = tail = NULL;

	for (; ml; ml = ml->next) {
		low = (uint64_t)ml->address;
		high = low + (uint64_t)(ml->size);
		while (low < high) {
			boundary = roundup(low+1, ssize);
			boundary = MIN(high, boundary);
			new = kmem_zalloc(sizeof (struct memlist), KM_SLEEP);
			new->address = low;
			new->size = boundary - low;
			if (head == NULL)
				head = new;
			if (tail) {
				tail->next = new;
				new->prev = tail;
			}
			tail = new;
			low = boundary;
		}
	}
	return (head);
}

void
set_platform_cage_params(void)
{
	extern pgcnt_t total_pages;
	extern struct memlist *phys_avail;
	struct memlist *ml, *tml;

	if (kernel_cage_enable) {
		pgcnt_t preferred_cage_size;

		preferred_cage_size =
		    MAX(opl_startup_cage_size, total_pages / 256);

		ml = opl_memlist_per_board(phys_avail);

		/*
		 * Note: we are assuming that POST has loaded the
		 * whole show into the high end of memory.  Having
		 * taken this leap, we copy the whole of phys_avail
		 * to the glist and arrange for the cage to grow
		 * downward (descending pfns).
		 */
		kcage_range_init(ml, KCAGE_DOWN, preferred_cage_size);

		/* free the memlist */
		do {
			tml = ml->next;
			kmem_free(ml, sizeof (struct memlist));
			ml = tml;
		} while (ml != NULL);
	}

	if (kcage_on)
		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
	else
		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
}

/*ARGSUSED*/
int
plat_cpu_poweron(struct cpu *cp)
{
	int (*opl_cpu_poweron)(struct cpu *) = NULL;

	opl_cpu_poweron =
	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweron", 0);

	if (opl_cpu_poweron == NULL)
		return (ENOTSUP);
	else
		return ((opl_cpu_poweron)(cp));

}

/*ARGSUSED*/
int
plat_cpu_poweroff(struct cpu *cp)
{
	int (*opl_cpu_poweroff)(struct cpu *) = NULL;

	opl_cpu_poweroff =
	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweroff", 0);

	if (opl_cpu_poweroff == NULL)
		return (ENOTSUP);
	else
		return ((opl_cpu_poweroff)(cp));

}

int
plat_max_boards(void)
{
	return (OPL_MAX_BOARDS);
}

int
plat_max_cpu_units_per_board(void)
{
	return (OPL_MAX_CPU_PER_BOARD);
}

int
plat_max_mem_units_per_board(void)
{
	return (OPL_MAX_MEM_UNITS_PER_BOARD);
}

int
plat_max_io_units_per_board(void)
{
	return (OPL_MAX_IO_UNITS_PER_BOARD);
}

int
plat_max_cmp_units_per_board(void)
{
	return (OPL_MAX_CMP_UNITS_PER_BOARD);
}

int
plat_max_core_units_per_board(void)
{
	return (OPL_MAX_CORE_UNITS_PER_BOARD);
}

int
plat_pfn_to_mem_node(pfn_t pfn)
{
	return (pfn >> mem_node_pfn_shift);
}
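/*
 * Illustrative note (assumes the standard sun4u 8 KB base page, i.e.
 * MMU_PAGESHIFT of 13): plat_build_mem_nodes() below sets
 * mem_node_pfn_shift to OPL_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT = 38 - 13
 * = 25, so plat_pfn_to_mem_node() above maps each 2^25-page (256 GB)
 * board-sized slice of physical memory to its own memnode.
 */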

/* ARGSUSED */
void
plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
{
	size_t	elem;
	pfn_t	basepfn;
	pgcnt_t	npgs;
	uint64_t	boundary, ssize;
	uint64_t	low, high;

	/*
	 * OPL mem slices are always aligned on a 256GB boundary.
	 */
	mem_node_pfn_shift = OPL_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
	mem_node_physalign = 0;

	/*
	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
	 */
	ssize = (1ull << OPL_MC_MEMBOARD_SHIFT);
	for (elem = 0; elem < nelems; elem += 2) {
		low = (uint64_t)list[elem];
		high = low + (uint64_t)(list[elem+1]);
		while (low < high) {
			boundary = roundup(low+1, ssize);
			boundary = MIN(high, boundary);
			basepfn = btop(low);
			npgs = btop(boundary - low);
			mem_node_add_slice(basepfn, basepfn + npgs - 1);
			low = boundary;
		}
	}
}

/*
 * Find the CPU associated with a slice at boot-time.
 */
void
plat_fill_mc(pnode_t nodeid)
{
	int board;
	int memnode;
	struct {
		uint64_t	addr;
		uint64_t	size;
	} mem_range;

	if (prom_getprop(nodeid, "board#", (caddr_t)&board) < 0) {
		panic("Can not find board# property in mc node %x", nodeid);
	}
	if (prom_getprop(nodeid, "sb-mem-ranges", (caddr_t)&mem_range) < 0) {
		panic("Can not find sb-mem-ranges property in mc node %x",
		    nodeid);
	}
	memnode = mem_range.addr >> OPL_MC_MEMBOARD_SHIFT;
	plat_assign_lgrphand_to_mem_node(board, memnode);
}

/*
 * Return the platform handle for the lgroup containing the given CPU
 *
 * For OPL, lgroup platform handle == board #.
 */

extern int mpo_disabled;
extern lgrp_handle_t lgrp_default_handle;

lgrp_handle_t
plat_lgrp_cpu_to_hand(processorid_t id)
{
	lgrp_handle_t plathand;

	/*
	 * Return the real platform handle for the CPU until
	 * such time as we know that MPO should be disabled.
	 * At that point, we set the "mpo_disabled" flag to true,
	 * and from that point on, return the default handle.
	 *
	 * By the time we know that MPO should be disabled, the
	 * first CPU will have already been added to a leaf
	 * lgroup, but that's ok.  The common lgroup code will
	 * double check that the boot CPU is in the correct place,
	 * and in the case where mpo should be disabled, will move
	 * it to the root if necessary.
	 */
	if (mpo_disabled) {
		/* If MPO is disabled, return the default (UMA) handle */
		plathand = lgrp_default_handle;
	} else
		plathand = (lgrp_handle_t)LSB_ID(id);
	return (plathand);
}

/*
 * Platform specific lgroup initialization
 */
void
plat_lgrp_init(void)
{
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;

	/*
	 * Set tuneables for the OPL architecture
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less the remote lgroup
	 * must be loaded before expanding to it.
	 *
	 * Since remote latencies can be costly, attempt to keep 3 threads
	 * within the same lgroup before expanding to the next lgroup.
	 */
	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
}

/*
 * Platform notification of lgroup (re)configuration changes
 */
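/*
 * Illustrative note on the LGRP_CONFIG_MEM_RENAME argument handled below
 * (example values only): the source and target board numbers are packed
 * into the low and high 16 bits of "arg", so arg == 0x00050002 would mean
 * source board 2 and target board 5.
 */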
/*ARGSUSED*/
void
plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
{
	update_membounds_t *umb;
	lgrp_config_mem_rename_t lmr;
	int sbd, tbd;
	lgrp_handle_t hand, shand, thand;
	int mnode, snode, tnode;
	pfn_t start, end;

	if (mpo_disabled)
		return;

	switch (evt) {

	case LGRP_CONFIG_MEM_ADD:
		/*
		 * Establish the lgroup handle to memnode translation.
		 */
		umb = (update_membounds_t *)arg;

		hand = umb->u_board;
		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
		plat_assign_lgrphand_to_mem_node(hand, mnode);

		break;

	case LGRP_CONFIG_MEM_DEL:
		/*
		 * Special handling for possible memory holes.
		 */
		umb = (update_membounds_t *)arg;
		hand = umb->u_board;
		if ((mnode = plat_lgrphand_to_mem_node(hand)) != -1) {
			if (mem_node_config[mnode].exists) {
				start = mem_node_config[mnode].physbase;
				end = mem_node_config[mnode].physmax;
				mem_node_pre_del_slice(start, end);
				mem_node_post_del_slice(start, end, 0);
			}
		}

		break;

	case LGRP_CONFIG_MEM_RENAME:
		/*
		 * During a DR copy-rename operation, all of the memory
		 * on one board is moved to another board -- but the
		 * addresses/pfns and memnodes don't change.  This means
		 * the memory has changed locations without changing identity.
		 *
		 * Source is where we are copying from and target is where we
		 * are copying to.  After source memnode is copied to target
		 * memnode, the physical addresses of the target memnode are
		 * renamed to match what the source memnode had.  Then target
		 * memnode can be removed and source memnode can take its
		 * place.
		 *
		 * To do this, swap the lgroup handle to memnode mappings for
		 * the boards, so target lgroup will have source memnode and
		 * source lgroup will have empty target memnode which is where
		 * its memory will go (if any is added to it later).
		 *
		 * Then source memnode needs to be removed from its lgroup
		 * and added to the target lgroup where the memory was living
		 * but under a different name/memnode.  The memory was in the
		 * target memnode and now lives in the source memnode with
		 * different physical addresses even though it is the same
		 * memory.
		 */
		sbd = arg & 0xffff;
		tbd = (arg & 0xffff0000) >> 16;
		shand = sbd;
		thand = tbd;
		snode = plat_lgrphand_to_mem_node(shand);
		tnode = plat_lgrphand_to_mem_node(thand);

		/*
		 * Special handling for possible memory holes.
		 */
		if (tnode != -1 && mem_node_config[tnode].exists) {
			start = mem_node_config[tnode].physbase;
			end = mem_node_config[tnode].physmax;
			mem_node_pre_del_slice(start, end);
			mem_node_post_del_slice(start, end, 0);
		}

		plat_assign_lgrphand_to_mem_node(thand, snode);
		plat_assign_lgrphand_to_mem_node(shand, tnode);

		lmr.lmem_rename_from = shand;
		lmr.lmem_rename_to = thand;

		/*
		 * Remove source memnode of copy rename from its lgroup
		 * and add it to its new target lgroup
		 */
		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
		    (uintptr_t)&lmr);

		break;

	default:
		break;
	}
}

/*
 * Return latency between "from" and "to" lgroups
 *
 * This latency number can only be used for relative comparison
 * between lgroups on the running system, cannot be used across platforms,
 * and may not reflect the actual latency.  It is platform and implementation
 * specific, so platform gets to decide its value.  It would be nice if the
 * number was at least proportional to make comparisons more meaningful though.
 * NOTE: The numbers below are supposed to be load latencies for uncached
 * memory divided by 10.
 *
 */
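/*
 * Supplementary note derived from the comment above (not a measured
 * figure): the return values 35 and 42 below therefore correspond to
 * roughly 350 ns for a local uncached load and 420 ns for a remote one.
 */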
int
plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
{
	/*
	 * Return min remote latency when there are more than two lgroups
	 * (root and child) and getting latency between two different lgroups
	 * or root is involved
	 */
	if (lgrp_optimizations() && (from != to ||
	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
		return (42);
	else
		return (35);
}

/*
 * Return platform handle for root lgroup
 */
lgrp_handle_t
plat_lgrp_root_hand(void)
{
	if (mpo_disabled)
		return (lgrp_default_handle);

	return (LGRP_DEFAULT_HANDLE);
}

/*ARGSUSED*/
void
plat_freelist_process(int mnode)
{
}

void
load_platform_drivers(void)
{
	(void) i_ddi_attach_pseudo_node("dr");
}

/*
 * No platform drivers on this platform
 */
char *platform_module_list[] = {
	(char *)0
};

/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}

/*ARGSUSED*/
void
cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
{
	static void (*scf_panic_callback)(int);
	static void (*scf_shutdown_callback)(int);

	/*
	 * This is for notifying the SCF of a system panic or shutdown.
	 * In the case of a shutdown or panic, the SCF callback
	 * function should be called.
	 * <SCF callback functions>
	 *   scf_panic_callb()   : panicsys()->panic_quiesce_hw()
	 *   scf_shutdown_callb(): halt() or power_down() or reboot_machine()
	 * cpuid should be -1 and state should be SIGST_EXIT.
	 */
	if (state == SIGST_EXIT && cpuid == -1) {

		/*
		 * find the symbol for the SCF panic callback routine in driver
		 */
		if (scf_panic_callback == NULL)
			scf_panic_callback = (void (*)(int))
			    modgetsymvalue("scf_panic_callb", 0);
		if (scf_shutdown_callback == NULL)
			scf_shutdown_callback = (void (*)(int))
			    modgetsymvalue("scf_shutdown_callb", 0);

		switch (sub_state) {
		case SIGSUBST_PANIC:
			if (scf_panic_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_panic_callb not found\n");
				return;
			}
			scf_panic_callback(SIGSUBST_PANIC);
			break;

		case SIGSUBST_HALT:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_HALT);
			break;

		case SIGSUBST_ENVIRON:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_ENVIRON);
			break;

		case SIGSUBST_REBOOT:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_REBOOT);
			break;
		}
	}
}

/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
    int flt_in_memory, ushort_t flt_status,
    char *buf, int buflen, int *lenp)
{
	/*
	 * check if it's a Memory error.
	 */
	if (flt_in_memory) {
		if (opl_get_mem_unum != NULL) {
			return (opl_get_mem_unum(synd_code, flt_addr,
			    buf, buflen, lenp));
		} else {
			return (ENOTSUP);
		}
	} else {
		return (ENOTSUP);
	}
}

/*ARGSUSED*/
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	int	ret = 0;
	uint_t	sb;
	int	plen;

	sb = opl_get_physical_board(LSB_ID(cpuid));
	if (sb == -1) {
		return (ENXIO);
	}

	/*
	 * opl_cur_model is assigned here
	 */
	if (opl_cur_model == NULL) {
		set_model_info();
	}

	ASSERT((opl_cur_model - opl_models) == (opl_cur_model->model_type));

	switch (opl_cur_model->model_type) {
	case FF1:
		plen = snprintf(buf, buflen, "/%s/CPUM%d", "MBU_A",
		    CHIP_ID(cpuid) / 2);
		break;

	case FF2:
		plen = snprintf(buf, buflen, "/%s/CPUM%d", "MBU_B",
		    (CHIP_ID(cpuid) / 2) + (sb * 2));
		break;

	case DC1:
	case DC2:
	case DC3:
		plen = snprintf(buf, buflen, "/%s%02d/CPUM%d", "CMU", sb,
		    CHIP_ID(cpuid));
		break;

	default:
		/* This should never happen */
		return (ENODEV);
	}

	if (plen >= buflen) {
		ret = ENOSPC;
	} else {
		if (lenp)
			*lenp = strlen(buf);
	}
	return (ret);
}

#define	SCF_PUTINFO(f, s, p)	\
	f(KEY_ESCF, 0x01, 0, s, p)
void
plat_nodename_set(void)
{
	void *datap;
	static int (*scf_service_function)(uint32_t, uint8_t,
	    uint32_t, uint32_t, void *);
	int counter = 5;

	/*
	 * find the symbol for the SCF put routine in driver
	 */
	if (scf_service_function == NULL)
		scf_service_function =
		    (int (*)(uint32_t, uint8_t, uint32_t, uint32_t, void *))
		    modgetsymvalue("scf_service_putinfo", 0);

	/*
	 * If the symbol was found, call it.  Otherwise, log a note (but not to
	 * the console).
	 */

	if (scf_service_function == NULL) {
		cmn_err(CE_NOTE,
		    "!plat_nodename_set: scf_service_putinfo not found\n");
		return;
	}

	datap =
	    (struct utsname *)kmem_zalloc(sizeof (struct utsname), KM_SLEEP);

	if (datap == NULL) {
		return;
	}

	bcopy((struct utsname *)&utsname,
	    (struct utsname *)datap, sizeof (struct utsname));

	while ((SCF_PUTINFO(scf_service_function,
	    sizeof (struct utsname), datap) == EBUSY) && (counter-- > 0)) {
		delay(10 * drv_usectohz(1000000));
	}
	if (counter == 0)
		cmn_err(CE_NOTE,
		    "!plat_nodename_set: "
		    "scf_service_putinfo not responding\n");

	kmem_free(datap, sizeof (struct utsname));
}
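/*
 * Supplementary note on the retry loop above (the totals are derived from
 * the code, not from the original comments): delay(10 * drv_usectohz(1000000))
 * sleeps for roughly 10 seconds, so with counter starting at 5,
 * plat_nodename_set() will wait up to about 50 seconds for a busy SCF
 * before giving up.
 */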

caddr_t efcode_vaddr = NULL;

/*
 * Preallocate enough memory for fcode claims.
 */

caddr_t
efcode_alloc(caddr_t alloc_base)
{
	caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
	    MMU_PAGESIZE);
	caddr_t vaddr;

	/*
	 * allocate the physical memory for the Oberon fcode.
	 */
	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
	    efcode_size, MMU_PAGESIZE)) == NULL)
		cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");

	efcode_vaddr = vaddr;

	return (efcode_alloc_base + efcode_size);
}

caddr_t
plat_startup_memlist(caddr_t alloc_base)
{
	caddr_t tmp_alloc_base;

	tmp_alloc_base = efcode_alloc(alloc_base);
	tmp_alloc_base =
	    (caddr_t)roundup((uintptr_t)tmp_alloc_base, ecache_alignsize);
	return (tmp_alloc_base);
}

void
startup_platform(void)
{
}

void
plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *info)
{
	int	impl;

	impl = cpunodes[cpuid].implementation;
	if (IS_OLYMPUS_C(impl)) {
		info->mmu_idx = MMU_ID(cpuid);
		info->mmu_nctxs = 8192;
	} else {
		cmn_err(CE_PANIC, "Unknown processor %d", impl);
	}
}

int
plat_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
{
	if (opl_get_mem_sid == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_sid(unum, buf, buflen, lenp));
}

int
plat_get_mem_offset(uint64_t paddr, uint64_t *offp)
{
	if (opl_get_mem_offset == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_offset(paddr, offp));
}

int
plat_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
{
	if (opl_get_mem_addr == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_addr(unum, sid, offset, addrp));
}
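/*
 * Worked example for the backoff parameters defined near the top of this
 * file (derived from the tuning comment there; the clock rate is an
 * assumption): one "sleep unit" of spinning is OPL_BOFF_SLEEP *
 * OPL_BOFF_SPIN = 5 * 720 = 3600 loop iterations, which at one cycle per
 * iteration on a 2.4 GHz FF/DC core is about 1500 ns -- the same value as
 * OPL_BOFF_TM and roughly the length of one sleep instruction.
 */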

void
plat_lock_delay(int *backoff)
{
	int i;
	int cnt;
	int flag;
	int ctr;
	hrtime_t delay_start;
	/*
	 * Platform specific lock delay code for OPL
	 *
	 * Using staged linear increases in the delay.
	 * The sleep instruction is the preferred method of delay,
	 * but is too coarse a granularity for the initial backoff.
	 */

	if (*backoff == 0) *backoff = OPL_BOFF_BASE;

	flag = !*backoff;

	if (*backoff < OPL_BOFF_CAP1) {
		/*
		 * If the desired backoff is long enough,
		 * use sleep for most of it
		 */
		for (cnt = *backoff;
		    cnt >= OPL_BOFF_SLEEP;
		    cnt -= OPL_BOFF_SLEEP) {
			cpu_smt_pause();
		}
		/*
		 * spin for the small remainder of the backoff
		 *
		 * fake call to nulldev included to prevent
		 * compiler from optimizing out the spin loop
		 */
		for (ctr = cnt * OPL_BOFF_SPIN; ctr; ctr--) {
			if (flag) (void) nulldev();
		}
	} else {
		/* backoff is very large.  Fill it by sleeping */
		delay_start = gethrtime();
		cnt = *backoff/OPL_BOFF_SLEEP;
		/*
		 * use sleep instructions for delay
		 */
		for (i = 0; i < cnt; i++) {
			cpu_smt_pause();
		}

		/*
		 * Note: if the other strand executes a sleep instruction,
		 * then the sleep ends immediately with a minimum time of
		 * 42 clocks.  We check gethrtime to ensure we have
		 * waited long enough.  And we include both a short
		 * spin loop and a sleep for any final delay time.
		 */

		while ((gethrtime() - delay_start) < cnt * OPL_BOFF_TM) {
			cpu_smt_pause();
			for (ctr = OPL_BOFF_SPIN; ctr; ctr--) {
				if (flag) (void) nulldev();
			}
		}
	}

	/*
	 * We adjust the backoff in three linear stages.
	 * The initial stage has small increases as this phase usually
	 * handles locks with light contention.  We don't want to have
	 * a long backoff on a lock that is available.
	 *
	 * In the second stage, we are in transition, unsure whether
	 * the lock is under heavy contention.  As the failures to
	 * obtain the lock increase, we back off further.
	 *
	 * For the final stage, we are in a heavily contended or
	 * long held lock, so we want to reduce the number of tries.
	 */
	if (*backoff < OPL_BOFF_CAP1) {
		*backoff += 1;
	} else {
		if (*backoff < OPL_BOFF_CAP2) {
			*backoff += OPL_BOFF_SLEEP;
		} else {
			*backoff += 2 * OPL_BOFF_SLEEP;
		}
		if (*backoff > OPL_BOFF_MAX) {
			*backoff = OPL_BOFF_MAX;
		}
	}
}