/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/esunddi.h>

#include <sys/platform_module.h>
#include <sys/errno.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/rmc_comm_dp.h>
#include <sys/rmc_comm_drvintf.h>
#include <sys/modctl.h>
#include <sys/lgrp.h>
#include <sys/memnode.h>
#include <sys/promif.h>

/* Anything related to shared i2c access applies to Seattle only */
#define	SHARED_MI2CV_PATH	"/i2c@1f,530000"
static dev_info_t *shared_mi2cv_dip;
static kmutex_t mi2cv_mutex;

int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
static void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
int (*rmc_req_now)(rmc_comm_msg_t *, uint8_t) = NULL;

void
startup_platform(void)
{
	mutex_init(&mi2cv_mutex, NULL, MUTEX_ADAPTIVE, NULL);
}

int
set_platform_tsb_spares()
{
	return (0);
}

void
set_platform_defaults(void)
{
	extern char *tod_module_name;

	/* Set the appropriate tod module */
	if (tod_module_name == NULL)
		tod_module_name = "todm5823";

	cpu_sgn_func = cpu_sgn_update;
}

/*
 * These two dummy functions are loaded over the original todm5823
 * set_power_alarm and clear_power_alarm functions. On Seattle these
 * functions are not supported, so we provide dummy functions that just
 * return. On Seattle the clock chip is not persistent across reboots,
 * and moreover it has a bug when handling memory accesses.
 * The fix is done by overwriting the original tod_ops function
 * pointers with our dummy replacement functions.
 */
/*ARGSUSED*/
static void
dummy_todm5823_set_power_alarm(timestruc_t ts)
{
}

static void
dummy_todm5823_clear_power_alarm(void)
{
}

/*
 * Definitions for accessing the pci config space of the isa node
 * of Southbridge.
 */
static ddi_acc_handle_t isa_handle = NULL;	/* handle for isa pci space */

/*
 * Definition for accessing rmclomv
 */
#define	RMCLOMV_PATHNAME	"/pseudo/rmclomv@0"

void
load_platform_drivers(void)
{
	dev_info_t	*rmclomv_dip;

	/*
	 * It is OK to return an error here because the 'us' driver is not
	 * available in all clusters (e.g. it is missing in the Core cluster).
	 */
	(void) i_ddi_attach_hw_nodes("us");

	/*
	 * mc-us3i must stay loaded for plat_get_mem_unum()
	 */
	if (i_ddi_attach_hw_nodes("mc-us3i") != DDI_SUCCESS)
		cmn_err(CE_WARN, "mc-us3i driver failed to install");
	(void) ddi_hold_driver(ddi_name_to_major("mc-us3i"));

	/*
	 * Load the power button driver.
	 */
	if (i_ddi_attach_hw_nodes("power") != DDI_SUCCESS)
		cmn_err(CE_WARN, "power button driver failed to install");
	else
		(void) ddi_hold_driver(ddi_name_to_major("power"));

	/*
	 * Load the GPIO driver for the ALOM reset and watchdog lines.
	 */
	if (i_ddi_attach_hw_nodes("pmugpio") != DDI_SUCCESS)
		cmn_err(CE_WARN, "pmugpio failed to install");
	else {
		extern int watchdog_enable, watchdog_available;
		extern int disable_watchdog_on_exit;

		/*
		 * Disable an active h/w watchdog timer upon exit to OBP.
		 */
		disable_watchdog_on_exit = 1;

		watchdog_enable = 1;
		watchdog_available = 1;
	}
	(void) ddi_hold_driver(ddi_name_to_major("pmugpio"));

	/*
	 * Figure out which mi2cv dip is shared with OBP for the nvram
	 * device, so the lock can be acquired.
	 */
	shared_mi2cv_dip = e_ddi_hold_devi_by_path(SHARED_MI2CV_PATH, 0);

	/*
	 * Load the environmentals driver (rmclomv).
	 *
	 * We need this driver to handle events from the RMC when state
	 * changes occur in the environmental data.
	 */
	if (i_ddi_attach_hw_nodes("rmc_comm") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "rmc_comm failed to install");
	} else {
		(void) ddi_hold_driver(ddi_name_to_major("rmc_comm"));

		rmclomv_dip = e_ddi_hold_devi_by_path(RMCLOMV_PATHNAME, 0);
		if (rmclomv_dip == NULL) {
			cmn_err(CE_WARN, "Could not install rmclomv driver\n");
		}
	}

	/*
	 * These two dummy functions are loaded over the original todm5823
	 * set_power_alarm and clear_power_alarm functions; these operations
	 * are not supported on Seattle. load_platform_drivers() is called
	 * from post_startup(), after the tod module has finished its
	 * initialization, so it is safe to replace two of the tod_ops
	 * function pointers with our dummy versions here.
	 */
	tod_ops.tod_set_power_alarm = dummy_todm5823_set_power_alarm;
	tod_ops.tod_clear_power_alarm = dummy_todm5823_clear_power_alarm;

	/*
	 * Create a handle to the rmc_comm_request_nowait() function
	 * inside the rmc_comm module.
	 *
	 * The Seattle/Boston todm5823 driver will use this handle to
	 * call rmc_comm_request_nowait() to send time/date updates
	 * to ALOM.
	 */
	rmc_req_now = (int (*)(rmc_comm_msg_t *, uint8_t))
	    modgetsymvalue("rmc_comm_request_nowait", 0);
}

/*
 * This routine is needed if a device error or timeout occurs before the
 * driver is loaded.
 */
/*ARGSUSED*/
int
plat_ide_chipreset(dev_info_t *dip, int chno)
{
	int ret = DDI_SUCCESS;

	if (isa_handle == NULL) {
		return (DDI_FAILURE);
	}

	/*
	 * This will be filled in with the reset logic for the ULI1573
	 * when that becomes available. Currently this is just a stub.
	 */
	return (ret);
}

/*ARGSUSED*/
int
plat_cpu_poweron(struct cpu *cp)
{
	return (ENOTSUP);	/* not supported on this platform */
}

/*ARGSUSED*/
int
plat_cpu_poweroff(struct cpu *cp)
{
	return (ENOTSUP);	/* not supported on this platform */
}

/*ARGSUSED*/
void
plat_freelist_process(int mnode)
{
}

char *platform_module_list[] = {
	"mi2cv",
	"pca9556",
	(char *)0
};

/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}

/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
    int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
{
	if (flt_in_memory && (p2get_mem_unum != NULL))
		return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
		    buf, buflen, lenp));
	else
		return (ENOTSUP);
}

/*
 * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3i
 * driver, giving each platform the opportunity to add platform-specific
 * label information to the unum for ECC error logging purposes.
 * For example, with mcid 1, bank 0 and dimm 3, a unum of "U0403" becomes
 * "MB/P1/B0/D3: U0403" (the "/D" part is omitted when dimm == -1).
 */
/*ARGSUSED*/
void
plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
{
	char old_unum[UNUM_NAMLEN];
	int printed;
	int buflen = UNUM_NAMLEN;

	(void) strcpy(old_unum, unum);
	printed = snprintf(unum, buflen, "MB/P%d/B%d", mcid, bank);
	buflen -= printed;
	unum += printed;

	if (dimm != -1) {
		printed = snprintf(unum, buflen, "/D%d", dimm);
		buflen -= printed;
		unum += printed;
	}

	(void) snprintf(unum, buflen, ": %s", old_unum);
}

/*ARGSUSED*/
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	if (snprintf(buf, buflen, "MB") >= buflen) {
		return (ENOSPC);
	} else {
		*lenp = strlen(buf);
		return (0);
	}
}

/*
 * Our nodename has been set, pass it along to the RMC.
 */
void
plat_nodename_set(void)
{
	rmc_comm_msg_t	req;	/* request */
	int (*rmc_req_res)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t) = NULL;

	/*
	 * find the symbol for the mailbox routine
	 */
	rmc_req_res = (int (*)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t))
	    modgetsymvalue("rmc_comm_request_response", 0);

	if (rmc_req_res == NULL) {
		return;
	}

	/*
	 * construct the message telling the RMC our nodename
	 */
	req.msg_type = DP_SET_CPU_NODENAME;
	req.msg_len = strlen(utsname.nodename) + 1;
	req.msg_bytes = 0;
	req.msg_buf = (caddr_t)utsname.nodename;

	/*
	 * ship it
	 */
	(void) (rmc_req_res)(&req, NULL, 2000);
}

sig_state_t current_sgn;

/*
 * CPU signatures - we're only interested in the overall system "signature"
 * on this platform, not in individual cpu signatures.
 */
/*ARGSUSED*/
static void
cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
{
	dp_cpu_signature_t signature;
	rmc_comm_msg_t	req;	/* request */
	int (*rmc_req_now)(rmc_comm_msg_t *, uint8_t) = NULL;

	/*
	 * Differentiate a panic reboot from a non-panic reboot in the
	 * setting of the substate of the signature.
	 *
	 * If the new substate is REBOOT and we're rebooting due to a panic,
	 * then set the new substate to a special value indicating a panic
	 * reboot, SIGSUBST_PANIC_REBOOT.
	 *
	 * A panic reboot is detected by a current (previous) signature state
	 * of SIGST_EXIT and a new signature substate of SIGSUBST_REBOOT.
	 * The domain signature state SIGST_EXIT is used as the panic flow
	 * progresses.
	 *
	 * At the end of the panic flow the reboot occurs, but we want to know
	 * that it was involuntary, something that may be quite useful to know
	 * at OBP level.
	 */
	if (state == SIGST_EXIT && sub_state == SIGSUBST_REBOOT) {
		if (current_sgn.state_t.state == SIGST_EXIT &&
		    current_sgn.state_t.sub_state != SIGSUBST_REBOOT)
			sub_state = SIGSUBST_PANIC_REBOOT;
	}

	/*
	 * offline and detached states only apply to a specific cpu,
	 * so ignore them.
	 */
	if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
		return;
	}

	current_sgn.signature = CPU_SIG_BLD(sig, state, sub_state);

	/*
	 * find the symbol for the mailbox routine
	 */
	rmc_req_now = (int (*)(rmc_comm_msg_t *, uint8_t))
	    modgetsymvalue("rmc_comm_request_nowait", 0);
	if (rmc_req_now == NULL) {
		return;
	}

	signature.cpu_id = -1;
	signature.sig = sig;
	signature.states = state;
	signature.sub_state = sub_state;
	req.msg_type = DP_SET_CPU_SIGNATURE;
	req.msg_len = (int)(sizeof (signature));
	req.msg_bytes = 0;
	req.msg_buf = (caddr_t)&signature;

	/*
	 * We need to tell the SP that the host is about to stop running. The
	 * SP will then allow the date to be set at its console, it will change
	 * the state of the activity indicator, it will display the correct
	 * host status, and it will stop sending console messages and alerts
	 * to the host communication channel.
	 *
	 * This requires the RMC_COMM_DREQ_URGENT flag, as we want to be sure
	 * the activity indicators will reflect the correct status.
	 *
	 * When sub_state SIGSUBST_DUMP is sent, the urgent flag
	 * (RMC_COMM_DREQ_URGENT) is not required, as SIGSUBST_PANIC_REBOOT
	 * has already been sent and has changed the activity indicators.
	 */
	if (state == SIGST_EXIT && (sub_state == SIGSUBST_HALT ||
	    sub_state == SIGSUBST_REBOOT || sub_state == SIGSUBST_ENVIRON ||
	    sub_state == SIGSUBST_PANIC_REBOOT))
		(void) (rmc_req_now)(&req, RMC_COMM_DREQ_URGENT);
	else
		(void) (rmc_req_now)(&req, 0);
}

/*
 * Fiesta support for lgroups.
 *
 * On the fiesta platform, an lgroup platform handle == CPU id.
 */

/*
 * Macro for extracting the CPU number from the CPU id
 */
#define	CPUID_TO_LGRP(id)	((id) & 0x7)
#define	PLATFORM_MC_SHIFT	36

/*
 * Return the platform handle for the lgroup containing the given CPU
 */
void *
plat_lgrp_cpu_to_hand(processorid_t id)
{
	return ((void *)(uintptr_t)CPUID_TO_LGRP(id));
}
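
/*
 * Illustrative example: CPUID_TO_LGRP() keeps only the low three bits of
 * the CPU id, so at most eight distinct lgroup handles (one per CPU) are
 * possible on this platform; e.g. plat_lgrp_cpu_to_hand(3) returns
 * handle 3, matching the note above that handle == CPU id.
 */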

/*
 * Platform specific lgroup initialization
 */
void
plat_lgrp_init(void)
{
	pnode_t curnode;
	char tmp_name[sizeof (OBP_CPU) + 1];	/* extra padding */
	int portid;
	int cpucnt = 0;
	int max_portid = -1;
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;
	extern pgcnt_t lgrp_mem_free_thresh;
	extern uint32_t lgrp_loadavg_tolerance;
	extern uint32_t lgrp_loadavg_max_effect;
	extern uint32_t lgrp_load_thresh;
	extern lgrp_mem_policy_t lgrp_mem_policy_root;

	/*
	 * Count the number of CPUs installed to determine whether
	 * NUMA optimization should be enabled or not.
	 *
	 * All CPU nodes reside in the root node and have a
	 * device type "cpu".
	 */
	curnode = prom_rootnode();
	for (curnode = prom_childnode(curnode); curnode;
	    curnode = prom_nextnode(curnode)) {
		bzero(tmp_name, sizeof (tmp_name));
		if (prom_bounded_getprop(curnode, OBP_DEVICETYPE, tmp_name,
		    sizeof (OBP_CPU)) == -1 || strcmp(tmp_name, OBP_CPU) != 0)
			continue;

		cpucnt++;

		if (prom_getprop(curnode, "portid", (caddr_t)&portid) !=
		    -1 && portid > max_portid)
			max_portid = portid;
	}
	if (cpucnt <= 1)
		max_mem_nodes = 1;
	else if (max_portid >= 0 && max_portid < MAX_MEM_NODES)
		max_mem_nodes = max_portid + 1;

	/*
	 * Set tuneables for the fiesta architecture.
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less loaded the remote
	 * lgroup must be before expanding to it.
	 *
	 * Optimize for memory bandwidth by spreading multi-threaded
	 * programs across different lgroups.
	 */
	lgrp_expand_proc_thresh = lgrp_loadavg_max_effect - 1;
	lgrp_expand_proc_diff = lgrp_loadavg_max_effect / 2;
	lgrp_loadavg_tolerance = lgrp_loadavg_max_effect / 2;
	lgrp_mem_free_thresh = 1;	/* home lgrp must have some memory */
	lgrp_mem_policy_root = LGRP_MEM_POLICY_NEXT;
	lgrp_load_thresh = 0;

	mem_node_pfn_shift = PLATFORM_MC_SHIFT - MMU_PAGESHIFT;
}
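
/*
 * Illustrative example: assuming the sun4u 8K base page size
 * (MMU_PAGESHIFT == 13), mem_node_pfn_shift set above works out to
 * 36 - 13 = 23, so plat_pfn_to_mem_node() below selects the memnode
 * from physical address bit 36 upward; e.g. pfn 0x1000000 (physical
 * address 0x2000000000) would map to memnode 2.
 */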

/*
 * Return the latency between "from" and "to" lgroups.
 *
 * This latency number can only be used for relative comparison between
 * lgroups on the running system; it cannot be used across platforms and
 * may not reflect the actual latency. It is platform and implementation
 * specific, so the platform gets to decide its value. It would be nice
 * if the number were at least proportional, to make comparisons more
 * meaningful.
 * NOTE: The numbers below are supposed to be load latencies for uncached
 * memory divided by 10.
 */
int
plat_lgrp_latency(void *from, void *to)
{
	/*
	 * Return the remote latency when there are more than two lgroups
	 * (root and child) and getting latency between two different
	 * lgroups or root is involved.
	 */
	if (lgrp_optimizations() && (from != to || from ==
	    (void *) LGRP_DEFAULT_HANDLE || to == (void *) LGRP_DEFAULT_HANDLE))
		return (17);
	else
		return (12);
}

int
plat_pfn_to_mem_node(pfn_t pfn)
{
	ASSERT(max_mem_nodes > 1);
	return (pfn >> mem_node_pfn_shift);
}

/*
 * Assign memnode to lgroups
 */
void
plat_fill_mc(pnode_t nodeid)
{
	int portid;

	/*
	 * Memory controller portid == global CPU id
	 */
	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) == -1) ||
	    (portid < 0))
		return;

	if (portid < max_mem_nodes)
		plat_assign_lgrphand_to_mem_node((lgrp_handle_t)portid, portid);
}

/*
 * Common locking enter code
 */
void
plat_setprop_enter(void)
{
	mutex_enter(&mi2cv_mutex);
}

/*
 * Common locking exit code
 */
void
plat_setprop_exit(void)
{
	mutex_exit(&mi2cv_mutex);
}

/*
 * Called by mi2cv driver
 */
void
plat_shared_i2c_enter(dev_info_t *i2cnexus_dip)
{
	if (i2cnexus_dip == shared_mi2cv_dip) {
		plat_setprop_enter();
	}
}

/*
 * Called by mi2cv driver
 */
void
plat_shared_i2c_exit(dev_info_t *i2cnexus_dip)
{
	if (i2cnexus_dip == shared_mi2cv_dip) {
		plat_setprop_exit();
	}
}

/*
 * Called by todm5823 driver
 */
void
plat_rmc_comm_req(struct rmc_comm_msg *request)
{
	if (rmc_req_now)
		(void) rmc_req_now(request, 0);
}