/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/vm.h>
#include <sys/cpu.h>
#include <sys/cpupart.h>
#include <sys/atomic.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/bootconf.h>
#include <sys/memlist_plat.h>
#include <sys/memlist_impl.h>
#include <sys/prom_plat.h>
#include <sys/prom_isa.h>
#include <sys/autoconf.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>
#include <sys/fpu/fpusystm.h>
#include <sys/iommutsb.h>
#include <vm/vm_dep.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/seg_map.h>
#include <vm/seg_kp.h>
#include <sys/sysconf.h>
#include <vm/hat_sfmmu.h>
#include <sys/kobj.h>
#include <sys/sun4asi.h>
#include <sys/clconf.h>
#include <sys/platform_module.h>
#include <sys/panic.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/clock.h>
#include <sys/fpras_impl.h>
#include <sys/prom_debug.h>
#include <sys/traptrace.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>

/*
 * fpRAS implementation structures.
 */
struct fpras_chkfn *fpras_chkfnaddrs[FPRAS_NCOPYOPS];
struct fpras_chkfngrp *fpras_chkfngrps;
struct fpras_chkfngrp *fpras_chkfngrps_base;
int fpras_frequency = -1;
int64_t fpras_interval = -1;

/*
 * Halt idling cpus optimization
 *
 * This optimization is only enabled on platforms that have
 * CPU halt support.  The cpu_halt_cpu() routine is provided by
 * the cpu module and is referenced here via a weak pragma.
 * The presence of this routine automatically enables the halt
 * idling cpus functionality if the global switch
 * enable_halt_idle_cpus is set (it is set by default).
 */
#pragma weak cpu_halt_cpu
extern void cpu_halt_cpu();

int enable_halt_idle_cpus = 1;  /* global switch */

void
setup_trap_table(void)
{
        intr_init(CPU);         /* init interrupt request free list */
        setwstate(WSTATE_KERN);
        prom_set_traptable(&trap_table);
}
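/*
 * A minimal user-space sketch of the weak-symbol presence test used
 * for cpu_halt_cpu above.  optional_hook() is a hypothetical name, and
 * the sketch assumes an ELF toolchain (Sun Studio or gcc) where the
 * address of an undefined weak symbol resolves to NULL.
 */
#pragma weak optional_hook
extern void optional_hook(void);

static void
call_hook_if_present(void)
{
        /* &optional_hook is NULL if no object defined the symbol */
        if (&optional_hook)
                optional_hook();
}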
void
mach_fpras()
{
        if (fpras_implemented && !fpras_disable) {
                int i;
                struct fpras_chkfngrp *fcgp;
                size_t chkfngrpsallocsz;

                /*
                 * Note that we size off of NCPU and set up for
                 * all those possibilities regardless of whether
                 * the cpu id is present or not.  We do this so that
                 * we don't have any construction or destruction
                 * activity to perform at DR time, and it's not
                 * costly in memory.  We require block alignment.
                 */
                chkfngrpsallocsz = NCPU * sizeof (struct fpras_chkfngrp);
                fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz, KM_SLEEP);
                if (IS_P2ALIGNED((uintptr_t)fpras_chkfngrps_base, 64)) {
                        fpras_chkfngrps = fpras_chkfngrps_base;
                } else {
                        kmem_free(fpras_chkfngrps_base, chkfngrpsallocsz);
                        chkfngrpsallocsz += 64;
                        fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz,
                            KM_SLEEP);
                        fpras_chkfngrps = (struct fpras_chkfngrp *)
                            P2ROUNDUP((uintptr_t)fpras_chkfngrps_base, 64);
                }

                /*
                 * Copy our check function into place for each copy operation
                 * and each cpu id.
                 */
                fcgp = &fpras_chkfngrps[0];
                for (i = 0; i < FPRAS_NCOPYOPS; ++i)
                        bcopy((void *)fpras_chkfn_type1, &fcgp->fpras_fn[i],
                            sizeof (struct fpras_chkfn));
                for (i = 1; i < NCPU; ++i)
                        *(&fpras_chkfngrps[i]) = *fcgp;

                /*
                 * At definition fpras_frequency is set to -1, and it will
                 * still have that value unless changed in /etc/system (not
                 * strictly supported, but not preventable).  The following
                 * both sets the default and sanity checks anything from
                 * /etc/system.
                 */
                if (fpras_frequency < 0)
                        fpras_frequency = FPRAS_DEFAULT_FREQUENCY;

                /*
                 * Now calculate fpras_interval.  When fpras_interval
                 * becomes non-negative fpras checks will commence
                 * (copies before this point in boot will bypass fpras).
                 * Our stores of instructions must be visible; no need
                 * to flush, as they have never been executed before.
                 */
                membar_producer();
                fpras_interval = (fpras_frequency == 0) ?
                    0 : sys_tick_freq / fpras_frequency;
        }
}

void
mach_hw_copy_limit(void)
{
        if (!fpu_exists) {
                use_hw_bcopy = 0;
                hw_copy_limit_1 = 0;
                hw_copy_limit_2 = 0;
                hw_copy_limit_4 = 0;
                hw_copy_limit_8 = 0;
                use_hw_bzero = 0;
        }
}

void
load_tod_module()
{
        /*
         * Load the tod driver module for the tod part found on this
         * system.  Recompute the cpu frequency/delays based on tod, as
         * the tod part tends to keep time more accurately.
         */
        if (tod_module_name == NULL || modload("tod", tod_module_name) == -1)
                halt("Can't load tod module");
}

void
mach_memscrub(void)
{
        /*
         * Start up the memory scrubber, if we are not running fpu
         * emulation code.
         */

#ifndef _HW_MEMSCRUB_SUPPORT
        if (fpu_exists) {
                if (memscrub_init()) {
                        cmn_err(CE_WARN,
                            "Memory scrubber failed to initialize");
                }
        }
#endif /* _HW_MEMSCRUB_SUPPORT */
}
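/*
 * A minimal user-space sketch of the over-allocation pattern
 * mach_fpras() uses to guarantee 64-byte block alignment: pad the
 * request by the alignment and round the base pointer up.  (The real
 * code first tries an exact-size allocation and only falls back to
 * padding when the returned block is misaligned.)  malloc() stands in
 * for kmem_alloc(), and ROUNDUP mirrors P2ROUNDUP() for power-of-two
 * alignments; the names are illustrative.
 */
#include <stdint.h>
#include <stdlib.h>

#define BLOCK_ALIGN     64
#define ROUNDUP(x, a)   (((uintptr_t)(x) + ((a) - 1)) & ~(uintptr_t)((a) - 1))

static void *
alloc_block_aligned(size_t sz, void **basep)
{
        void *base = malloc(sz + BLOCK_ALIGN);

        if (base == NULL)
                return (NULL);
        *basep = base;          /* caller frees the base, not the aligned ptr */
        return ((void *)ROUNDUP(base, BLOCK_ALIGN));
}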
/*
 * Halt the calling CPU until awoken via an interrupt.
 * This routine should only be invoked if cpu_halt_cpu()
 * exists and is supported; see mach_cpu_halt_idle().
 */
static void
cpu_halt(void)
{
        cpu_t *cpup = CPU;
        processorid_t cpun = cpup->cpu_id;
        cpupart_t *cp = cpup->cpu_part;
        int hset_update = 1;
        uint_t pstate;
        extern uint_t getpstate(void);
        extern void setpstate(uint_t);

        /*
         * If this CPU is online and there are multiple CPUs
         * in the system, then we should note our halting
         * by adding ourselves to the partition's halted CPU
         * bitmap.  This allows other CPUs to find/awaken us when
         * work becomes available.
         */
        if (CPU->cpu_flags & CPU_OFFLINE || ncpus == 1)
                hset_update = 0;

        /*
         * Add ourselves to the partition's halted CPUs bitmask
         * and set our HALTED flag, if necessary.
         *
         * When a thread becomes runnable, it is placed on the queue
         * and then the halted cpuset is checked to determine who
         * (if anyone) should be awoken.  We therefore need to first
         * add ourselves to the halted cpuset, and then check if there
         * is any work available.
         */
        if (hset_update) {
                cpup->cpu_disp_flags |= CPU_DISP_HALTED;
                membar_producer();
                CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
        }

        /*
         * Check to make sure there's really nothing to do.
         * Work destined for this CPU may become available after
         * this check.  We'll be notified through the clearing of our
         * bit in the halted CPU bitmask, and a poke.
         */
        if (disp_anywork()) {
                if (hset_update) {
                        cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
                        CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
                }
                return;
        }

        /*
         * We're on our way to being halted.
         *
         * Disable interrupts now, so that we'll awaken immediately
         * after halting if someone tries to poke us between now and
         * the time we actually halt.
         *
         * We check for the presence of our bit after disabling interrupts.
         * If it's already cleared, we'll return.  If the bit is cleared
         * after we check, then the poke will pop us out of the halted
         * state.
         *
         * The ordering of the poke and the clearing of the bit by
         * cpu_wakeup() is important:
         * cpu_wakeup() must clear, then poke.
         * cpu_halt() must disable interrupts, then check for the bit.
         */
        pstate = getpstate();
        setpstate(pstate & ~PSTATE_IE);

        if (hset_update && !CPU_IN_SET(cp->cp_mach->mc_haltset, cpun)) {
                cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
                setpstate(pstate);
                return;
        }

        /*
         * The check for anything locally runnable is here for performance
         * and isn't needed for correctness.  disp_nrunnable ought to be
         * in our cache still, so it's inexpensive to check, and if there
         * is anything runnable we won't have to wait for the poke.
         */
        if (cpup->cpu_disp->disp_nrunnable != 0) {
                if (hset_update) {
                        cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
                        CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
                }
                setpstate(pstate);
                return;
        }

        /*
         * Halt the strand.
         */
        if (&cpu_halt_cpu)
                cpu_halt_cpu();

        /*
         * We're no longer halted.
         */
        setpstate(pstate);
        if (hset_update) {
                cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
                CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
        }
}

/*
 * If "cpu" is halted, then wake it up, clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 * It should only be invoked if cpu_halt_cpu()
 * exists and is supported; see mach_cpu_halt_idle().
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
        uint_t cpu_found;
        int result;
        cpupart_t *cp;

        cp = cpu->cpu_part;
        if (CPU_IN_SET(cp->cp_mach->mc_haltset, cpu->cpu_id)) {
                /*
                 * Clear the halted bit for that CPU, since it will be
                 * poked in a moment.
                 */
                CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpu->cpu_id);
                /*
                 * We may find the current CPU present in the halted cpuset
                 * if we're in the context of an interrupt that occurred
                 * before we had a chance to clear our bit in cpu_halt().
                 * Poking ourselves is obviously unnecessary, since if
                 * we're here, we're not halted.
                 */
                if (cpu != CPU)
                        poke_cpu(cpu->cpu_id);
                return;
        } else {
                /*
                 * This cpu isn't halted, but it's idle or undergoing a
                 * context switch.  No need to awaken anyone else.
                 */
                if (cpu->cpu_thread == cpu->cpu_idle_thread ||
                    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
                        return;
        }

        /*
         * See if there are any other halted CPUs.  If there are, then
         * select one and awaken it.
         * It's possible that after we find a CPU, somebody else
         * will awaken it before we get the chance.
         * In that case, look again.
         */
        do {
                CPUSET_FIND(cp->cp_mach->mc_haltset, cpu_found);
                if (cpu_found == CPUSET_NOTINSET)
                        return;

                ASSERT(cpu_found >= 0 && cpu_found < NCPU);
                CPUSET_ATOMIC_XDEL(cp->cp_mach->mc_haltset, cpu_found, result);
        } while (result < 0);

        if (cpu_found != CPU->cpu_id)
                poke_cpu(cpu_found);
}
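/*
 * A minimal user-space sketch of the halt/wakeup handshake above,
 * using a single C11 atomic flag in place of the per-CPU mc_haltset
 * bit, and comments in place of poke_cpu() and PSTATE.IE manipulation.
 * The names are illustrative; the point is the ordering: the waker
 * clears the bit before poking, and the halter publishes its bit
 * before the final check, so a wakeup issued between the check and
 * the halt is never lost.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool halted_bit;  /* stands in for our mc_haltset bit */

static void
sketch_wakeup(void)
{
        /* Clear first, then poke, mirroring cpu_wakeup(). */
        if (atomic_exchange(&halted_bit, false)) {
                /* poke_cpu() would fire the wakeup interrupt here */
        }
}

static void
sketch_halt(void)
{
        atomic_store(&halted_bit, true);        /* advertise our halt */
        /* ... interrupts are disabled here in the real code ... */
        if (!atomic_load(&halted_bit))
                return;         /* a waker beat us; don't halt */
        /* halt; the poke that follows the clear will wake us */
}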
void
mach_cpu_halt_idle()
{
        if (enable_halt_idle_cpus) {
                if (&cpu_halt_cpu) {
                        idle_cpu = cpu_halt;
                        disp_enq_thread = cpu_wakeup;
                }
        }
}

/*ARGSUSED*/
int
cpu_intrq_setup(struct cpu *cp)
{
        /* Interrupt mondo queues not applicable to sun4u */
        return (0);
}

/*ARGSUSED*/
void
cpu_intrq_cleanup(struct cpu *cp)
{
        /* Interrupt mondo queues not applicable to sun4u */
}

/*ARGSUSED*/
void
cpu_intrq_register(struct cpu *cp)
{
        /* Interrupt/error queues not applicable to sun4u */
}

/*ARGSUSED*/
void
mach_htraptrace_setup(int cpuid)
{
        /* Set up the hypervisor traptrace buffer, not applicable to sun4u */
}

/*ARGSUSED*/
void
mach_htraptrace_configure(int cpuid)
{
        /* Enable/disable hypervisor traptracing, not applicable to sun4u */
}

/*ARGSUSED*/
void
mach_htraptrace_cleanup(int cpuid)
{
        /* Clean up the hypervisor traptrace buffer, not applicable to sun4u */
}

void
mach_descrip_startup_init(void)
{
        /*
         * Only for sun4v.
         * Initialize the Machine Description framework during startup.
         */
}

void
mach_descrip_startup_fini(void)
{
        /*
         * Only for sun4v.
         * Clean up the Machine Description framework during startup.
         */
}

void
mach_descrip_init(void)
{
        /*
         * Only for sun4v.
         * Initialize the Machine Description framework.
         */
}

void
hsvc_setup(void)
{
        /* Set up hypervisor services, not applicable to sun4u */
}

void
load_mach_drivers(void)
{
        /* Currently no machine class (sun4u) specific drivers to load */
}

/*
 * Return true if the machine we're running on is a Positron.
 * (Positron is an unsupported developer's platform.)
 */
int
iam_positron(void)
{
        char model[32];
        const char proto_model[] = "SUNW,501-2732";
        pnode_t root = prom_rootnode();

        if (prom_getproplen(root, "model") != sizeof (proto_model))
                return (0);

        (void) prom_getprop(root, "model", model);
        if (strcmp(model, proto_model) == 0)
                return (1);
        return (0);
}

/*
 * Find a physically contiguous area of twice the largest ecache size
 * to be used while doing displacement flush of ecaches.
 */
uint64_t
ecache_flush_address(void)
{
        struct memlist *pmem;
        uint64_t flush_size;
        uint64_t ret_val;

        flush_size = ecache_size * 2;
        for (pmem = phys_install; pmem; pmem = pmem->next) {
                ret_val = P2ROUNDUP(pmem->address, ecache_size);
                if (ret_val + flush_size <= pmem->address + pmem->size)
                        return (ret_val);
        }
        return ((uint64_t)-1);
}
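/*
 * A user-space sketch of the window search above, over a toy segment
 * list with assumed values: with ecache_size of 8 MB (0x800000), the
 * flush window must span 16 MB.  A segment at 0x0f000000 of size
 * 0x03000000 is already ecache-aligned and holds a 16 MB window, so
 * 0x0f000000 is returned.  All names here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr, size; };

static uint64_t
p2roundup(uint64_t x, uint64_t align)
{
        /* round x up to the next multiple of align (a power of two) */
        return ((x + align - 1) & ~(align - 1));
}

static uint64_t
find_flush_window(const struct seg *segs, int nsegs, uint64_t ecsize)
{
        uint64_t want = ecsize * 2;
        int i;

        for (i = 0; i < nsegs; i++) {
                uint64_t cand = p2roundup(segs[i].addr, ecsize);
                if (cand + want <= segs[i].addr + segs[i].size)
                        return (cand);
        }
        return ((uint64_t)-1);
}

int
main(void)
{
        struct seg toy[] = { { 0x0f000000ULL, 0x03000000ULL } };

        /* prints f000000: an aligned 16 MB window fits in the segment */
        printf("%llx\n", (unsigned long long)
            find_flush_window(toy, 1, 0x800000ULL));
        return (0);
}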
/*
 * Called with the memlist lock held to say that phys_install has
 * changed.
 */
void
phys_install_has_changed(void)
{
        /*
         * Get the new address into a temporary just in case panicking
         * involves use of ecache_flushaddr.
         */
        uint64_t new_addr;

        new_addr = ecache_flush_address();
        if (new_addr == (uint64_t)-1) {
                cmn_err(CE_PANIC,
                    "ecache_flush_address(): failed, ecache_size=%x",
                    ecache_size);
                /*NOTREACHED*/
        }
        ecache_flushaddr = new_addr;
        membar_producer();
}