17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5c56c1e58Sgirish * Common Development and Distribution License (the "License"). 6c56c1e58Sgirish * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 22*0542eecfSRafael Vanoni * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved. 
237c478bd9Sstevel@tonic-gate */ 247c478bd9Sstevel@tonic-gate 257c478bd9Sstevel@tonic-gate #include <sys/machsystm.h> 267c478bd9Sstevel@tonic-gate #include <sys/archsystm.h> 277c478bd9Sstevel@tonic-gate #include <sys/vm.h> 287c478bd9Sstevel@tonic-gate #include <sys/cpu.h> 2925cf1a30Sjl139090 #include <sys/cpupart.h> 306890d023SEric Saxe #include <sys/cmt.h> 316890d023SEric Saxe #include <sys/bitset.h> 327c478bd9Sstevel@tonic-gate #include <sys/reboot.h> 337c478bd9Sstevel@tonic-gate #include <sys/kdi.h> 347c478bd9Sstevel@tonic-gate #include <sys/bootconf.h> 357c478bd9Sstevel@tonic-gate #include <sys/memlist_plat.h> 367c478bd9Sstevel@tonic-gate #include <sys/memlist_impl.h> 377c478bd9Sstevel@tonic-gate #include <sys/prom_plat.h> 387c478bd9Sstevel@tonic-gate #include <sys/prom_isa.h> 397c478bd9Sstevel@tonic-gate #include <sys/autoconf.h> 407c478bd9Sstevel@tonic-gate #include <sys/intreg.h> 417c478bd9Sstevel@tonic-gate #include <sys/ivintr.h> 427c478bd9Sstevel@tonic-gate #include <sys/fpu/fpusystm.h> 437c478bd9Sstevel@tonic-gate #include <sys/iommutsb.h> 447c478bd9Sstevel@tonic-gate #include <vm/vm_dep.h> 457c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h> 467c478bd9Sstevel@tonic-gate #include <vm/seg_kpm.h> 477c478bd9Sstevel@tonic-gate #include <vm/seg_map.h> 487c478bd9Sstevel@tonic-gate #include <vm/seg_kp.h> 497c478bd9Sstevel@tonic-gate #include <sys/sysconf.h> 507c478bd9Sstevel@tonic-gate #include <vm/hat_sfmmu.h> 517c478bd9Sstevel@tonic-gate #include <sys/kobj.h> 527c478bd9Sstevel@tonic-gate #include <sys/sun4asi.h> 537c478bd9Sstevel@tonic-gate #include <sys/clconf.h> 547c478bd9Sstevel@tonic-gate #include <sys/platform_module.h> 557c478bd9Sstevel@tonic-gate #include <sys/panic.h> 567c478bd9Sstevel@tonic-gate #include <sys/cpu_sgnblk_defs.h> 577c478bd9Sstevel@tonic-gate #include <sys/clock.h> 587c478bd9Sstevel@tonic-gate #include <sys/fpras_impl.h> 597c478bd9Sstevel@tonic-gate #include <sys/prom_debug.h> 607c478bd9Sstevel@tonic-gate #include <sys/traptrace.h> 
#include <sys/memnode.h>
#include <sys/mem_cage.h>

/*
 * fpRAS implementation structures.
 */
struct fpras_chkfn *fpras_chkfnaddrs[FPRAS_NCOPYOPS];
struct fpras_chkfngrp *fpras_chkfngrps;
struct fpras_chkfngrp *fpras_chkfngrps_base;
int fpras_frequency = -1;
int64_t fpras_interval = -1;

/*
 * Increase unix symbol table size as a work around for 6828121
 */
int alloc_mem_bermuda_triangle;

/*
 * Halt idling cpus optimization
 *
 * This optimization is only enabled in platforms that have
 * the CPU halt support. The cpu_halt_cpu() support is provided
 * in the cpu module and it is referenced here with a pragma weak.
 * The presence of this routine automatically enables the halt idling
 * cpus functionality if the global switch enable_halt_idle_cpus
 * is set (default is set).
 */
#pragma weak cpu_halt_cpu
extern void cpu_halt_cpu();

/*
 * Defines for the idle_state_transition DTrace probe
 *
 * The probe fires when the CPU undergoes an idle state change (e.g. halting)
 * The argument passed is the state to which the CPU is transitioning.
 *
 * The states are defined here.
 */
#define	IDLE_STATE_NORMAL 0
#define	IDLE_STATE_HALTED 1

int enable_halt_idle_cpus = 1;	/* global switch */

/*
 * Tunable fan-out for the partition's halted-CPU bitset (cp_haltset).
 * Not referenced in this file; presumably consumed when the cpupart's
 * haltset is initialized elsewhere — confirm against cpupart code.
 */
uint_t cp_haltset_fanout = 3;

/*
 * Install the kernel trap table and prepare this CPU's interrupt state.
 */
void
setup_trap_table(void)
{
	intr_init(CPU);		/* init interrupt request free list */
	setwstate(WSTATE_KERN);
	prom_set_traptable(&trap_table);
}

/*
 * Set up the fpRAS (floating-point Reliability/Availability/Serviceability)
 * check-function groups: one 64-byte-aligned group of check functions per
 * possible CPU id, then arm checking by computing fpras_interval.
 */
void
mach_fpras()
{
	if (fpras_implemented && !fpras_disable) {
		int i;
		struct fpras_chkfngrp *fcgp;
		size_t chkfngrpsallocsz;

		/*
		 * Note that we size off of NCPU and setup for
		 * all those possibilities regardless of whether
		 * the cpu id is present or not. We do this so that
		 * we don't have any construction or destruction
		 * activity to perform at DR time, and it's not
		 * costly in memory. We require block alignment.
		 */
		chkfngrpsallocsz = NCPU * sizeof (struct fpras_chkfngrp);
		fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz, KM_SLEEP);
		if (IS_P2ALIGNED((uintptr_t)fpras_chkfngrps_base, 64)) {
			fpras_chkfngrps = fpras_chkfngrps_base;
		} else {
			/*
			 * Not 64-byte aligned: reallocate with 64 bytes of
			 * slack and round the working pointer up.
			 */
			kmem_free(fpras_chkfngrps_base, chkfngrpsallocsz);
			chkfngrpsallocsz += 64;
			fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz,
			    KM_SLEEP);
			fpras_chkfngrps = (struct fpras_chkfngrp *)
			    P2ROUNDUP((uintptr_t)fpras_chkfngrps_base, 64);
		}

		/*
		 * Copy our check function into place for each copy operation
		 * and each cpu id.
		 */
		fcgp = &fpras_chkfngrps[0];
		for (i = 0; i < FPRAS_NCOPYOPS; ++i)
			bcopy((void *)fpras_chkfn_type1, &fcgp->fpras_fn[i],
			    sizeof (struct fpras_chkfn));
		for (i = 1; i < NCPU; ++i)
			*(&fpras_chkfngrps[i]) = *fcgp;

		/*
		 * At definition fpras_frequency is set to -1, and it will
		 * still have that value unless changed in /etc/system (not
		 * strictly supported, but not preventable). The following
		 * both sets the default and sanity checks anything from
		 * /etc/system.
		 */
		if (fpras_frequency < 0)
			fpras_frequency = FPRAS_DEFAULT_FREQUENCY;

		/*
		 * Now calculate fpras_interval. When fpras_interval
		 * becomes non-negative fpras checks will commence
		 * (copies before this point in boot will bypass fpras).
		 * Our stores of instructions must be visible; no need
		 * to flush as they've never been executed before.
		 */
		membar_producer();
		fpras_interval = (fpras_frequency == 0) ?
		    0 : sys_tick_freq / fpras_frequency;
	}
}

/*
 * Disable the hardware (FP-register based) bcopy/bzero paths when no
 * FPU is present, forcing the integer fallbacks.
 */
void
mach_hw_copy_limit(void)
{
	if (!fpu_exists) {
		use_hw_bcopy = 0;
		hw_copy_limit_1 = 0;
		hw_copy_limit_2 = 0;
		hw_copy_limit_4 = 0;
		hw_copy_limit_8 = 0;
		use_hw_bzero = 0;
	}
}

void
load_tod_module()
{
	/*
	 * Load tod driver module for the tod part found on this system.
	 * Recompute the cpu frequency/delays based on tod as tod part
	 * tends to keep time more accurately.
	 */
	if (tod_module_name == NULL || modload("tod", tod_module_name) == -1)
		halt("Can't load tod module");
}

void
mach_memscrub(void)
{
	/*
	 * Startup memory scrubber, if not running fpu emulation code.
	 */

#ifndef _HW_MEMSCRUB_SUPPORT
	if (fpu_exists) {
		if (memscrub_init()) {
			cmn_err(CE_WARN,
			    "Memory scrubber failed to initialize");
		}
	}
#endif /* _HW_MEMSCRUB_SUPPORT */
}

/*
 * Halt the present CPU until awoken via an interrupt.
 * This routine should only be invoked if cpu_halt_cpu()
 * exists and is supported, see mach_cpu_halt_idle()
 */
void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
	volatile int *p = &cpup->cpu_disp->disp_nrunnable;
	uint_t s;

	/*
	 * If this CPU is online then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitset. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (CPU->cpu_flags & CPU_OFFLINE)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitset
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpu bitset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted bitset, and then check if there
	 * is any work available. The order is important to prevent a race
	 * that can lead to work languishing on a run queue somewhere while
	 * this CPU remains halted.
	 *
	 * Either the producing CPU will see we're halted and will awaken us,
	 * or this CPU will see the work available in disp_anywork()
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		membar_producer();
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitset, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted. Wait until something becomes
	 * runnable locally or we are awakened (i.e. removed from the halt
	 * set). Note that the call to hv_cpu_yield() can return even if we
	 * have nothing to do.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 * Also, if the offlined CPU has been brought back on-line, then
	 * we return as well.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 *
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 *
	 * Any interrupt will awaken the cpu from halt. Looping here
	 * will filter spurious interrupts that wake us up, but don't
	 * represent a need for us to head back out to idle(). This
	 * will enable the idle loop to be more efficient and sleep in
	 * the processor pipeline for a larger percent of the time,
	 * which returns useful cycles to the peer hardware strand
	 * that shares the pipeline.
	 */
	s = disable_vec_intr();
	while (*p == 0 &&
	    ((hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid)) ||
	    (!hset_update && (CPU->cpu_flags & CPU_OFFLINE)))) {

		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_HALTED);
		(void) cpu_halt_cpu();
		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_NORMAL);

		/* Briefly re-enable interrupts to let a pending one in. */
		enable_vec_intr(s);
		s = disable_vec_intr();
	}

	/*
	 * We're no longer halted
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}

/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 * This function should only be invoked if cpu_halt_cpu()
 * exists and is supported, see mach_cpu_halt_idle()
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t cpu_found;
	processorid_t cpu_sid;
	cpupart_t *cp;

	cp = cpu->cpu_part;
	cpu_sid = cpu->cpu_seqid;
	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 *
		 * NOTE: cpu_halt() relies on this clear-then-poke order;
		 * do not reorder.
		 */
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpu bitset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if this is for a bound thread.
	 */
	if (bound)
		return;

	/*
	 * The CPU specified for wakeup isn't currently halted, so check
	 * to see if there are any other halted CPUs in the partition,
	 * and if there are then awaken one.
	 *
	 * If possible, try to select a CPU close to the target, since this
	 * will likely trigger a migration.
	 */
	do {
		/* Retry if another CPU raced us and took the bit first. */
		cpu_found = bitset_find(&cp->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);

	if (cpu_found != CPU->cpu_seqid)
		poke_cpu(cpu_seq[cpu_found]->cpu_id);
}

/*
 * Install the halt-idle handlers (cpu_halt/cpu_wakeup) if the platform's
 * cpu module provides cpu_halt_cpu() (weak symbol) and the global switch
 * enable_halt_idle_cpus is set.
 */
void
mach_cpu_halt_idle(void)
{
	if (enable_halt_idle_cpus) {
		if (&cpu_halt_cpu) {
			idle_cpu = cpu_halt;
			disp_enq_thread = cpu_wakeup;
		}
	}
}

/*ARGSUSED*/
int
cpu_intrq_setup(struct cpu *cp)
{
	/* Interrupt mondo queues not applicable to sun4u */
	return (0);
}

/*ARGSUSED*/
void
cpu_intrq_cleanup(struct cpu *cp)
{
	/* Interrupt mondo queues not applicable to sun4u */
}
4301ae08745Sheppo 4311ae08745Sheppo /*ARGSUSED*/ 4321ae08745Sheppo void 4337c478bd9Sstevel@tonic-gate cpu_intrq_register(struct cpu *cp) 4347c478bd9Sstevel@tonic-gate { 4357c478bd9Sstevel@tonic-gate /* Interrupt/error queues not applicable to sun4u */ 4367c478bd9Sstevel@tonic-gate } 4377c478bd9Sstevel@tonic-gate 4387c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 4397c478bd9Sstevel@tonic-gate void 440db6d2ee3Ssvemuri mach_htraptrace_setup(int cpuid) 4417c478bd9Sstevel@tonic-gate { 4427c478bd9Sstevel@tonic-gate /* Setup hypervisor traptrace buffer, not applicable to sun4u */ 4437c478bd9Sstevel@tonic-gate } 4447c478bd9Sstevel@tonic-gate 4457c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 4467c478bd9Sstevel@tonic-gate void 447db6d2ee3Ssvemuri mach_htraptrace_configure(int cpuid) 4487c478bd9Sstevel@tonic-gate { 449db6d2ee3Ssvemuri /* enable/ disable hypervisor traptracing, not applicable to sun4u */ 4507c478bd9Sstevel@tonic-gate } 451db6d2ee3Ssvemuri 452db6d2ee3Ssvemuri /*ARGSUSED*/ 453db6d2ee3Ssvemuri void 454db6d2ee3Ssvemuri mach_htraptrace_cleanup(int cpuid) 455db6d2ee3Ssvemuri { 456db6d2ee3Ssvemuri /* cleanup hypervisor traptrace buffer, not applicable to sun4u */ 457db6d2ee3Ssvemuri } 4587c478bd9Sstevel@tonic-gate 4597c478bd9Sstevel@tonic-gate void 4601ae08745Sheppo mach_descrip_startup_init(void) 4611ae08745Sheppo { 4621ae08745Sheppo /* 4631ae08745Sheppo * Only for sun4v. 4641ae08745Sheppo * Initialize Machine description framework during startup. 4651ae08745Sheppo */ 4661ae08745Sheppo } 4671ae08745Sheppo void 4681ae08745Sheppo mach_descrip_startup_fini(void) 4691ae08745Sheppo { 4701ae08745Sheppo /* 4711ae08745Sheppo * Only for sun4v. 4721ae08745Sheppo * Clean up Machine Description framework during startup. 4731ae08745Sheppo */ 4741ae08745Sheppo } 4751ae08745Sheppo 4761ae08745Sheppo void 4777c478bd9Sstevel@tonic-gate mach_descrip_init(void) 4787c478bd9Sstevel@tonic-gate { 4791ae08745Sheppo /* 4801ae08745Sheppo * Only for sun4v. 
4811ae08745Sheppo * Initialize Machine description framework. 4821ae08745Sheppo */ 4837c478bd9Sstevel@tonic-gate } 4847c478bd9Sstevel@tonic-gate 485c56c1e58Sgirish void 486c56c1e58Sgirish hsvc_setup(void) 487c56c1e58Sgirish { 488c56c1e58Sgirish /* Setup hypervisor services, not applicable to sun4u */ 489c56c1e58Sgirish } 490c56c1e58Sgirish 4911ae08745Sheppo void 4921ae08745Sheppo load_mach_drivers(void) 4931ae08745Sheppo { 4941ae08745Sheppo /* Currently no machine class (sun4u) specific drivers to load */ 4951ae08745Sheppo } 4961ae08745Sheppo 4977c478bd9Sstevel@tonic-gate /* 4987c478bd9Sstevel@tonic-gate * Return true if the machine we're running on is a Positron. 4997c478bd9Sstevel@tonic-gate * (Positron is an unsupported developers platform.) 5007c478bd9Sstevel@tonic-gate */ 5017c478bd9Sstevel@tonic-gate int 5027c478bd9Sstevel@tonic-gate iam_positron(void) 5037c478bd9Sstevel@tonic-gate { 5047c478bd9Sstevel@tonic-gate char model[32]; 5057c478bd9Sstevel@tonic-gate const char proto_model[] = "SUNW,501-2732"; 506fa9e4066Sahrens pnode_t root = prom_rootnode(); 5077c478bd9Sstevel@tonic-gate 5087c478bd9Sstevel@tonic-gate if (prom_getproplen(root, "model") != sizeof (proto_model)) 5097c478bd9Sstevel@tonic-gate return (0); 5107c478bd9Sstevel@tonic-gate 5117c478bd9Sstevel@tonic-gate (void) prom_getprop(root, "model", model); 5127c478bd9Sstevel@tonic-gate if (strcmp(model, proto_model) == 0) 5137c478bd9Sstevel@tonic-gate return (1); 5147c478bd9Sstevel@tonic-gate return (0); 5157c478bd9Sstevel@tonic-gate } 5167c478bd9Sstevel@tonic-gate 5177c478bd9Sstevel@tonic-gate /* 5187c478bd9Sstevel@tonic-gate * Find a physically contiguous area of twice the largest ecache size 5197c478bd9Sstevel@tonic-gate * to be used while doing displacement flush of ecaches. 
5207c478bd9Sstevel@tonic-gate */ 5217c478bd9Sstevel@tonic-gate uint64_t 5227c478bd9Sstevel@tonic-gate ecache_flush_address(void) 5237c478bd9Sstevel@tonic-gate { 5247c478bd9Sstevel@tonic-gate struct memlist *pmem; 5257c478bd9Sstevel@tonic-gate uint64_t flush_size; 5267c478bd9Sstevel@tonic-gate uint64_t ret_val; 5277c478bd9Sstevel@tonic-gate 5287c478bd9Sstevel@tonic-gate flush_size = ecache_size * 2; 52956f33205SJonathan Adams for (pmem = phys_install; pmem; pmem = pmem->ml_next) { 53056f33205SJonathan Adams ret_val = P2ROUNDUP(pmem->ml_address, ecache_size); 53156f33205SJonathan Adams if (ret_val + flush_size <= pmem->ml_address + pmem->ml_size) 5327c478bd9Sstevel@tonic-gate return (ret_val); 5337c478bd9Sstevel@tonic-gate } 5347c478bd9Sstevel@tonic-gate return ((uint64_t)-1); 5357c478bd9Sstevel@tonic-gate } 5367c478bd9Sstevel@tonic-gate 5377c478bd9Sstevel@tonic-gate /* 5387c478bd9Sstevel@tonic-gate * Called with the memlist lock held to say that phys_install has 5397c478bd9Sstevel@tonic-gate * changed. 5407c478bd9Sstevel@tonic-gate */ 5417c478bd9Sstevel@tonic-gate void 5427c478bd9Sstevel@tonic-gate phys_install_has_changed(void) 5437c478bd9Sstevel@tonic-gate { 5447c478bd9Sstevel@tonic-gate /* 5457c478bd9Sstevel@tonic-gate * Get the new address into a temporary just in case panicking 5467c478bd9Sstevel@tonic-gate * involves use of ecache_flushaddr. 5477c478bd9Sstevel@tonic-gate */ 5487c478bd9Sstevel@tonic-gate uint64_t new_addr; 5497c478bd9Sstevel@tonic-gate 5507c478bd9Sstevel@tonic-gate new_addr = ecache_flush_address(); 5517c478bd9Sstevel@tonic-gate if (new_addr == (uint64_t)-1) { 5527c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, 5537c478bd9Sstevel@tonic-gate "ecache_flush_address(): failed, ecache_size=%x", 5547c478bd9Sstevel@tonic-gate ecache_size); 5557c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 5567c478bd9Sstevel@tonic-gate } 5577c478bd9Sstevel@tonic-gate ecache_flushaddr = new_addr; 5587c478bd9Sstevel@tonic-gate membar_producer(); 5597c478bd9Sstevel@tonic-gate } 560