1b885580bSAlexander Kolbasov 27c478bd9Sstevel@tonic-gate /* 37c478bd9Sstevel@tonic-gate * CDDL HEADER START 47c478bd9Sstevel@tonic-gate * 57c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 641791439Sandrei * Common Development and Distribution License (the "License"). 741791439Sandrei * You may not use this file except in compliance with the License. 87c478bd9Sstevel@tonic-gate * 97c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 107c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 117c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 127c478bd9Sstevel@tonic-gate * and limitations under the License. 137c478bd9Sstevel@tonic-gate * 147c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 157c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 167c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 177c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 187c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 197c478bd9Sstevel@tonic-gate * 207c478bd9Sstevel@tonic-gate * CDDL HEADER END 217c478bd9Sstevel@tonic-gate */ 227c478bd9Sstevel@tonic-gate /* 235cd376e8SJimmy Vetayases * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved. 247c478bd9Sstevel@tonic-gate */ 25fa96bd91SMichael Corcoran /* 26a3114836SGerry Liu * Copyright (c) 2009-2010, Intel Corporation. 27fa96bd91SMichael Corcoran * All rights reserved. 28fa96bd91SMichael Corcoran */ 297c478bd9Sstevel@tonic-gate 30a3114836SGerry Liu #define PSMI_1_7 317c478bd9Sstevel@tonic-gate #include <sys/smp_impldefs.h> 327c478bd9Sstevel@tonic-gate #include <sys/psm.h> 337c478bd9Sstevel@tonic-gate #include <sys/psm_modctl.h> 347c478bd9Sstevel@tonic-gate #include <sys/pit.h> 357c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 367c478bd9Sstevel@tonic-gate #include <sys/strlog.h> 377c478bd9Sstevel@tonic-gate #include <sys/clock.h> 387c478bd9Sstevel@tonic-gate #include <sys/debug.h> 397c478bd9Sstevel@tonic-gate #include <sys/rtc.h> 407c478bd9Sstevel@tonic-gate #include <sys/x86_archext.h> 417c478bd9Sstevel@tonic-gate #include <sys/cpupart.h> 427c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h> 43fb2caebeSRandy Fishel #include <sys/cpu_event.h> 44d129bde2Sesaxe #include <sys/cmt.h> 45f98fbcecSbholler #include <sys/cpu.h> 467c478bd9Sstevel@tonic-gate #include <sys/disp.h> 477c478bd9Sstevel@tonic-gate #include <sys/archsystm.h> 48ae115bc7Smrj #include <sys/machsystm.h> 49f98fbcecSbholler #include <sys/sysmacros.h> 50843e1988Sjohnlev #include <sys/memlist.h> 51ae115bc7Smrj #include <sys/param.h> 52ae115bc7Smrj #include <sys/promif.h> 530e751525SEric Saxe #include <sys/cpu_pm.h> 54843e1988Sjohnlev #if defined(__xpv) 55843e1988Sjohnlev #include <sys/hypervisor.h> 56843e1988Sjohnlev #endif 577a364d25Sschwartz #include <sys/mach_intr.h> 58f98fbcecSbholler #include <vm/hat_i86.h> 59a1af7ba0Scwb #include <sys/kdi_machimpl.h> 60c210ded4Sesaxe #include <sys/sdt.h> 610e751525SEric Saxe #include <sys/hpet.h> 62fa96bd91SMichael Corcoran #include <sys/sunddi.h> 63fa96bd91SMichael Corcoran #include <sys/sunndi.h> 64b885580bSAlexander Kolbasov #include <sys/cpc_pcbe.h> 657c478bd9Sstevel@tonic-gate 667c478bd9Sstevel@tonic-gate #define OFFSETOF(s, m) (size_t)(&(((s *)0)->m)) 677c478bd9Sstevel@tonic-gate 687c478bd9Sstevel@tonic-gate /* 697c478bd9Sstevel@tonic-gate * Local function prototypes 707c478bd9Sstevel@tonic-gate */ 717c478bd9Sstevel@tonic-gate static int mp_disable_intr(processorid_t cpun); 727c478bd9Sstevel@tonic-gate static void mp_enable_intr(processorid_t cpun); 737c478bd9Sstevel@tonic-gate static void mach_init(); 747c478bd9Sstevel@tonic-gate static void mach_picinit(); 757c478bd9Sstevel@tonic-gate static int machhztomhz(uint64_t cpu_freq_hz); 767c478bd9Sstevel@tonic-gate static uint64_t mach_getcpufreq(void); 777c478bd9Sstevel@tonic-gate static void mach_fixcpufreq(void); 787c478bd9Sstevel@tonic-gate static int mach_clkinit(int, int *); 797c478bd9Sstevel@tonic-gate static void mach_smpinit(void); 807c478bd9Sstevel@tonic-gate static int mach_softlvl_to_vect(int ipl); 817c478bd9Sstevel@tonic-gate static void mach_get_platform(int owner); 827c478bd9Sstevel@tonic-gate static void mach_construct_info(); 837c478bd9Sstevel@tonic-gate static int mach_translate_irq(dev_info_t *dip, int irqno); 847c478bd9Sstevel@tonic-gate static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *, 857c478bd9Sstevel@tonic-gate psm_intr_op_t, int *); 867c478bd9Sstevel@tonic-gate static void mach_notify_error(int level, char *errmsg); 877c478bd9Sstevel@tonic-gate static hrtime_t dummy_hrtime(void); 887c478bd9Sstevel@tonic-gate static void dummy_scalehrtime(hrtime_t *); 89113b131bSEric Saxe static uint64_t dummy_unscalehrtime(hrtime_t); 900e751525SEric Saxe void cpu_idle(void); 917c478bd9Sstevel@tonic-gate static void cpu_wakeup(cpu_t *, int); 92843e1988Sjohnlev #ifndef __xpv 930e751525SEric Saxe void cpu_idle_mwait(void); 94f98fbcecSbholler static void cpu_wakeup_mwait(cpu_t *, int); 95843e1988Sjohnlev #endif 96fa96bd91SMichael Corcoran static int mach_cpu_create_devinfo(cpu_t *cp, dev_info_t **dipp); 97fa96bd91SMichael Corcoran 987c478bd9Sstevel@tonic-gate /* 997c478bd9Sstevel@tonic-gate * External reference functions 1007c478bd9Sstevel@tonic-gate */ 1017c478bd9Sstevel@tonic-gate extern void return_instr(); 1027c478bd9Sstevel@tonic-gate extern uint64_t freq_tsc(uint32_t *); 1037c478bd9Sstevel@tonic-gate #if defined(__i386) 1047c478bd9Sstevel@tonic-gate extern uint64_t freq_notsc(uint32_t *); 1057c478bd9Sstevel@tonic-gate #endif 1067c478bd9Sstevel@tonic-gate extern void pc_gethrestime(timestruc_t *); 107fb2f18f8Sesaxe extern int cpuid_get_coreid(cpu_t *); 108fb2f18f8Sesaxe extern int cpuid_get_chipid(cpu_t *); 1097c478bd9Sstevel@tonic-gate 1107c478bd9Sstevel@tonic-gate /* 1117c478bd9Sstevel@tonic-gate * PSM functions initialization 1127c478bd9Sstevel@tonic-gate */ 113ae115bc7Smrj void (*psm_shutdownf)(int, int) = (void (*)(int, int))return_instr; 114ae115bc7Smrj void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr; 115ae115bc7Smrj void (*psm_notifyf)(int) = (void (*)(int))return_instr; 116ae115bc7Smrj void (*psm_set_idle_cpuf)(int) = (void (*)(int))return_instr; 117ae115bc7Smrj void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr; 1187c478bd9Sstevel@tonic-gate void (*psminitf)() = mach_init; 1197c478bd9Sstevel@tonic-gate void (*picinitf)() = return_instr; 1207c478bd9Sstevel@tonic-gate int (*clkinitf)(int, int *) = (int (*)(int, int *))return_instr; 1217c478bd9Sstevel@tonic-gate int (*ap_mlsetup)() = (int (*)(void))return_instr; 1227c478bd9Sstevel@tonic-gate void (*send_dirintf)() = return_instr; 123ae115bc7Smrj void (*setspl)(int) = (void (*)(int))return_instr; 1247c478bd9Sstevel@tonic-gate int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr; 1257c478bd9Sstevel@tonic-gate int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr; 1267ff178cdSJimmy Vetayases int (*get_pending_spl)(void) = (int (*)(void))return_instr; 1277ff178cdSJimmy Vetayases int (*addintr)(void *, int, avfunc, char *, int, caddr_t, caddr_t, 1287ff178cdSJimmy Vetayases uint64_t *, dev_info_t *) = NULL; 1297ff178cdSJimmy Vetayases void (*remintr)(void *, int, avfunc, int) = NULL; 130a1af7ba0Scwb void (*kdisetsoftint)(int, struct av_softinfo *)= 131a1af7ba0Scwb (void (*)(int, struct av_softinfo *))return_instr; 132e23a7e34Slq150181 void (*setsoftint)(int, struct av_softinfo *)= 133e23a7e34Slq150181 (void (*)(int, struct av_softinfo *))return_instr; 1347c478bd9Sstevel@tonic-gate int (*slvltovect)(int) = (int (*)(int))return_instr; 1357c478bd9Sstevel@tonic-gate int (*setlvl)(int, int *) = (int (*)(int, int *))return_instr; 1367c478bd9Sstevel@tonic-gate void (*setlvlx)(int, int) = (void (*)(int, int))return_instr; 1377c478bd9Sstevel@tonic-gate int (*psm_disable_intr)(int) = mp_disable_intr; 1387c478bd9Sstevel@tonic-gate void (*psm_enable_intr)(int) = mp_enable_intr; 1397c478bd9Sstevel@tonic-gate hrtime_t (*gethrtimef)(void) = dummy_hrtime; 1407c478bd9Sstevel@tonic-gate hrtime_t (*gethrtimeunscaledf)(void) = dummy_hrtime; 1417c478bd9Sstevel@tonic-gate void (*scalehrtimef)(hrtime_t *) = dummy_scalehrtime; 142113b131bSEric Saxe uint64_t (*unscalehrtimef)(hrtime_t) = dummy_unscalehrtime; 1437c478bd9Sstevel@tonic-gate int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq; 1447c478bd9Sstevel@tonic-gate void (*gethrestimef)(timestruc_t *) = pc_gethrestime; 1457c478bd9Sstevel@tonic-gate void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL; 1467c478bd9Sstevel@tonic-gate int (*psm_get_clockirq)(int) = NULL; 1477c478bd9Sstevel@tonic-gate int (*psm_get_ipivect)(int, int) = NULL; 1487ff178cdSJimmy Vetayases uchar_t (*psm_get_ioapicid)(uchar_t) = NULL; 1497ff178cdSJimmy Vetayases uint32_t (*psm_get_localapicid)(uint32_t) = NULL; 1507ff178cdSJimmy Vetayases uchar_t (*psm_xlate_vector_by_irq)(uchar_t) = NULL; 1517c478bd9Sstevel@tonic-gate 1527c478bd9Sstevel@tonic-gate int (*psm_clkinit)(int) = NULL; 1537c478bd9Sstevel@tonic-gate void (*psm_timer_reprogram)(hrtime_t) = NULL; 1547c478bd9Sstevel@tonic-gate void (*psm_timer_enable)(void) = NULL; 1557c478bd9Sstevel@tonic-gate void (*psm_timer_disable)(void) = NULL; 1567c478bd9Sstevel@tonic-gate void (*psm_post_cyclic_setup)(void *arg) = NULL; 1577c478bd9Sstevel@tonic-gate int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t, 1587c478bd9Sstevel@tonic-gate int *) = mach_intr_ops; 1592df1fe9cSrandyf int (*psm_state)(psm_state_request_t *) = (int (*)(psm_state_request_t *)) 1602df1fe9cSrandyf return_instr; 1617c478bd9Sstevel@tonic-gate 1627c478bd9Sstevel@tonic-gate void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr; 1637c478bd9Sstevel@tonic-gate void (*hrtime_tick)(void) = return_instr; 1647c478bd9Sstevel@tonic-gate 165fa96bd91SMichael Corcoran int (*psm_cpu_create_devinfo)(cpu_t *, dev_info_t **) = mach_cpu_create_devinfo; 166a3114836SGerry Liu int (*psm_cpu_get_devinfo)(cpu_t *, dev_info_t **) = NULL; 167fa96bd91SMichael Corcoran 1687ff178cdSJimmy Vetayases /* global IRM pool for APIX (PSM) module */ 1697ff178cdSJimmy Vetayases ddi_irm_pool_t *apix_irm_pool_p = NULL; 1707ff178cdSJimmy Vetayases 171843e1988Sjohnlev /* 172843e1988Sjohnlev * True if the generic TSC code is our source of hrtime, rather than whatever 173843e1988Sjohnlev * the PSM can provide. 174843e1988Sjohnlev */ 175843e1988Sjohnlev #ifdef __xpv 176843e1988Sjohnlev int tsc_gethrtime_enable = 0; 177843e1988Sjohnlev #else 1787c478bd9Sstevel@tonic-gate int tsc_gethrtime_enable = 1; 179843e1988Sjohnlev #endif 1807c478bd9Sstevel@tonic-gate int tsc_gethrtime_initted = 0; 1817c478bd9Sstevel@tonic-gate 1827c478bd9Sstevel@tonic-gate /* 183843e1988Sjohnlev * True if the hrtime implementation is "hires"; namely, better than microdata. 184843e1988Sjohnlev */ 185843e1988Sjohnlev int gethrtime_hires = 0; 186843e1988Sjohnlev 187843e1988Sjohnlev /* 1887c478bd9Sstevel@tonic-gate * Local Static Data 1897c478bd9Sstevel@tonic-gate */ 1907c478bd9Sstevel@tonic-gate static struct psm_ops mach_ops; 1917c478bd9Sstevel@tonic-gate static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL}; 1927c478bd9Sstevel@tonic-gate static ushort_t mach_ver[4] = {0, 0, 0, 0}; 1937c478bd9Sstevel@tonic-gate 1947c478bd9Sstevel@tonic-gate /* 195bb8220baSVikram Hegde * virtualization support for psm 196bb8220baSVikram Hegde */ 197bb8220baSVikram Hegde void *psm_vt_ops = NULL; 198bb8220baSVikram Hegde /* 199ae115bc7Smrj * If non-zero, idle cpus will become "halted" when there's 2007c478bd9Sstevel@tonic-gate * no work to do. 2017c478bd9Sstevel@tonic-gate */ 202ae115bc7Smrj int idle_cpu_use_hlt = 1; 2037c478bd9Sstevel@tonic-gate 204843e1988Sjohnlev #ifndef __xpv 205f98fbcecSbholler /* 206f98fbcecSbholler * If non-zero, idle cpus will use mwait if available to halt instead of hlt. 207f98fbcecSbholler */ 208f98fbcecSbholler int idle_cpu_prefer_mwait = 1; 2091d1a3942SBill Holler /* 2101d1a3942SBill Holler * Set to 0 to avoid MONITOR+CLFLUSH assertion. 2111d1a3942SBill Holler */ 2121d1a3942SBill Holler int idle_cpu_assert_cflush_monitor = 1; 2131d1a3942SBill Holler 2140e751525SEric Saxe /* 2150e751525SEric Saxe * If non-zero, idle cpus will not use power saving Deep C-States idle loop. 2160e751525SEric Saxe */ 2170e751525SEric Saxe int idle_cpu_no_deep_c = 0; 2180e751525SEric Saxe /* 2190e751525SEric Saxe * Non-power saving idle loop and wakeup pointers. 2200e751525SEric Saxe * Allows user to toggle Deep Idle power saving feature on/off. 2210e751525SEric Saxe */ 2220e751525SEric Saxe void (*non_deep_idle_cpu)() = cpu_idle; 2230e751525SEric Saxe void (*non_deep_idle_disp_enq_thread)(cpu_t *, int); 2240e751525SEric Saxe 2250e751525SEric Saxe /* 2260e751525SEric Saxe * Object for the kernel to access the HPET. 2270e751525SEric Saxe */ 2280e751525SEric Saxe hpet_t hpet; 2290e751525SEric Saxe 2300e751525SEric Saxe #endif /* ifndef __xpv */ 231fb2f18f8Sesaxe 2320542eecfSRafael Vanoni uint_t cp_haltset_fanout = 0; 2330542eecfSRafael Vanoni 2347c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 235fb2f18f8Sesaxe int 236fb2f18f8Sesaxe pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw) 2377c478bd9Sstevel@tonic-gate { 238fb2f18f8Sesaxe switch (hw) { 239fb2f18f8Sesaxe case PGHW_IPIPE: 2407417cfdeSKuriakose Kuruvilla if (is_x86_feature(x86_featureset, X86FSET_HTT)) { 2417c478bd9Sstevel@tonic-gate /* 242fb2f18f8Sesaxe * Hyper-threading is SMT 2437c478bd9Sstevel@tonic-gate */ 244fb2f18f8Sesaxe return (1); 2458949bcd6Sandrei } else { 246fb2f18f8Sesaxe return (0); 247fb2f18f8Sesaxe } 248*7660e73fSHans Rosenfeld case PGHW_FPU: 249*7660e73fSHans Rosenfeld if (cpuid_get_cores_per_compunit(cp) > 1) 250*7660e73fSHans Rosenfeld return (1); 251*7660e73fSHans Rosenfeld else 252*7660e73fSHans Rosenfeld return (0); 2538031591dSSrihari Venkatesan case PGHW_PROCNODE: 2548031591dSSrihari Venkatesan if (cpuid_get_procnodes_per_pkg(cp) > 1) 2558031591dSSrihari Venkatesan return (1); 2568031591dSSrihari Venkatesan else 2578031591dSSrihari Venkatesan return (0); 258fb2f18f8Sesaxe case PGHW_CHIP: 2597417cfdeSKuriakose Kuruvilla if (is_x86_feature(x86_featureset, X86FSET_CMP) || 2607417cfdeSKuriakose Kuruvilla is_x86_feature(x86_featureset, X86FSET_HTT)) 261fb2f18f8Sesaxe return (1); 262fb2f18f8Sesaxe else 263fb2f18f8Sesaxe return (0); 264d129bde2Sesaxe case PGHW_CACHE: 265d129bde2Sesaxe if (cpuid_get_ncpu_sharing_last_cache(cp) > 1) 266d129bde2Sesaxe return (1); 267d129bde2Sesaxe else 268d129bde2Sesaxe return (0); 2690e751525SEric Saxe case PGHW_POW_ACTIVE: 2700e751525SEric Saxe if (cpupm_domain_id(cp, CPUPM_DTYPE_ACTIVE) != (id_t)-1) 2710e751525SEric Saxe return (1); 2720e751525SEric Saxe else 2730e751525SEric Saxe return (0); 2740e751525SEric Saxe case PGHW_POW_IDLE: 2750e751525SEric Saxe if (cpupm_domain_id(cp, CPUPM_DTYPE_IDLE) != (id_t)-1) 2760e751525SEric Saxe return (1); 2770e751525SEric Saxe else 2780e751525SEric Saxe return (0); 279fb2f18f8Sesaxe default: 280fb2f18f8Sesaxe return (0); 281fb2f18f8Sesaxe } 2828949bcd6Sandrei } 2837c478bd9Sstevel@tonic-gate 284fb2f18f8Sesaxe /* 285fb2f18f8Sesaxe * Compare two CPUs and see if they have a pghw_type_t sharing relationship 286fb2f18f8Sesaxe * If pghw_type_t is an unsupported hardware type, then return -1 287fb2f18f8Sesaxe */ 288fb2f18f8Sesaxe int 289fb2f18f8Sesaxe pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw) 290fb2f18f8Sesaxe { 291fb2f18f8Sesaxe id_t pgp_a, pgp_b; 292fb2f18f8Sesaxe 293fb2f18f8Sesaxe pgp_a = pg_plat_hw_instance_id(cpu_a, hw); 294fb2f18f8Sesaxe pgp_b = pg_plat_hw_instance_id(cpu_b, hw); 295fb2f18f8Sesaxe 296fb2f18f8Sesaxe if (pgp_a == -1 || pgp_b == -1) 297fb2f18f8Sesaxe return (-1); 298fb2f18f8Sesaxe 299fb2f18f8Sesaxe return (pgp_a == pgp_b); 300fb2f18f8Sesaxe } 301fb2f18f8Sesaxe 302fb2f18f8Sesaxe /* 303fb2f18f8Sesaxe * Return a physical instance identifier for known hardware sharing 304fb2f18f8Sesaxe * relationships 305fb2f18f8Sesaxe */ 306fb2f18f8Sesaxe id_t 307fb2f18f8Sesaxe pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw) 308fb2f18f8Sesaxe { 309fb2f18f8Sesaxe switch (hw) { 310fb2f18f8Sesaxe case PGHW_IPIPE: 311fb2f18f8Sesaxe return (cpuid_get_coreid(cpu)); 312d129bde2Sesaxe case PGHW_CACHE: 313d129bde2Sesaxe return (cpuid_get_last_lvl_cacheid(cpu)); 314*7660e73fSHans Rosenfeld case PGHW_FPU: 315*7660e73fSHans Rosenfeld return (cpuid_get_compunitid(cpu)); 3168031591dSSrihari Venkatesan case PGHW_PROCNODE: 3178031591dSSrihari Venkatesan return (cpuid_get_procnodeid(cpu)); 318fb2f18f8Sesaxe case PGHW_CHIP: 319fb2f18f8Sesaxe return (cpuid_get_chipid(cpu)); 3200e751525SEric Saxe case PGHW_POW_ACTIVE: 3210e751525SEric Saxe return (cpupm_domain_id(cpu, CPUPM_DTYPE_ACTIVE)); 3220e751525SEric Saxe case PGHW_POW_IDLE: 3230e751525SEric Saxe return (cpupm_domain_id(cpu, CPUPM_DTYPE_IDLE)); 324fb2f18f8Sesaxe default: 325fb2f18f8Sesaxe return (-1); 326fb2f18f8Sesaxe } 327fb2f18f8Sesaxe } 328fb2f18f8Sesaxe 3290e751525SEric Saxe /* 3300e751525SEric Saxe * Express preference for optimizing for sharing relationship 3310e751525SEric Saxe * hw1 vs hw2 3320e751525SEric Saxe */ 3330e751525SEric Saxe pghw_type_t 3340e751525SEric Saxe pg_plat_hw_rank(pghw_type_t hw1, pghw_type_t hw2) 335fb2f18f8Sesaxe { 3360e751525SEric Saxe int i, rank1, rank2; 3370e751525SEric Saxe 338fb2f18f8Sesaxe static pghw_type_t hw_hier[] = { 339fb2f18f8Sesaxe PGHW_IPIPE, 340d129bde2Sesaxe PGHW_CACHE, 341*7660e73fSHans Rosenfeld PGHW_FPU, 3428031591dSSrihari Venkatesan PGHW_PROCNODE, 343fb2f18f8Sesaxe PGHW_CHIP, 3440e751525SEric Saxe PGHW_POW_IDLE, 3450e751525SEric Saxe PGHW_POW_ACTIVE, 346fb2f18f8Sesaxe PGHW_NUM_COMPONENTS 347fb2f18f8Sesaxe }; 348fb2f18f8Sesaxe 349fb2f18f8Sesaxe for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) { 3500e751525SEric Saxe if (hw_hier[i] == hw1) 3510e751525SEric Saxe rank1 = i; 3520e751525SEric Saxe if (hw_hier[i] == hw2) 3530e751525SEric Saxe rank2 = i; 354fb2f18f8Sesaxe } 3550e751525SEric Saxe 3560e751525SEric Saxe if (rank1 > rank2) 3570e751525SEric Saxe return (hw1); 3580e751525SEric Saxe else 3590e751525SEric Saxe return (hw2); 360fb2f18f8Sesaxe } 361fb2f18f8Sesaxe 362d129bde2Sesaxe /* 3630e751525SEric Saxe * Override the default CMT dispatcher policy for the specified 3640e751525SEric Saxe * hardware sharing relationship 365d129bde2Sesaxe */ 3660e751525SEric Saxe pg_cmt_policy_t 3670e751525SEric Saxe pg_plat_cmt_policy(pghw_type_t hw) 368d129bde2Sesaxe { 369d129bde2Sesaxe /* 3700e751525SEric Saxe * For shared caches, also load balance across them to 3710e751525SEric Saxe * maximize aggregate cache capacity 372*7660e73fSHans Rosenfeld * 373*7660e73fSHans Rosenfeld * On AMD family 0x15 CPUs, cores come in pairs called 374*7660e73fSHans Rosenfeld * compute units, sharing the FPU and the I$ and L2 375*7660e73fSHans Rosenfeld * caches. Use balancing and cache affinity. 376d129bde2Sesaxe */ 3770e751525SEric Saxe switch (hw) { 378*7660e73fSHans Rosenfeld case PGHW_FPU: 3790e751525SEric Saxe case PGHW_CACHE: 3800e751525SEric Saxe return (CMT_BALANCE|CMT_AFFINITY); 3810e751525SEric Saxe default: 3820e751525SEric Saxe return (CMT_NO_POLICY); 3830e751525SEric Saxe } 384d129bde2Sesaxe } 385d129bde2Sesaxe 386fb2f18f8Sesaxe id_t 387fb2f18f8Sesaxe pg_plat_get_core_id(cpu_t *cpu) 388fb2f18f8Sesaxe { 389fb2f18f8Sesaxe return ((id_t)cpuid_get_coreid(cpu)); 390fb2f18f8Sesaxe } 391fb2f18f8Sesaxe 392fb2f18f8Sesaxe void 393fb2f18f8Sesaxe cmp_set_nosteal_interval(void) 394fb2f18f8Sesaxe { 395fb2f18f8Sesaxe /* Set the nosteal interval (used by disp_getbest()) to 100us */ 396fb2f18f8Sesaxe nosteal_nsec = 100000UL; 3977c478bd9Sstevel@tonic-gate } 3987c478bd9Sstevel@tonic-gate 3997c478bd9Sstevel@tonic-gate /* 4007c478bd9Sstevel@tonic-gate * Routine to ensure initial callers to hrtime gets 0 as return 4017c478bd9Sstevel@tonic-gate */ 4027c478bd9Sstevel@tonic-gate static hrtime_t 4037c478bd9Sstevel@tonic-gate dummy_hrtime(void) 4047c478bd9Sstevel@tonic-gate { 4057c478bd9Sstevel@tonic-gate return (0); 4067c478bd9Sstevel@tonic-gate } 4077c478bd9Sstevel@tonic-gate 4087c478bd9Sstevel@tonic-gate /* ARGSUSED */ 4097c478bd9Sstevel@tonic-gate static void 4107c478bd9Sstevel@tonic-gate dummy_scalehrtime(hrtime_t *ticks) 4117c478bd9Sstevel@tonic-gate {} 4127c478bd9Sstevel@tonic-gate 413113b131bSEric Saxe static uint64_t 414113b131bSEric Saxe dummy_unscalehrtime(hrtime_t nsecs) 415113b131bSEric Saxe { 416113b131bSEric Saxe return ((uint64_t)nsecs); 417113b131bSEric Saxe } 418113b131bSEric Saxe 4197c478bd9Sstevel@tonic-gate /* 4200e751525SEric Saxe * Supports Deep C-State power saving idle loop. 4210e751525SEric Saxe */ 4220e751525SEric Saxe void 4230e751525SEric Saxe cpu_idle_adaptive(void) 4240e751525SEric Saxe { 4250e751525SEric Saxe (*CPU->cpu_m.mcpu_idle_cpu)(); 4260e751525SEric Saxe } 4270e751525SEric Saxe 428fb2caebeSRandy Fishel /* 429fb2caebeSRandy Fishel * Function called by CPU idle notification framework to check whether CPU 430fb2caebeSRandy Fishel * has been awakened. It will be called with interrupt disabled. 431fb2caebeSRandy Fishel * If CPU has been awakened, call cpu_idle_exit() to notify CPU idle 432fb2caebeSRandy Fishel * notification framework. 433fb2caebeSRandy Fishel */ 434fb2caebeSRandy Fishel /*ARGSUSED*/ 435fb2caebeSRandy Fishel static void 436fb2caebeSRandy Fishel cpu_idle_check_wakeup(void *arg) 4370e751525SEric Saxe { 438fb2caebeSRandy Fishel /* 439fb2caebeSRandy Fishel * Toggle interrupt flag to detect pending interrupts. 440fb2caebeSRandy Fishel * If interrupt happened, do_interrupt() will notify CPU idle 441fb2caebeSRandy Fishel * notification framework so no need to call cpu_idle_exit() here. 442fb2caebeSRandy Fishel */ 443fb2caebeSRandy Fishel sti(); 444fb2caebeSRandy Fishel SMT_PAUSE(); 445fb2caebeSRandy Fishel cli(); 4460e751525SEric Saxe } 4470e751525SEric Saxe 4480e751525SEric Saxe /* 449fb2caebeSRandy Fishel * Idle the present CPU until wakened via an interrupt 4507c478bd9Sstevel@tonic-gate */ 4510e751525SEric Saxe void 452ae115bc7Smrj cpu_idle(void) 4537c478bd9Sstevel@tonic-gate { 4547c478bd9Sstevel@tonic-gate cpu_t *cpup = CPU; 4556890d023SEric Saxe processorid_t cpu_sid = cpup->cpu_seqid; 456f1f2d3ffSesaxe cpupart_t *cp = cpup->cpu_part; 4577c478bd9Sstevel@tonic-gate int hset_update = 1; 4587c478bd9Sstevel@tonic-gate 4597c478bd9Sstevel@tonic-gate /* 4607c478bd9Sstevel@tonic-gate * If this CPU is online, and there's multiple CPUs 4617c478bd9Sstevel@tonic-gate * in the system, then we should notate our halting 4627c478bd9Sstevel@tonic-gate * by adding ourselves to the partition's halted CPU 4637c478bd9Sstevel@tonic-gate * bitmap. This allows other CPUs to find/awaken us when 4647c478bd9Sstevel@tonic-gate * work becomes available. 4657c478bd9Sstevel@tonic-gate */ 4667c478bd9Sstevel@tonic-gate if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1) 4677c478bd9Sstevel@tonic-gate hset_update = 0; 4687c478bd9Sstevel@tonic-gate 4697c478bd9Sstevel@tonic-gate /* 4706890d023SEric Saxe * Add ourselves to the partition's halted CPUs bitmap 4717c478bd9Sstevel@tonic-gate * and set our HALTED flag, if necessary. 4727c478bd9Sstevel@tonic-gate * 473f1f2d3ffSesaxe * When a thread becomes runnable, it is placed on the queue 4746890d023SEric Saxe * and then the halted CPU bitmap is checked to determine who 475fb2caebeSRandy Fishel * (if anyone) should be awakened. We therefore need to first 4766890d023SEric Saxe * add ourselves to the bitmap, and and then check if there 4776890d023SEric Saxe * is any work available. The order is important to prevent a race 4786890d023SEric Saxe * that can lead to work languishing on a run queue somewhere while 4796890d023SEric Saxe * this CPU remains halted. 4806890d023SEric Saxe * 4816890d023SEric Saxe * Either the producing CPU will see we're halted and will awaken us, 4826890d023SEric Saxe * or this CPU will see the work available in disp_anywork(). 483f1f2d3ffSesaxe * 4847c478bd9Sstevel@tonic-gate * Note that memory barriers after updating the HALTED flag 4856890d023SEric Saxe * are not necessary since an atomic operation (updating the bitset) 4867c478bd9Sstevel@tonic-gate * immediately follows. On x86 the atomic operation acts as a 4877c478bd9Sstevel@tonic-gate * memory barrier for the update of cpu_disp_flags. 4887c478bd9Sstevel@tonic-gate */ 4897c478bd9Sstevel@tonic-gate if (hset_update) { 4907c478bd9Sstevel@tonic-gate cpup->cpu_disp_flags |= CPU_DISP_HALTED; 4916890d023SEric Saxe bitset_atomic_add(&cp->cp_haltset, cpu_sid); 4927c478bd9Sstevel@tonic-gate } 4937c478bd9Sstevel@tonic-gate 4947c478bd9Sstevel@tonic-gate /* 4957c478bd9Sstevel@tonic-gate * Check to make sure there's really nothing to do. 496f1f2d3ffSesaxe * Work destined for this CPU may become available after 497f1f2d3ffSesaxe * this check. We'll be notified through the clearing of our 4986890d023SEric Saxe * bit in the halted CPU bitmap, and a poke. 4997c478bd9Sstevel@tonic-gate */ 5007c478bd9Sstevel@tonic-gate if (disp_anywork()) { 5017c478bd9Sstevel@tonic-gate if (hset_update) { 5027c478bd9Sstevel@tonic-gate cpup->cpu_disp_flags &= ~CPU_DISP_HALTED; 5036890d023SEric Saxe bitset_atomic_del(&cp->cp_haltset, cpu_sid); 5047c478bd9Sstevel@tonic-gate } 505f1f2d3ffSesaxe return; 506f1f2d3ffSesaxe } 507f1f2d3ffSesaxe 508f1f2d3ffSesaxe /* 509f1f2d3ffSesaxe * We're on our way to being halted. 510f1f2d3ffSesaxe * 511f1f2d3ffSesaxe * Disable interrupts now, so that we'll awaken immediately 512f1f2d3ffSesaxe * after halting if someone tries to poke us between now and 513f1f2d3ffSesaxe * the time we actually halt. 514f1f2d3ffSesaxe * 515f1f2d3ffSesaxe * We check for the presence of our bit after disabling interrupts. 516f1f2d3ffSesaxe * If it's cleared, we'll return. If the bit is cleared after 517f1f2d3ffSesaxe * we check then the poke will pop us out of the halted state. 518f1f2d3ffSesaxe * 519f1f2d3ffSesaxe * This means that the ordering of the poke and the clearing 520f1f2d3ffSesaxe * of the bit by cpu_wakeup is important. 521f1f2d3ffSesaxe * cpu_wakeup() must clear, then poke. 522ae115bc7Smrj * cpu_idle() must disable interrupts, then check for the bit. 523f1f2d3ffSesaxe */ 524f1f2d3ffSesaxe cli(); 525f1f2d3ffSesaxe 5266890d023SEric Saxe if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) { 527f1f2d3ffSesaxe cpup->cpu_disp_flags &= ~CPU_DISP_HALTED; 528f1f2d3ffSesaxe sti(); 529f1f2d3ffSesaxe return; 530f1f2d3ffSesaxe } 531f1f2d3ffSesaxe 532f1f2d3ffSesaxe /* 533f1f2d3ffSesaxe * The check for anything locally runnable is here for performance 534f1f2d3ffSesaxe * and isn't needed for correctness. disp_nrunnable ought to be 535f1f2d3ffSesaxe * in our cache still, so it's inexpensive to check, and if there 536f1f2d3ffSesaxe * is anything runnable we won't have to wait for the poke. 537f1f2d3ffSesaxe */ 538f1f2d3ffSesaxe if (cpup->cpu_disp->disp_nrunnable != 0) { 539f1f2d3ffSesaxe if (hset_update) { 540f1f2d3ffSesaxe cpup->cpu_disp_flags &= ~CPU_DISP_HALTED; 5416890d023SEric Saxe bitset_atomic_del(&cp->cp_haltset, cpu_sid); 542f1f2d3ffSesaxe } 5437c478bd9Sstevel@tonic-gate sti(); 5447c478bd9Sstevel@tonic-gate return; 5457c478bd9Sstevel@tonic-gate } 5467c478bd9Sstevel@tonic-gate 547fb2caebeSRandy Fishel if (cpu_idle_enter(IDLE_STATE_C1, 0, 548fb2caebeSRandy Fishel cpu_idle_check_wakeup, NULL) == 0) { 549ae115bc7Smrj mach_cpu_idle(); 550fb2caebeSRandy Fishel cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE); 551fb2caebeSRandy Fishel } 55259870565Srv207048 5537c478bd9Sstevel@tonic-gate /* 5547c478bd9Sstevel@tonic-gate * We're no longer halted 5557c478bd9Sstevel@tonic-gate */ 5567c478bd9Sstevel@tonic-gate if (hset_update) { 5577c478bd9Sstevel@tonic-gate cpup->cpu_disp_flags &= ~CPU_DISP_HALTED; 5586890d023SEric Saxe bitset_atomic_del(&cp->cp_haltset, cpu_sid); 5597c478bd9Sstevel@tonic-gate } 5607c478bd9Sstevel@tonic-gate } 5617c478bd9Sstevel@tonic-gate 5627c478bd9Sstevel@tonic-gate 5637c478bd9Sstevel@tonic-gate /* 5647c478bd9Sstevel@tonic-gate * If "cpu" is halted, then wake it up clearing its halted bit in advance. 5657c478bd9Sstevel@tonic-gate * Otherwise, see if other CPUs in the cpu partition are halted and need to 5667c478bd9Sstevel@tonic-gate * be woken up so that they can steal the thread we placed on this CPU. 5677c478bd9Sstevel@tonic-gate * This function is only used on MP systems. 5687c478bd9Sstevel@tonic-gate */ 5697c478bd9Sstevel@tonic-gate static void 5707c478bd9Sstevel@tonic-gate cpu_wakeup(cpu_t *cpu, int bound) 5717c478bd9Sstevel@tonic-gate { 5727c478bd9Sstevel@tonic-gate uint_t cpu_found; 5736890d023SEric Saxe processorid_t cpu_sid; 5747c478bd9Sstevel@tonic-gate cpupart_t *cp; 5757c478bd9Sstevel@tonic-gate 5767c478bd9Sstevel@tonic-gate cp = cpu->cpu_part; 5776890d023SEric Saxe cpu_sid = cpu->cpu_seqid; 5786890d023SEric Saxe if (bitset_in_set(&cp->cp_haltset, cpu_sid)) { 5797c478bd9Sstevel@tonic-gate /* 5807c478bd9Sstevel@tonic-gate * Clear the halted bit for that CPU since it will be 5817c478bd9Sstevel@tonic-gate * poked in a moment. 5827c478bd9Sstevel@tonic-gate */ 5836890d023SEric Saxe bitset_atomic_del(&cp->cp_haltset, cpu_sid); 5847c478bd9Sstevel@tonic-gate /* 5857c478bd9Sstevel@tonic-gate * We may find the current CPU present in the halted cpuset 5867c478bd9Sstevel@tonic-gate * if we're in the context of an interrupt that occurred 587ae115bc7Smrj * before we had a chance to clear our bit in cpu_idle(). 5887c478bd9Sstevel@tonic-gate * Poking ourself is obviously unnecessary, since if 5897c478bd9Sstevel@tonic-gate * we're here, we're not halted. 5907c478bd9Sstevel@tonic-gate */ 5917c478bd9Sstevel@tonic-gate if (cpu != CPU) 5927c478bd9Sstevel@tonic-gate poke_cpu(cpu->cpu_id); 5937c478bd9Sstevel@tonic-gate return; 5947c478bd9Sstevel@tonic-gate } else { 5957c478bd9Sstevel@tonic-gate /* 5967c478bd9Sstevel@tonic-gate * This cpu isn't halted, but it's idle or undergoing a 5977c478bd9Sstevel@tonic-gate * context switch. No need to awaken anyone else. 5987c478bd9Sstevel@tonic-gate */ 5997c478bd9Sstevel@tonic-gate if (cpu->cpu_thread == cpu->cpu_idle_thread || 6007c478bd9Sstevel@tonic-gate cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL) 6017c478bd9Sstevel@tonic-gate return; 6027c478bd9Sstevel@tonic-gate } 6037c478bd9Sstevel@tonic-gate 6047c478bd9Sstevel@tonic-gate /* 6056890d023SEric Saxe * No need to wake up other CPUs if this is for a bound thread. 6067c478bd9Sstevel@tonic-gate */ 6077c478bd9Sstevel@tonic-gate if (bound) 6087c478bd9Sstevel@tonic-gate return; 6097c478bd9Sstevel@tonic-gate 6107c478bd9Sstevel@tonic-gate /* 6116890d023SEric Saxe * The CPU specified for wakeup isn't currently halted, so check 6126890d023SEric Saxe * to see if there are any other halted CPUs in the partition, 6136890d023SEric Saxe * and if there are then awaken one. 6147c478bd9Sstevel@tonic-gate */ 6157c478bd9Sstevel@tonic-gate do { 6166890d023SEric Saxe cpu_found = bitset_find(&cp->cp_haltset); 6176890d023SEric Saxe if (cpu_found == (uint_t)-1) 6187c478bd9Sstevel@tonic-gate return; 6196890d023SEric Saxe } while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0); 6207c478bd9Sstevel@tonic-gate 6216890d023SEric Saxe if (cpu_found != CPU->cpu_seqid) { 6226890d023SEric Saxe poke_cpu(cpu_seq[cpu_found]->cpu_id); 6236890d023SEric Saxe } 6247c478bd9Sstevel@tonic-gate } 6257c478bd9Sstevel@tonic-gate 626843e1988Sjohnlev #ifndef __xpv 627f98fbcecSbholler /* 628fb2caebeSRandy Fishel * Function called by CPU idle notification framework to check whether CPU 629fb2caebeSRandy Fishel * has been awakened. It will be called with interrupt disabled. 630fb2caebeSRandy Fishel * If CPU has been awakened, call cpu_idle_exit() to notify CPU idle 631fb2caebeSRandy Fishel * notification framework. 632fb2caebeSRandy Fishel */ 633fb2caebeSRandy Fishel static void 634fb2caebeSRandy Fishel cpu_idle_mwait_check_wakeup(void *arg) 635fb2caebeSRandy Fishel { 636fb2caebeSRandy Fishel volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg; 637fb2caebeSRandy Fishel 638fb2caebeSRandy Fishel ASSERT(arg != NULL); 639fb2caebeSRandy Fishel if (*mcpu_mwait != MWAIT_HALTED) { 640fb2caebeSRandy Fishel /* 641fb2caebeSRandy Fishel * CPU has been awakened, notify CPU idle notification system. 642fb2caebeSRandy Fishel */ 643fb2caebeSRandy Fishel cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE); 644fb2caebeSRandy Fishel } else { 645fb2caebeSRandy Fishel /* 646fb2caebeSRandy Fishel * Toggle interrupt flag to detect pending interrupts. 647fb2caebeSRandy Fishel * If interrupt happened, do_interrupt() will notify CPU idle 648fb2caebeSRandy Fishel * notification framework so no need to call cpu_idle_exit() 649fb2caebeSRandy Fishel * here. 650fb2caebeSRandy Fishel */ 651fb2caebeSRandy Fishel sti(); 652fb2caebeSRandy Fishel SMT_PAUSE(); 653fb2caebeSRandy Fishel cli(); 654fb2caebeSRandy Fishel } 655fb2caebeSRandy Fishel } 656fb2caebeSRandy Fishel 657fb2caebeSRandy Fishel /* 658fb2caebeSRandy Fishel * Idle the present CPU until awakened via touching its monitored line 659f98fbcecSbholler */ 6600e751525SEric Saxe void 661f98fbcecSbholler cpu_idle_mwait(void) 662f98fbcecSbholler { 663f98fbcecSbholler volatile uint32_t *mcpu_mwait = CPU->cpu_m.mcpu_mwait; 664f98fbcecSbholler cpu_t *cpup = CPU; 6656890d023SEric Saxe processorid_t cpu_sid = cpup->cpu_seqid; 666f98fbcecSbholler cpupart_t *cp = cpup->cpu_part; 667f98fbcecSbholler int hset_update = 1; 668f98fbcecSbholler 669f98fbcecSbholler /* 6700e751525SEric Saxe * Set our mcpu_mwait here, so we can tell if anyone tries to 671f98fbcecSbholler * wake us between now and when we call mwait. No other cpu will 6726890d023SEric Saxe * attempt to set our mcpu_mwait until we add ourself to the halted 6736890d023SEric Saxe * CPU bitmap. 674f98fbcecSbholler */ 675f98fbcecSbholler *mcpu_mwait = MWAIT_HALTED; 676f98fbcecSbholler 677f98fbcecSbholler /* 678f98fbcecSbholler * If this CPU is online, and there's multiple CPUs 6790e751525SEric Saxe * in the system, then we should note our halting 680f98fbcecSbholler * by adding ourselves to the partition's halted CPU 681f98fbcecSbholler * bitmap. This allows other CPUs to find/awaken us when 682f98fbcecSbholler * work becomes available. 683f98fbcecSbholler */ 684f98fbcecSbholler if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1) 685f98fbcecSbholler hset_update = 0; 686f98fbcecSbholler 687f98fbcecSbholler /* 6886890d023SEric Saxe * Add ourselves to the partition's halted CPUs bitmap 689f98fbcecSbholler * and set our HALTED flag, if necessary. 690f98fbcecSbholler * 691f98fbcecSbholler * When a thread becomes runnable, it is placed on the queue 6926890d023SEric Saxe * and then the halted CPU bitmap is checked to determine who 6930e751525SEric Saxe * (if anyone) should be awakened. We therefore need to first 6946890d023SEric Saxe * add ourselves to the bitmap, and and then check if there 695f98fbcecSbholler * is any work available. 696f98fbcecSbholler * 697f98fbcecSbholler * Note that memory barriers after updating the HALTED flag 698f98fbcecSbholler * are not necessary since an atomic operation (updating the bitmap) 699f98fbcecSbholler * immediately follows. On x86 the atomic operation acts as a 700f98fbcecSbholler * memory barrier for the update of cpu_disp_flags. 701f98fbcecSbholler */ 702f98fbcecSbholler if (hset_update) { 703f98fbcecSbholler cpup->cpu_disp_flags |= CPU_DISP_HALTED; 7046890d023SEric Saxe bitset_atomic_add(&cp->cp_haltset, cpu_sid); 705f98fbcecSbholler } 706f98fbcecSbholler 707f98fbcecSbholler /* 708f98fbcecSbholler * Check to make sure there's really nothing to do. 709f98fbcecSbholler * Work destined for this CPU may become available after 710f98fbcecSbholler * this check. We'll be notified through the clearing of our 7116890d023SEric Saxe * bit in the halted CPU bitmap, and a write to our mcpu_mwait. 712f98fbcecSbholler * 713f98fbcecSbholler * disp_anywork() checks disp_nrunnable, so we do not have to later. 714f98fbcecSbholler */ 715f98fbcecSbholler if (disp_anywork()) { 716f98fbcecSbholler if (hset_update) { 717f98fbcecSbholler cpup->cpu_disp_flags &= ~CPU_DISP_HALTED; 7186890d023SEric Saxe bitset_atomic_del(&cp->cp_haltset, cpu_sid); 719f98fbcecSbholler } 720f98fbcecSbholler return; 721f98fbcecSbholler } 722f98fbcecSbholler 723f98fbcecSbholler /* 724f98fbcecSbholler * We're on our way to being halted. 725f98fbcecSbholler * To avoid a lost wakeup, arm the monitor before checking if another 726f98fbcecSbholler * cpu wrote to mcpu_mwait to wake us up. 727f98fbcecSbholler */ 728f98fbcecSbholler i86_monitor(mcpu_mwait, 0, 0); 729f98fbcecSbholler if (*mcpu_mwait == MWAIT_HALTED) { 730fb2caebeSRandy Fishel if (cpu_idle_enter(IDLE_STATE_C1, 0, 731fb2caebeSRandy Fishel cpu_idle_mwait_check_wakeup, (void *)mcpu_mwait) == 0) { 732fb2caebeSRandy Fishel if (*mcpu_mwait == MWAIT_HALTED) { 733f98fbcecSbholler i86_mwait(0, 0); 734fb2caebeSRandy Fishel } 735fb2caebeSRandy Fishel cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE); 736fb2caebeSRandy Fishel } 737f98fbcecSbholler } 738f98fbcecSbholler 739f98fbcecSbholler /* 740f98fbcecSbholler * We're no longer halted 741f98fbcecSbholler */ 742f98fbcecSbholler if (hset_update) { 743f98fbcecSbholler cpup->cpu_disp_flags &= ~CPU_DISP_HALTED; 7446890d023SEric Saxe bitset_atomic_del(&cp->cp_haltset, cpu_sid); 745f98fbcecSbholler } 746f98fbcecSbholler } 747f98fbcecSbholler 748f98fbcecSbholler /* 749f98fbcecSbholler * If "cpu" is halted in mwait, then wake it up clearing its halted bit in 750f98fbcecSbholler * advance. Otherwise, see if other CPUs in the cpu partition are halted and 751f98fbcecSbholler * need to be woken up so that they can steal the thread we placed on this CPU. 752f98fbcecSbholler * This function is only used on MP systems. 753f98fbcecSbholler */ 754f98fbcecSbholler static void 755f98fbcecSbholler cpu_wakeup_mwait(cpu_t *cp, int bound) 756f98fbcecSbholler { 757f98fbcecSbholler cpupart_t *cpu_part; 758f98fbcecSbholler uint_t cpu_found; 7596890d023SEric Saxe processorid_t cpu_sid; 760f98fbcecSbholler 761f98fbcecSbholler cpu_part = cp->cpu_part; 7626890d023SEric Saxe cpu_sid = cp->cpu_seqid; 763f98fbcecSbholler 764f98fbcecSbholler /* 765f98fbcecSbholler * Clear the halted bit for that CPU since it will be woken up 766f98fbcecSbholler * in a moment. 767f98fbcecSbholler */ 7686890d023SEric Saxe if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) { 769f98fbcecSbholler /* 770f98fbcecSbholler * Clear the halted bit for that CPU since it will be 771f98fbcecSbholler * poked in a moment. 772f98fbcecSbholler */ 7736890d023SEric Saxe bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid); 774f98fbcecSbholler /* 775f98fbcecSbholler * We may find the current CPU present in the halted cpuset 776f98fbcecSbholler * if we're in the context of an interrupt that occurred 777f98fbcecSbholler * before we had a chance to clear our bit in cpu_idle(). 778f98fbcecSbholler * Waking ourself is obviously unnecessary, since if 779f98fbcecSbholler * we're here, we're not halted. 780f98fbcecSbholler * 781f98fbcecSbholler * monitor/mwait wakeup via writing to our cache line is 782f98fbcecSbholler * harmless and less expensive than always checking if we 783f98fbcecSbholler * are waking ourself which is an uncommon case. 784f98fbcecSbholler */ 785f98fbcecSbholler MWAIT_WAKEUP(cp); /* write to monitored line */ 786f98fbcecSbholler return; 787f98fbcecSbholler } else { 788f98fbcecSbholler /* 789f98fbcecSbholler * This cpu isn't halted, but it's idle or undergoing a 790f98fbcecSbholler * context switch. No need to awaken anyone else. 791f98fbcecSbholler */ 792f98fbcecSbholler if (cp->cpu_thread == cp->cpu_idle_thread || 793f98fbcecSbholler cp->cpu_disp_flags & CPU_DISP_DONTSTEAL) 794f98fbcecSbholler return; 795f98fbcecSbholler } 796f98fbcecSbholler 797f98fbcecSbholler /* 798f98fbcecSbholler * No need to wake up other CPUs if the thread we just enqueued 799f98fbcecSbholler * is bound. 800f98fbcecSbholler */ 8016890d023SEric Saxe if (bound || ncpus == 1) 802f98fbcecSbholler return; 803f98fbcecSbholler 804f98fbcecSbholler /* 805f98fbcecSbholler * See if there's any other halted CPUs. If there are, then 806f98fbcecSbholler * select one, and awaken it. 807f98fbcecSbholler * It's possible that after we find a CPU, somebody else 808f98fbcecSbholler * will awaken it before we get the chance. 809f98fbcecSbholler * In that case, look again. 810f98fbcecSbholler */ 811f98fbcecSbholler do { 8126890d023SEric Saxe cpu_found = bitset_find(&cpu_part->cp_haltset); 8136890d023SEric Saxe if (cpu_found == (uint_t)-1) 814f98fbcecSbholler return; 8156890d023SEric Saxe } while (bitset_atomic_test_and_del(&cpu_part->cp_haltset, 8166890d023SEric Saxe cpu_found) < 0); 817f98fbcecSbholler 818f98fbcecSbholler /* 8196890d023SEric Saxe * Do not check if cpu_found is ourself as monitor/mwait 8206890d023SEric Saxe * wakeup is cheap. 821f98fbcecSbholler */ 8226890d023SEric Saxe MWAIT_WAKEUP(cpu_seq[cpu_found]); /* write to monitored line */ 823f98fbcecSbholler } 8246890d023SEric Saxe 825843e1988Sjohnlev #endif 826f98fbcecSbholler 827ae115bc7Smrj void (*cpu_pause_handler)(volatile char *) = NULL; 828ae115bc7Smrj 8297c478bd9Sstevel@tonic-gate static int 8307c478bd9Sstevel@tonic-gate mp_disable_intr(int cpun) 8317c478bd9Sstevel@tonic-gate { 8327c478bd9Sstevel@tonic-gate /* 8337c478bd9Sstevel@tonic-gate * switch to the offline cpu 8347c478bd9Sstevel@tonic-gate */ 8357c478bd9Sstevel@tonic-gate affinity_set(cpun); 8367c478bd9Sstevel@tonic-gate /* 8377c478bd9Sstevel@tonic-gate * raise ipl to just below cross call 8387c478bd9Sstevel@tonic-gate */ 839f34a7178SJoe Bonasera splx(XC_SYS_PIL - 1); 8407c478bd9Sstevel@tonic-gate /* 8417c478bd9Sstevel@tonic-gate * set base spl to prevent the next swtch to idle from 8427c478bd9Sstevel@tonic-gate * lowering back to ipl 0 8437c478bd9Sstevel@tonic-gate */ 844f34a7178SJoe Bonasera CPU->cpu_intr_actv |= (1 << (XC_SYS_PIL - 1)); 8457c478bd9Sstevel@tonic-gate set_base_spl(); 8467c478bd9Sstevel@tonic-gate affinity_clear(); 8477c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 8487c478bd9Sstevel@tonic-gate } 8497c478bd9Sstevel@tonic-gate 8507c478bd9Sstevel@tonic-gate static void 8517c478bd9Sstevel@tonic-gate mp_enable_intr(int cpun) 8527c478bd9Sstevel@tonic-gate { 8537c478bd9Sstevel@tonic-gate /* 8547c478bd9Sstevel@tonic-gate * switch to the online cpu 8557c478bd9Sstevel@tonic-gate */ 8567c478bd9Sstevel@tonic-gate affinity_set(cpun); 8577c478bd9Sstevel@tonic-gate /* 8587c478bd9Sstevel@tonic-gate * clear the interrupt active mask 8597c478bd9Sstevel@tonic-gate */ 860f34a7178SJoe Bonasera CPU->cpu_intr_actv &= ~(1 << (XC_SYS_PIL - 1)); 8617c478bd9Sstevel@tonic-gate set_base_spl(); 8627c478bd9Sstevel@tonic-gate (void) spl0(); 8637c478bd9Sstevel@tonic-gate affinity_clear(); 8647c478bd9Sstevel@tonic-gate } 8657c478bd9Sstevel@tonic-gate 8667c478bd9Sstevel@tonic-gate static void 8677c478bd9Sstevel@tonic-gate mach_get_platform(int owner) 8687c478bd9Sstevel@tonic-gate { 8697c478bd9Sstevel@tonic-gate void **srv_opsp; 8707c478bd9Sstevel@tonic-gate void **clt_opsp; 8717c478bd9Sstevel@tonic-gate int i; 8727c478bd9Sstevel@tonic-gate int total_ops; 8737c478bd9Sstevel@tonic-gate 8747c478bd9Sstevel@tonic-gate /* fix up psm ops */ 8757c478bd9Sstevel@tonic-gate srv_opsp = (void **)mach_set[0]; 8767c478bd9Sstevel@tonic-gate clt_opsp = (void **)mach_set[owner]; 8777c478bd9Sstevel@tonic-gate if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01) 8787c478bd9Sstevel@tonic-gate total_ops = sizeof (struct psm_ops_ver01) / 8797c478bd9Sstevel@tonic-gate sizeof (void (*)(void)); 8807c478bd9Sstevel@tonic-gate else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1) 8817c478bd9Sstevel@tonic-gate /* no psm_notify_func */ 8827c478bd9Sstevel@tonic-gate total_ops = OFFSETOF(struct psm_ops, psm_notify_func) / 8837c478bd9Sstevel@tonic-gate sizeof (void (*)(void)); 8847c478bd9Sstevel@tonic-gate else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2) 8857c478bd9Sstevel@tonic-gate /* no psm_timer funcs */ 8867c478bd9Sstevel@tonic-gate total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) / 8877c478bd9Sstevel@tonic-gate sizeof (void (*)(void)); 8887c478bd9Sstevel@tonic-gate else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3) 8897c478bd9Sstevel@tonic-gate /* no psm_preshutdown function */ 8907c478bd9Sstevel@tonic-gate total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) / 8917c478bd9Sstevel@tonic-gate sizeof (void (*)(void)); 8927c478bd9Sstevel@tonic-gate else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4) 893a3114836SGerry Liu /* no psm_intr_ops function */ 8947c478bd9Sstevel@tonic-gate total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) / 8957c478bd9Sstevel@tonic-gate sizeof (void (*)(void)); 896a3114836SGerry Liu else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_5) 897a3114836SGerry Liu /* no psm_state function */ 898a3114836SGerry Liu total_ops = OFFSETOF(struct psm_ops, psm_state) / 899a3114836SGerry Liu sizeof (void (*)(void)); 900a3114836SGerry Liu else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_6) 901a3114836SGerry Liu /* no psm_cpu_ops function */ 902a3114836SGerry Liu total_ops = OFFSETOF(struct psm_ops, psm_cpu_ops) / 903a3114836SGerry Liu sizeof (void (*)(void)); 9047c478bd9Sstevel@tonic-gate else 9057c478bd9Sstevel@tonic-gate total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void)); 9067c478bd9Sstevel@tonic-gate 9077c478bd9Sstevel@tonic-gate /* 9087c478bd9Sstevel@tonic-gate * Save the version of the PSM module, in case we need to 909fb2caebeSRandy Fishel * behave differently based on version. 9107c478bd9Sstevel@tonic-gate */ 9117c478bd9Sstevel@tonic-gate mach_ver[0] = mach_ver[owner]; 9127c478bd9Sstevel@tonic-gate 9137c478bd9Sstevel@tonic-gate for (i = 0; i < total_ops; i++) 9147c478bd9Sstevel@tonic-gate if (clt_opsp[i] != NULL) 9157c478bd9Sstevel@tonic-gate srv_opsp[i] = clt_opsp[i]; 9167c478bd9Sstevel@tonic-gate } 9177c478bd9Sstevel@tonic-gate 9187c478bd9Sstevel@tonic-gate static void 9197c478bd9Sstevel@tonic-gate mach_construct_info() 9207c478bd9Sstevel@tonic-gate { 921ae115bc7Smrj struct psm_sw *swp; 9227c478bd9Sstevel@tonic-gate int mach_cnt[PSM_OWN_OVERRIDE+1] = {0}; 9237c478bd9Sstevel@tonic-gate int conflict_owner = 0; 9247c478bd9Sstevel@tonic-gate 9257c478bd9Sstevel@tonic-gate if (psmsw->psw_forw == psmsw) 9267c478bd9Sstevel@tonic-gate panic("No valid PSM modules found"); 9277c478bd9Sstevel@tonic-gate mutex_enter(&psmsw_lock); 9287c478bd9Sstevel@tonic-gate for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) { 9297c478bd9Sstevel@tonic-gate if (!(swp->psw_flag & PSM_MOD_IDENTIFY)) 9307c478bd9Sstevel@tonic-gate continue; 9317c478bd9Sstevel@tonic-gate mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops; 9327c478bd9Sstevel@tonic-gate mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version; 9337c478bd9Sstevel@tonic-gate mach_cnt[swp->psw_infop->p_owner]++; 9347c478bd9Sstevel@tonic-gate } 9357c478bd9Sstevel@tonic-gate mutex_exit(&psmsw_lock); 9367c478bd9Sstevel@tonic-gate 9377c478bd9Sstevel@tonic-gate mach_get_platform(PSM_OWN_SYS_DEFAULT); 9387c478bd9Sstevel@tonic-gate 9397c478bd9Sstevel@tonic-gate /* check to see are there any conflicts */ 9407c478bd9Sstevel@tonic-gate if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1) 9417c478bd9Sstevel@tonic-gate conflict_owner = PSM_OWN_EXCLUSIVE; 9427c478bd9Sstevel@tonic-gate if (mach_cnt[PSM_OWN_OVERRIDE] > 1) 9437c478bd9Sstevel@tonic-gate conflict_owner = PSM_OWN_OVERRIDE; 9447c478bd9Sstevel@tonic-gate if (conflict_owner) { 9457c478bd9Sstevel@tonic-gate /* remove all psm modules except uppc */ 9467c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, 9477c478bd9Sstevel@tonic-gate "Conflicts detected on the following PSM modules:"); 9487c478bd9Sstevel@tonic-gate mutex_enter(&psmsw_lock); 9497c478bd9Sstevel@tonic-gate for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) { 9507c478bd9Sstevel@tonic-gate if (swp->psw_infop->p_owner == conflict_owner) 9517c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "%s ", 9527c478bd9Sstevel@tonic-gate swp->psw_infop->p_mach_idstring); 9537c478bd9Sstevel@tonic-gate } 9547c478bd9Sstevel@tonic-gate mutex_exit(&psmsw_lock); 9557c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, 9567c478bd9Sstevel@tonic-gate "Setting the system back to SINGLE processor mode!"); 9577c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, 9587c478bd9Sstevel@tonic-gate "Please edit /etc/mach to remove the invalid PSM module."); 9597c478bd9Sstevel@tonic-gate return; 9607c478bd9Sstevel@tonic-gate } 9617c478bd9Sstevel@tonic-gate 9627c478bd9Sstevel@tonic-gate if (mach_set[PSM_OWN_EXCLUSIVE]) 9637c478bd9Sstevel@tonic-gate mach_get_platform(PSM_OWN_EXCLUSIVE); 9647c478bd9Sstevel@tonic-gate 9657c478bd9Sstevel@tonic-gate if (mach_set[PSM_OWN_OVERRIDE]) 9667c478bd9Sstevel@tonic-gate mach_get_platform(PSM_OWN_OVERRIDE); 9677c478bd9Sstevel@tonic-gate } 9687c478bd9Sstevel@tonic-gate 9697c478bd9Sstevel@tonic-gate static void 9707c478bd9Sstevel@tonic-gate mach_init() 9717c478bd9Sstevel@tonic-gate { 972ae115bc7Smrj struct psm_ops *pops; 9737c478bd9Sstevel@tonic-gate 9747c478bd9Sstevel@tonic-gate mach_construct_info(); 9757c478bd9Sstevel@tonic-gate 9767c478bd9Sstevel@tonic-gate pops = mach_set[0]; 9777c478bd9Sstevel@tonic-gate 9787c478bd9Sstevel@tonic-gate /* register the interrupt and clock initialization rotuines */ 9797c478bd9Sstevel@tonic-gate picinitf = mach_picinit; 9807c478bd9Sstevel@tonic-gate clkinitf = mach_clkinit; 9817c478bd9Sstevel@tonic-gate psm_get_clockirq = pops->psm_get_clockirq; 9827c478bd9Sstevel@tonic-gate 9837c478bd9Sstevel@tonic-gate /* register the interrupt setup code */ 9847c478bd9Sstevel@tonic-gate slvltovect = mach_softlvl_to_vect; 9857c478bd9Sstevel@tonic-gate addspl = pops->psm_addspl; 9867c478bd9Sstevel@tonic-gate delspl = pops->psm_delspl; 9877c478bd9Sstevel@tonic-gate 9887c478bd9Sstevel@tonic-gate if (pops->psm_translate_irq) 9897c478bd9Sstevel@tonic-gate psm_translate_irq = pops->psm_translate_irq; 9907c478bd9Sstevel@tonic-gate if (pops->psm_intr_ops) 9917c478bd9Sstevel@tonic-gate psm_intr_ops = pops->psm_intr_ops; 992ae115bc7Smrj 993ae115bc7Smrj #if defined(PSMI_1_2) || defined(PSMI_1_3) || defined(PSMI_1_4) 994ae115bc7Smrj /* 995ae115bc7Smrj * Time-of-day functionality now handled in TOD modules. 996ae115bc7Smrj * (Warn about PSM modules that think that we're going to use 997ae115bc7Smrj * their ops vectors.) 998ae115bc7Smrj */ 999ae115bc7Smrj if (pops->psm_tod_get) 1000ae115bc7Smrj cmn_err(CE_WARN, "obsolete psm_tod_get op %p", 1001ae115bc7Smrj (void *)pops->psm_tod_get); 1002ae115bc7Smrj 1003ae115bc7Smrj if (pops->psm_tod_set) 1004ae115bc7Smrj cmn_err(CE_WARN, "obsolete psm_tod_set op %p", 1005ae115bc7Smrj (void *)pops->psm_tod_set); 1006ae115bc7Smrj #endif 1007ae115bc7Smrj 10087c478bd9Sstevel@tonic-gate if (pops->psm_notify_error) { 10097c478bd9Sstevel@tonic-gate psm_notify_error = mach_notify_error; 10107c478bd9Sstevel@tonic-gate notify_error = pops->psm_notify_error; 10117c478bd9Sstevel@tonic-gate } 10127c478bd9Sstevel@tonic-gate 10137c478bd9Sstevel@tonic-gate (*pops->psm_softinit)(); 10147c478bd9Sstevel@tonic-gate 10157c478bd9Sstevel@tonic-gate /* 10160e751525SEric Saxe * Initialize the dispatcher's function hooks to enable CPU halting 10170e751525SEric Saxe * when idle. Set both the deep-idle and non-deep-idle hooks. 10180e751525SEric Saxe * 10190e751525SEric Saxe * Assume we can use power saving deep-idle loop cpu_idle_adaptive. 10200e751525SEric Saxe * Platform deep-idle driver will reset our idle loop to 10210e751525SEric Saxe * non_deep_idle_cpu if power saving deep-idle feature is not available. 10220e751525SEric Saxe * 10235b8a6efeSbholler * Do not use monitor/mwait if idle_cpu_use_hlt is not set(spin idle) 10245b8a6efeSbholler * or idle_cpu_prefer_mwait is not set. 1025f98fbcecSbholler * Allocate monitor/mwait buffer for cpu0. 10267c478bd9Sstevel@tonic-gate */ 10270e751525SEric Saxe #ifndef __xpv 10280e751525SEric Saxe non_deep_idle_disp_enq_thread = disp_enq_thread; 10290e751525SEric Saxe #endif 1030f98fbcecSbholler if (idle_cpu_use_hlt) { 10310e751525SEric Saxe idle_cpu = cpu_idle_adaptive; 10320e751525SEric Saxe CPU->cpu_m.mcpu_idle_cpu = cpu_idle; 1033843e1988Sjohnlev #ifndef __xpv 10347417cfdeSKuriakose Kuruvilla if (is_x86_feature(x86_featureset, X86FSET_MWAIT) && 10357417cfdeSKuriakose Kuruvilla idle_cpu_prefer_mwait) { 10365b8a6efeSbholler CPU->cpu_m.mcpu_mwait = cpuid_mwait_alloc(CPU); 10375b8a6efeSbholler /* 10385b8a6efeSbholler * Protect ourself from insane mwait size. 10395b8a6efeSbholler */ 10405b8a6efeSbholler if (CPU->cpu_m.mcpu_mwait == NULL) { 10415b8a6efeSbholler #ifdef DEBUG 10425b8a6efeSbholler cmn_err(CE_NOTE, "Using hlt idle. Cannot " 10435b8a6efeSbholler "handle cpu 0 mwait size."); 10445b8a6efeSbholler #endif 10455b8a6efeSbholler idle_cpu_prefer_mwait = 0; 10460e751525SEric Saxe CPU->cpu_m.mcpu_idle_cpu = cpu_idle; 10475b8a6efeSbholler } else { 10480e751525SEric Saxe CPU->cpu_m.mcpu_idle_cpu = cpu_idle_mwait; 10495b8a6efeSbholler } 1050f98fbcecSbholler } else { 10510e751525SEric Saxe CPU->cpu_m.mcpu_idle_cpu = cpu_idle; 10520e751525SEric Saxe } 10530e751525SEric Saxe non_deep_idle_cpu = CPU->cpu_m.mcpu_idle_cpu; 10540e751525SEric Saxe 10550e751525SEric Saxe /* 10560e751525SEric Saxe * Disable power saving deep idle loop? 10570e751525SEric Saxe */ 10580e751525SEric Saxe if (idle_cpu_no_deep_c) { 10590e751525SEric Saxe idle_cpu = non_deep_idle_cpu; 1060f98fbcecSbholler } 1061843e1988Sjohnlev #endif 1062f98fbcecSbholler } 10637c478bd9Sstevel@tonic-gate 10647c478bd9Sstevel@tonic-gate mach_smpinit(); 10657c478bd9Sstevel@tonic-gate } 10667c478bd9Sstevel@tonic-gate 10677c478bd9Sstevel@tonic-gate static void 10687c478bd9Sstevel@tonic-gate mach_smpinit(void) 10697c478bd9Sstevel@tonic-gate { 107041791439Sandrei struct psm_ops *pops; 107141791439Sandrei processorid_t cpu_id; 10727c478bd9Sstevel@tonic-gate int cnt; 107341791439Sandrei cpuset_t cpumask; 10747c478bd9Sstevel@tonic-gate 10757c478bd9Sstevel@tonic-gate pops = mach_set[0]; 1076a563a037Sbholler CPUSET_ZERO(cpumask); 10777c478bd9Sstevel@tonic-gate 10787c478bd9Sstevel@tonic-gate cpu_id = -1; 10797c478bd9Sstevel@tonic-gate cpu_id = (*pops->psm_get_next_processorid)(cpu_id); 1080a3114836SGerry Liu /* 1081a3114836SGerry Liu * Only add boot_ncpus CPUs to mp_cpus. Other CPUs will be handled 1082a3114836SGerry Liu * by CPU DR driver at runtime. 1083a3114836SGerry Liu */ 1084a3114836SGerry Liu for (cnt = 0; cpu_id != -1 && cnt < boot_ncpus; cnt++) { 108541791439Sandrei CPUSET_ADD(cpumask, cpu_id); 10867c478bd9Sstevel@tonic-gate cpu_id = (*pops->psm_get_next_processorid)(cpu_id); 10877c478bd9Sstevel@tonic-gate } 10887c478bd9Sstevel@tonic-gate 10897c478bd9Sstevel@tonic-gate mp_cpus = cpumask; 10907c478bd9Sstevel@tonic-gate 10917c478bd9Sstevel@tonic-gate /* MP related routines */ 10927c478bd9Sstevel@tonic-gate ap_mlsetup = pops->psm_post_cpu_start; 10937c478bd9Sstevel@tonic-gate send_dirintf = pops->psm_send_ipi; 10947c478bd9Sstevel@tonic-gate 10957c478bd9Sstevel@tonic-gate /* optional MP related routines */ 10967c478bd9Sstevel@tonic-gate if (pops->psm_shutdown) 10977c478bd9Sstevel@tonic-gate psm_shutdownf = pops->psm_shutdown; 10987c478bd9Sstevel@tonic-gate if (pops->psm_preshutdown) 10997c478bd9Sstevel@tonic-gate psm_preshutdownf = pops->psm_preshutdown; 11007c478bd9Sstevel@tonic-gate if (pops->psm_notify_func) 11017c478bd9Sstevel@tonic-gate psm_notifyf = pops->psm_notify_func; 11027c478bd9Sstevel@tonic-gate if (pops->psm_set_idlecpu) 11037c478bd9Sstevel@tonic-gate psm_set_idle_cpuf = pops->psm_set_idlecpu; 11047c478bd9Sstevel@tonic-gate if (pops->psm_unset_idlecpu) 11057c478bd9Sstevel@tonic-gate psm_unset_idle_cpuf = pops->psm_unset_idlecpu; 11067c478bd9Sstevel@tonic-gate 11077c478bd9Sstevel@tonic-gate psm_clkinit = pops->psm_clkinit; 11087c478bd9Sstevel@tonic-gate 11097c478bd9Sstevel@tonic-gate if (pops->psm_timer_reprogram) 11107c478bd9Sstevel@tonic-gate psm_timer_reprogram = pops->psm_timer_reprogram; 11117c478bd9Sstevel@tonic-gate 11127c478bd9Sstevel@tonic-gate if (pops->psm_timer_enable) 11137c478bd9Sstevel@tonic-gate psm_timer_enable = pops->psm_timer_enable; 11147c478bd9Sstevel@tonic-gate 11157c478bd9Sstevel@tonic-gate if (pops->psm_timer_disable) 11167c478bd9Sstevel@tonic-gate psm_timer_disable = pops->psm_timer_disable; 11177c478bd9Sstevel@tonic-gate 11187c478bd9Sstevel@tonic-gate if (pops->psm_post_cyclic_setup) 11197c478bd9Sstevel@tonic-gate psm_post_cyclic_setup = pops->psm_post_cyclic_setup; 11207c478bd9Sstevel@tonic-gate 11212df1fe9cSrandyf if (pops->psm_state) 11222df1fe9cSrandyf psm_state = pops->psm_state; 11232df1fe9cSrandyf 1124643e2e74Sbholler /* 1125643e2e74Sbholler * Set these vectors here so they can be used by Suspend/Resume 1126643e2e74Sbholler * on UP machines. 1127643e2e74Sbholler */ 1128643e2e74Sbholler if (pops->psm_disable_intr) 1129643e2e74Sbholler psm_disable_intr = pops->psm_disable_intr; 1130643e2e74Sbholler if (pops->psm_enable_intr) 1131643e2e74Sbholler psm_enable_intr = pops->psm_enable_intr; 1132643e2e74Sbholler 1133643e2e74Sbholler /* check for multiple CPUs */ 1134a3114836SGerry Liu if (cnt < 2 && plat_dr_support_cpu() == B_FALSE) 11357c478bd9Sstevel@tonic-gate return; 11367c478bd9Sstevel@tonic-gate 11377c478bd9Sstevel@tonic-gate /* check for MP platforms */ 11387c478bd9Sstevel@tonic-gate if (pops->psm_cpu_start == NULL) 11397c478bd9Sstevel@tonic-gate return; 11407c478bd9Sstevel@tonic-gate 11417c478bd9Sstevel@tonic-gate /* 11427c478bd9Sstevel@tonic-gate * Set the dispatcher hook to enable cpu "wake up" 11437c478bd9Sstevel@tonic-gate * when a thread becomes runnable. 11447c478bd9Sstevel@tonic-gate */ 1145843e1988Sjohnlev if (idle_cpu_use_hlt) { 1146843e1988Sjohnlev disp_enq_thread = cpu_wakeup; 1147843e1988Sjohnlev #ifndef __xpv 11487417cfdeSKuriakose Kuruvilla if (is_x86_feature(x86_featureset, X86FSET_MWAIT) && 11497417cfdeSKuriakose Kuruvilla idle_cpu_prefer_mwait) 1150f98fbcecSbholler disp_enq_thread = cpu_wakeup_mwait; 11510e751525SEric Saxe non_deep_idle_disp_enq_thread = disp_enq_thread; 1152843e1988Sjohnlev #endif 1153843e1988Sjohnlev } 11547c478bd9Sstevel@tonic-gate 11557c478bd9Sstevel@tonic-gate psm_get_ipivect = pops->psm_get_ipivect; 11567c478bd9Sstevel@tonic-gate 1157f34a7178SJoe Bonasera (void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_intr", 11587c478bd9Sstevel@tonic-gate (*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI), 1159f34a7178SJoe Bonasera NULL, NULL, NULL, NULL); 11607c478bd9Sstevel@tonic-gate 11617c478bd9Sstevel@tonic-gate (void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE); 11627c478bd9Sstevel@tonic-gate } 11637c478bd9Sstevel@tonic-gate 11647c478bd9Sstevel@tonic-gate static void 11657c478bd9Sstevel@tonic-gate mach_picinit() 11667c478bd9Sstevel@tonic-gate { 116741791439Sandrei struct psm_ops *pops; 11687c478bd9Sstevel@tonic-gate 11697c478bd9Sstevel@tonic-gate pops = mach_set[0]; 11707c478bd9Sstevel@tonic-gate 11717c478bd9Sstevel@tonic-gate /* register the interrupt handlers */ 11727c478bd9Sstevel@tonic-gate setlvl = pops->psm_intr_enter; 11737c478bd9Sstevel@tonic-gate setlvlx = pops->psm_intr_exit; 11747c478bd9Sstevel@tonic-gate 11757c478bd9Sstevel@tonic-gate /* initialize the interrupt hardware */ 11767c478bd9Sstevel@tonic-gate (*pops->psm_picinit)(); 11777c478bd9Sstevel@tonic-gate 11787c478bd9Sstevel@tonic-gate /* set interrupt mask for current ipl */ 11797c478bd9Sstevel@tonic-gate setspl = pops->psm_setspl; 1180ae115bc7Smrj cli(); 11817c478bd9Sstevel@tonic-gate setspl(CPU->cpu_pri); 11827c478bd9Sstevel@tonic-gate } 11837c478bd9Sstevel@tonic-gate 11847c478bd9Sstevel@tonic-gate uint_t cpu_freq; /* MHz */ 11857c478bd9Sstevel@tonic-gate uint64_t cpu_freq_hz; /* measured (in hertz) */ 11867c478bd9Sstevel@tonic-gate 11877c478bd9Sstevel@tonic-gate #define MEGA_HZ 1000000 11887c478bd9Sstevel@tonic-gate 1189843e1988Sjohnlev #ifdef __xpv 1190843e1988Sjohnlev 1191843e1988Sjohnlev int xpv_cpufreq_workaround = 1; 1192843e1988Sjohnlev int xpv_cpufreq_verbose = 0; 1193843e1988Sjohnlev 1194843e1988Sjohnlev #else /* __xpv */ 1195843e1988Sjohnlev 11967c478bd9Sstevel@tonic-gate static uint64_t 11977c478bd9Sstevel@tonic-gate mach_calchz(uint32_t pit_counter, uint64_t *processor_clks) 11987c478bd9Sstevel@tonic-gate { 11997c478bd9Sstevel@tonic-gate uint64_t cpu_hz; 12007c478bd9Sstevel@tonic-gate 12017c478bd9Sstevel@tonic-gate if ((pit_counter == 0) || (*processor_clks == 0) || 12027c478bd9Sstevel@tonic-gate (*processor_clks > (((uint64_t)-1) / PIT_HZ))) 12037c478bd9Sstevel@tonic-gate return (0); 12047c478bd9Sstevel@tonic-gate 12057c478bd9Sstevel@tonic-gate cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter; 12067c478bd9Sstevel@tonic-gate 12077c478bd9Sstevel@tonic-gate return (cpu_hz); 12087c478bd9Sstevel@tonic-gate } 12097c478bd9Sstevel@tonic-gate 1210843e1988Sjohnlev #endif /* __xpv */ 1211843e1988Sjohnlev 12127c478bd9Sstevel@tonic-gate static uint64_t 12137c478bd9Sstevel@tonic-gate mach_getcpufreq(void) 12147c478bd9Sstevel@tonic-gate { 1215843e1988Sjohnlev #if defined(__xpv) 1216843e1988Sjohnlev vcpu_time_info_t *vti = &CPU->cpu_m.mcpu_vcpu_info->time; 1217843e1988Sjohnlev uint64_t cpu_hz; 1218843e1988Sjohnlev 1219843e1988Sjohnlev /* 1220843e1988Sjohnlev * During dom0 bringup, it was noted that on at least one older 1221843e1988Sjohnlev * Intel HT machine, the hypervisor initially gives a tsc_to_system_mul 1222843e1988Sjohnlev * value that is quite wrong (the 3.06GHz clock was reported 1223843e1988Sjohnlev * as 4.77GHz) 1224843e1988Sjohnlev * 1225843e1988Sjohnlev * The curious thing is, that if you stop the kernel at entry, 1226843e1988Sjohnlev * breakpoint here and inspect the value with kmdb, the value 1227843e1988Sjohnlev * is correct - but if you don't stop and simply enable the 1228843e1988Sjohnlev * printf statement (below), you can see the bad value printed 1229843e1988Sjohnlev * here. Almost as if something kmdb did caused the hypervisor to 1230843e1988Sjohnlev * figure it out correctly. And, note that the hypervisor 1231843e1988Sjohnlev * eventually -does- figure it out correctly ... if you look at 1232843e1988Sjohnlev * the field later in the life of dom0, it is correct. 1233843e1988Sjohnlev * 1234843e1988Sjohnlev * For now, on dom0, we employ a slightly cheesy workaround of 1235843e1988Sjohnlev * using the DOM0_PHYSINFO hypercall. 1236843e1988Sjohnlev */ 1237843e1988Sjohnlev if (DOMAIN_IS_INITDOMAIN(xen_info) && xpv_cpufreq_workaround) { 1238349b53ddSStuart Maybee cpu_hz = 1000 * xpv_cpu_khz(); 1239843e1988Sjohnlev } else { 1240843e1988Sjohnlev cpu_hz = (UINT64_C(1000000000) << 32) / vti->tsc_to_system_mul; 1241843e1988Sjohnlev 1242843e1988Sjohnlev if (vti->tsc_shift < 0) 1243843e1988Sjohnlev cpu_hz <<= -vti->tsc_shift; 1244843e1988Sjohnlev else 1245843e1988Sjohnlev cpu_hz >>= vti->tsc_shift; 1246843e1988Sjohnlev } 1247843e1988Sjohnlev 1248843e1988Sjohnlev if (xpv_cpufreq_verbose) 1249843e1988Sjohnlev printf("mach_getcpufreq: system_mul 0x%x, shift %d, " 1250843e1988Sjohnlev "cpu_hz %" PRId64 "Hz\n", 1251843e1988Sjohnlev vti->tsc_to_system_mul, vti->tsc_shift, cpu_hz); 1252843e1988Sjohnlev 1253843e1988Sjohnlev return (cpu_hz); 1254843e1988Sjohnlev #else /* __xpv */ 12557c478bd9Sstevel@tonic-gate uint32_t pit_counter; 12567c478bd9Sstevel@tonic-gate uint64_t processor_clks; 12577c478bd9Sstevel@tonic-gate 12587417cfdeSKuriakose Kuruvilla if (is_x86_feature(x86_featureset, X86FSET_TSC)) { 12597c478bd9Sstevel@tonic-gate /* 12607c478bd9Sstevel@tonic-gate * We have a TSC. freq_tsc() knows how to measure the number 12617c478bd9Sstevel@tonic-gate * of clock cycles sampled against the PIT. 12627c478bd9Sstevel@tonic-gate */ 1263ae115bc7Smrj ulong_t flags = clear_int_flag(); 12647c478bd9Sstevel@tonic-gate processor_clks = freq_tsc(&pit_counter); 1265ae115bc7Smrj restore_int_flag(flags); 12667c478bd9Sstevel@tonic-gate return (mach_calchz(pit_counter, &processor_clks)); 12677c478bd9Sstevel@tonic-gate } else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) { 12687c478bd9Sstevel@tonic-gate #if defined(__amd64) 12697c478bd9Sstevel@tonic-gate panic("mach_getcpufreq: no TSC!"); 12707c478bd9Sstevel@tonic-gate #elif defined(__i386) 12717c478bd9Sstevel@tonic-gate /* 12727c478bd9Sstevel@tonic-gate * We are a Cyrix based on a 6x86 core or an Intel Pentium 12737c478bd9Sstevel@tonic-gate * for which freq_notsc() knows how to measure the number of 12747c478bd9Sstevel@tonic-gate * elapsed clock cycles sampled against the PIT 12757c478bd9Sstevel@tonic-gate */ 1276ae115bc7Smrj ulong_t flags = clear_int_flag(); 12777c478bd9Sstevel@tonic-gate processor_clks = freq_notsc(&pit_counter); 1278ae115bc7Smrj restore_int_flag(flags); 12797c478bd9Sstevel@tonic-gate return (mach_calchz(pit_counter, &processor_clks)); 12807c478bd9Sstevel@tonic-gate #endif /* __i386 */ 12817c478bd9Sstevel@tonic-gate } 12827c478bd9Sstevel@tonic-gate 12837c478bd9Sstevel@tonic-gate /* We do not know how to calculate cpu frequency for this cpu. */ 12847c478bd9Sstevel@tonic-gate return (0); 1285843e1988Sjohnlev #endif /* __xpv */ 12867c478bd9Sstevel@tonic-gate } 12877c478bd9Sstevel@tonic-gate 12887c478bd9Sstevel@tonic-gate /* 12897c478bd9Sstevel@tonic-gate * If the clock speed of a cpu is found to be reported incorrectly, do not add 12907c478bd9Sstevel@tonic-gate * to this array, instead improve the accuracy of the algorithm that determines 12917c478bd9Sstevel@tonic-gate * the clock speed of the processor or extend the implementation to support the 12927c478bd9Sstevel@tonic-gate * vendor as appropriate. This is here only to support adjusting the speed on 12937c478bd9Sstevel@tonic-gate * older slower processors that mach_fixcpufreq() would not be able to account 12947c478bd9Sstevel@tonic-gate * for otherwise. 12957c478bd9Sstevel@tonic-gate */ 12967c478bd9Sstevel@tonic-gate static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 }; 12977c478bd9Sstevel@tonic-gate 12987c478bd9Sstevel@tonic-gate /* 12997c478bd9Sstevel@tonic-gate * On fast processors the clock frequency that is measured may be off by 13007c478bd9Sstevel@tonic-gate * a few MHz from the value printed on the part. This is a combination of 13017c478bd9Sstevel@tonic-gate * the factors that for such fast parts being off by this much is within 13027c478bd9Sstevel@tonic-gate * the tolerances for manufacture and because of the difficulties in the 13037c478bd9Sstevel@tonic-gate * measurement that can lead to small error. This function uses some 13047c478bd9Sstevel@tonic-gate * heuristics in order to tweak the value that was measured to match what 13057c478bd9Sstevel@tonic-gate * is most likely printed on the part. 13067c478bd9Sstevel@tonic-gate * 13077c478bd9Sstevel@tonic-gate * Some examples: 13087c478bd9Sstevel@tonic-gate * AMD Athlon 1000 mhz measured as 998 mhz 13097c478bd9Sstevel@tonic-gate * Intel Pentium III Xeon 733 mhz measured as 731 mhz 13107c478bd9Sstevel@tonic-gate * Intel Pentium IV 1500 mhz measured as 1495mhz 13117c478bd9Sstevel@tonic-gate * 13127c478bd9Sstevel@tonic-gate * If in the future this function is no longer sufficient to correct 13137c478bd9Sstevel@tonic-gate * for the error in the measurement, then the algorithm used to perform 13147c478bd9Sstevel@tonic-gate * the measurement will have to be improved in order to increase accuracy 13157c478bd9Sstevel@tonic-gate * rather than adding horrible and questionable kludges here. 13167c478bd9Sstevel@tonic-gate * 13177c478bd9Sstevel@tonic-gate * This is called after the cyclics subsystem because of the potential 13187c478bd9Sstevel@tonic-gate * that the heuristics within may give a worse estimate of the clock 13197c478bd9Sstevel@tonic-gate * frequency than the value that was measured. 13207c478bd9Sstevel@tonic-gate */ 13217c478bd9Sstevel@tonic-gate static void 13227c478bd9Sstevel@tonic-gate mach_fixcpufreq(void) 13237c478bd9Sstevel@tonic-gate { 13247c478bd9Sstevel@tonic-gate uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i; 13257c478bd9Sstevel@tonic-gate 13267c478bd9Sstevel@tonic-gate freq = (uint32_t)cpu_freq; 13277c478bd9Sstevel@tonic-gate 13287c478bd9Sstevel@tonic-gate /* 13297c478bd9Sstevel@tonic-gate * Find the nearest integer multiple of 200/3 (about 66) MHz to the 13307c478bd9Sstevel@tonic-gate * measured speed taking into account that the 667 MHz parts were 13317c478bd9Sstevel@tonic-gate * the first to round-up. 13327c478bd9Sstevel@tonic-gate */ 13337c478bd9Sstevel@tonic-gate mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200); 13347c478bd9Sstevel@tonic-gate near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3); 13357c478bd9Sstevel@tonic-gate delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66); 13367c478bd9Sstevel@tonic-gate 13377c478bd9Sstevel@tonic-gate /* Find the nearest integer multiple of 50 MHz to the measured speed */ 13387c478bd9Sstevel@tonic-gate mul = (freq + 25) / 50; 13397c478bd9Sstevel@tonic-gate near50 = mul * 50; 13407c478bd9Sstevel@tonic-gate delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50); 13417c478bd9Sstevel@tonic-gate 13427c478bd9Sstevel@tonic-gate /* Find the closer of the two */ 13437c478bd9Sstevel@tonic-gate if (delta66 < delta50) { 13447c478bd9Sstevel@tonic-gate fixed = near66; 13457c478bd9Sstevel@tonic-gate delta = delta66; 13467c478bd9Sstevel@tonic-gate } else { 13477c478bd9Sstevel@tonic-gate fixed = near50; 13487c478bd9Sstevel@tonic-gate delta = delta50; 13497c478bd9Sstevel@tonic-gate } 13507c478bd9Sstevel@tonic-gate 13517c478bd9Sstevel@tonic-gate if (fixed > INT_MAX) 13527c478bd9Sstevel@tonic-gate return; 13537c478bd9Sstevel@tonic-gate 13547c478bd9Sstevel@tonic-gate /* 13557c478bd9Sstevel@tonic-gate * Some older parts have a core clock frequency that is not an 13567c478bd9Sstevel@tonic-gate * integral multiple of 50 or 66 MHz. Check if one of the old 13577c478bd9Sstevel@tonic-gate * clock frequencies is closer to the measured value than any 13587c478bd9Sstevel@tonic-gate * of the integral multiples of 50 an 66, and if so set fixed 13597c478bd9Sstevel@tonic-gate * and delta appropriately to represent the closest value. 13607c478bd9Sstevel@tonic-gate */ 13617c478bd9Sstevel@tonic-gate i = sizeof (x86_cpu_freq) / sizeof (int); 13627c478bd9Sstevel@tonic-gate while (i > 0) { 13637c478bd9Sstevel@tonic-gate i--; 13647c478bd9Sstevel@tonic-gate 13657c478bd9Sstevel@tonic-gate if (x86_cpu_freq[i] <= freq) { 13667c478bd9Sstevel@tonic-gate mul = freq - x86_cpu_freq[i]; 13677c478bd9Sstevel@tonic-gate 13687c478bd9Sstevel@tonic-gate if (mul < delta) { 13697c478bd9Sstevel@tonic-gate fixed = x86_cpu_freq[i]; 13707c478bd9Sstevel@tonic-gate delta = mul; 13717c478bd9Sstevel@tonic-gate } 13727c478bd9Sstevel@tonic-gate 13737c478bd9Sstevel@tonic-gate break; 13747c478bd9Sstevel@tonic-gate } 13757c478bd9Sstevel@tonic-gate 13767c478bd9Sstevel@tonic-gate mul = x86_cpu_freq[i] - freq; 13777c478bd9Sstevel@tonic-gate 13787c478bd9Sstevel@tonic-gate if (mul < delta) { 13797c478bd9Sstevel@tonic-gate fixed = x86_cpu_freq[i]; 13807c478bd9Sstevel@tonic-gate delta = mul; 13817c478bd9Sstevel@tonic-gate } 13827c478bd9Sstevel@tonic-gate } 13837c478bd9Sstevel@tonic-gate 13847c478bd9Sstevel@tonic-gate /* 13857c478bd9Sstevel@tonic-gate * Set a reasonable maximum for how much to correct the measured 13867c478bd9Sstevel@tonic-gate * result by. This check is here to prevent the adjustment made 13877c478bd9Sstevel@tonic-gate * by this function from being more harm than good. It is entirely 13887c478bd9Sstevel@tonic-gate * possible that in the future parts will be made that are not 13897c478bd9Sstevel@tonic-gate * integral multiples of 66 or 50 in clock frequency or that 13907c478bd9Sstevel@tonic-gate * someone may overclock a part to some odd frequency. If the 13917c478bd9Sstevel@tonic-gate * measured value is farther from the corrected value than 13927c478bd9Sstevel@tonic-gate * allowed, then assume the corrected value is in error and use 13937c478bd9Sstevel@tonic-gate * the measured value. 13947c478bd9Sstevel@tonic-gate */ 13957c478bd9Sstevel@tonic-gate if (6 < delta) 13967c478bd9Sstevel@tonic-gate return; 13977c478bd9Sstevel@tonic-gate 13987c478bd9Sstevel@tonic-gate cpu_freq = (int)fixed; 13997c478bd9Sstevel@tonic-gate } 14007c478bd9Sstevel@tonic-gate 14017c478bd9Sstevel@tonic-gate 14027c478bd9Sstevel@tonic-gate static int 14037c478bd9Sstevel@tonic-gate machhztomhz(uint64_t cpu_freq_hz) 14047c478bd9Sstevel@tonic-gate { 14057c478bd9Sstevel@tonic-gate uint64_t cpu_mhz; 14067c478bd9Sstevel@tonic-gate 14077c478bd9Sstevel@tonic-gate /* Round to nearest MHZ */ 14087c478bd9Sstevel@tonic-gate cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ; 14097c478bd9Sstevel@tonic-gate 14107c478bd9Sstevel@tonic-gate if (cpu_mhz > INT_MAX) 14117c478bd9Sstevel@tonic-gate return (0); 14127c478bd9Sstevel@tonic-gate 14137c478bd9Sstevel@tonic-gate return ((int)cpu_mhz); 14147c478bd9Sstevel@tonic-gate 14157c478bd9Sstevel@tonic-gate } 14167c478bd9Sstevel@tonic-gate 14177c478bd9Sstevel@tonic-gate 14187c478bd9Sstevel@tonic-gate static int 14197c478bd9Sstevel@tonic-gate mach_clkinit(int preferred_mode, int *set_mode) 14207c478bd9Sstevel@tonic-gate { 1421ae115bc7Smrj struct psm_ops *pops; 14227c478bd9Sstevel@tonic-gate int resolution; 14237c478bd9Sstevel@tonic-gate 14247c478bd9Sstevel@tonic-gate pops = mach_set[0]; 14257c478bd9Sstevel@tonic-gate 14267c478bd9Sstevel@tonic-gate cpu_freq_hz = mach_getcpufreq(); 14277c478bd9Sstevel@tonic-gate 14287c478bd9Sstevel@tonic-gate cpu_freq = machhztomhz(cpu_freq_hz); 14297c478bd9Sstevel@tonic-gate 14307417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_TSC) || (cpu_freq == 0)) 14317c478bd9Sstevel@tonic-gate tsc_gethrtime_enable = 0; 14327c478bd9Sstevel@tonic-gate 1433843e1988Sjohnlev #ifndef __xpv 14347c478bd9Sstevel@tonic-gate if (tsc_gethrtime_enable) { 14357c478bd9Sstevel@tonic-gate tsc_hrtimeinit(cpu_freq_hz); 1436843e1988Sjohnlev } else 1437843e1988Sjohnlev #endif 1438843e1988Sjohnlev { 14397c478bd9Sstevel@tonic-gate if (pops->psm_hrtimeinit) 14407c478bd9Sstevel@tonic-gate (*pops->psm_hrtimeinit)(); 14417c478bd9Sstevel@tonic-gate gethrtimef = pops->psm_gethrtime; 14427c478bd9Sstevel@tonic-gate gethrtimeunscaledf = gethrtimef; 14437c478bd9Sstevel@tonic-gate /* scalehrtimef will remain dummy */ 14447c478bd9Sstevel@tonic-gate } 14457c478bd9Sstevel@tonic-gate 14467c478bd9Sstevel@tonic-gate mach_fixcpufreq(); 14477c478bd9Sstevel@tonic-gate 14487c478bd9Sstevel@tonic-gate if (mach_ver[0] >= PSM_INFO_VER01_3) { 1449843e1988Sjohnlev if (preferred_mode == TIMER_ONESHOT) { 14507c478bd9Sstevel@tonic-gate 14517c478bd9Sstevel@tonic-gate resolution = (*pops->psm_clkinit)(0); 14527c478bd9Sstevel@tonic-gate if (resolution != 0) { 14537c478bd9Sstevel@tonic-gate *set_mode = TIMER_ONESHOT; 14547c478bd9Sstevel@tonic-gate return (resolution); 14557c478bd9Sstevel@tonic-gate } 14567c478bd9Sstevel@tonic-gate } 14577c478bd9Sstevel@tonic-gate 14587c478bd9Sstevel@tonic-gate /* 14597c478bd9Sstevel@tonic-gate * either periodic mode was requested or could not set to 14607c478bd9Sstevel@tonic-gate * one-shot mode 14617c478bd9Sstevel@tonic-gate */ 14627c478bd9Sstevel@tonic-gate resolution = (*pops->psm_clkinit)(hz); 14637c478bd9Sstevel@tonic-gate /* 14647c478bd9Sstevel@tonic-gate * psm should be able to do periodic, so we do not check 14657c478bd9Sstevel@tonic-gate * for return value of psm_clkinit here. 14667c478bd9Sstevel@tonic-gate */ 14677c478bd9Sstevel@tonic-gate *set_mode = TIMER_PERIODIC; 14687c478bd9Sstevel@tonic-gate return (resolution); 14697c478bd9Sstevel@tonic-gate } else { 14707c478bd9Sstevel@tonic-gate /* 14717c478bd9Sstevel@tonic-gate * PSMI interface prior to PSMI_3 does not define a return 14727c478bd9Sstevel@tonic-gate * value for psm_clkinit, so the return value is ignored. 14737c478bd9Sstevel@tonic-gate */ 14747c478bd9Sstevel@tonic-gate (void) (*pops->psm_clkinit)(hz); 14757c478bd9Sstevel@tonic-gate *set_mode = TIMER_PERIODIC; 14767c478bd9Sstevel@tonic-gate return (nsec_per_tick); 14777c478bd9Sstevel@tonic-gate } 14787c478bd9Sstevel@tonic-gate } 14797c478bd9Sstevel@tonic-gate 1480a1af7ba0Scwb 1481e23a7e34Slq150181 /*ARGSUSED*/ 14827c478bd9Sstevel@tonic-gate static int 1483ae115bc7Smrj mach_softlvl_to_vect(int ipl) 14847c478bd9Sstevel@tonic-gate { 1485e23a7e34Slq150181 setsoftint = av_set_softint_pending; 1486a1af7ba0Scwb kdisetsoftint = kdi_av_set_softint_pending; 14877c478bd9Sstevel@tonic-gate 14887c478bd9Sstevel@tonic-gate return (PSM_SV_SOFTWARE); 14897c478bd9Sstevel@tonic-gate } 14907c478bd9Sstevel@tonic-gate 1491ae115bc7Smrj #ifdef DEBUG 1492ae115bc7Smrj /* 1493ae115bc7Smrj * This is here to allow us to simulate cpus that refuse to start. 1494ae115bc7Smrj */ 1495ae115bc7Smrj cpuset_t cpufailset; 1496ae115bc7Smrj #endif 1497ae115bc7Smrj 1498ae115bc7Smrj int 1499ae115bc7Smrj mach_cpu_start(struct cpu *cp, void *ctx) 15007c478bd9Sstevel@tonic-gate { 1501ae115bc7Smrj struct psm_ops *pops = mach_set[0]; 1502ae115bc7Smrj processorid_t id = cp->cpu_id; 15037c478bd9Sstevel@tonic-gate 1504ae115bc7Smrj #ifdef DEBUG 1505ae115bc7Smrj if (CPU_IN_SET(cpufailset, id)) 1506ae115bc7Smrj return (0); 1507ae115bc7Smrj #endif 1508ae115bc7Smrj return ((*pops->psm_cpu_start)(id, ctx)); 15097c478bd9Sstevel@tonic-gate } 15107c478bd9Sstevel@tonic-gate 15112df1fe9cSrandyf int 15122df1fe9cSrandyf mach_cpuid_start(processorid_t id, void *ctx) 15132df1fe9cSrandyf { 15142df1fe9cSrandyf struct psm_ops *pops = mach_set[0]; 15152df1fe9cSrandyf 15162df1fe9cSrandyf #ifdef DEBUG 15172df1fe9cSrandyf if (CPU_IN_SET(cpufailset, id)) 15182df1fe9cSrandyf return (0); 15192df1fe9cSrandyf #endif 15202df1fe9cSrandyf return ((*pops->psm_cpu_start)(id, ctx)); 15212df1fe9cSrandyf } 15222df1fe9cSrandyf 1523a3114836SGerry Liu int 1524a3114836SGerry Liu mach_cpu_stop(cpu_t *cp, void *ctx) 1525a3114836SGerry Liu { 1526a3114836SGerry Liu struct psm_ops *pops = mach_set[0]; 1527a3114836SGerry Liu psm_cpu_request_t request; 1528a3114836SGerry Liu 1529a3114836SGerry Liu if (pops->psm_cpu_ops == NULL) { 1530a3114836SGerry Liu return (ENOTSUP); 1531a3114836SGerry Liu } 1532a3114836SGerry Liu 1533a3114836SGerry Liu ASSERT(cp->cpu_id != -1); 1534a3114836SGerry Liu request.pcr_cmd = PSM_CPU_STOP; 1535a3114836SGerry Liu request.req.cpu_stop.cpuid = cp->cpu_id; 1536a3114836SGerry Liu request.req.cpu_stop.ctx = ctx; 1537a3114836SGerry Liu 1538a3114836SGerry Liu return ((*pops->psm_cpu_ops)(&request)); 1539a3114836SGerry Liu } 1540a3114836SGerry Liu 1541a3114836SGerry Liu int 1542a3114836SGerry Liu mach_cpu_add(mach_cpu_add_arg_t *argp, processorid_t *cpuidp) 1543a3114836SGerry Liu { 1544a3114836SGerry Liu int rc; 1545a3114836SGerry Liu struct psm_ops *pops = mach_set[0]; 1546a3114836SGerry Liu psm_cpu_request_t request; 1547a3114836SGerry Liu 1548a3114836SGerry Liu if (pops->psm_cpu_ops == NULL) { 1549a3114836SGerry Liu return (ENOTSUP); 1550a3114836SGerry Liu } 1551a3114836SGerry Liu 1552a3114836SGerry Liu request.pcr_cmd = PSM_CPU_ADD; 1553a3114836SGerry Liu request.req.cpu_add.argp = argp; 1554a3114836SGerry Liu request.req.cpu_add.cpuid = -1; 1555a3114836SGerry Liu rc = (*pops->psm_cpu_ops)(&request); 1556a3114836SGerry Liu if (rc == 0) { 1557a3114836SGerry Liu ASSERT(request.req.cpu_add.cpuid != -1); 1558a3114836SGerry Liu *cpuidp = request.req.cpu_add.cpuid; 1559a3114836SGerry Liu } 1560a3114836SGerry Liu 1561a3114836SGerry Liu return (rc); 1562a3114836SGerry Liu } 1563a3114836SGerry Liu 1564a3114836SGerry Liu int 1565a3114836SGerry Liu mach_cpu_remove(processorid_t cpuid) 1566a3114836SGerry Liu { 1567a3114836SGerry Liu struct psm_ops *pops = mach_set[0]; 1568a3114836SGerry Liu psm_cpu_request_t request; 1569a3114836SGerry Liu 1570a3114836SGerry Liu if (pops->psm_cpu_ops == NULL) { 1571a3114836SGerry Liu return (ENOTSUP); 1572a3114836SGerry Liu } 1573a3114836SGerry Liu 1574a3114836SGerry Liu request.pcr_cmd = PSM_CPU_REMOVE; 1575a3114836SGerry Liu request.req.cpu_remove.cpuid = cpuid; 1576a3114836SGerry Liu 1577a3114836SGerry Liu return ((*pops->psm_cpu_ops)(&request)); 1578a3114836SGerry Liu } 1579a3114836SGerry Liu 1580fa96bd91SMichael Corcoran /* 1581fa96bd91SMichael Corcoran * Default handler to create device node for CPU. 1582fa96bd91SMichael Corcoran * One reference count will be held on created device node. 1583fa96bd91SMichael Corcoran */ 1584fa96bd91SMichael Corcoran static int 1585fa96bd91SMichael Corcoran mach_cpu_create_devinfo(cpu_t *cp, dev_info_t **dipp) 1586fa96bd91SMichael Corcoran { 1587fa96bd91SMichael Corcoran int rv, circ; 1588fa96bd91SMichael Corcoran dev_info_t *dip; 1589fa96bd91SMichael Corcoran static kmutex_t cpu_node_lock; 1590fa96bd91SMichael Corcoran static dev_info_t *cpu_nex_devi = NULL; 1591fa96bd91SMichael Corcoran 1592fa96bd91SMichael Corcoran ASSERT(cp != NULL); 1593fa96bd91SMichael Corcoran ASSERT(dipp != NULL); 1594fa96bd91SMichael Corcoran *dipp = NULL; 1595fa96bd91SMichael Corcoran 1596fa96bd91SMichael Corcoran if (cpu_nex_devi == NULL) { 1597fa96bd91SMichael Corcoran mutex_enter(&cpu_node_lock); 1598fa96bd91SMichael Corcoran /* First check whether cpus exists. */ 1599fa96bd91SMichael Corcoran cpu_nex_devi = ddi_find_devinfo("cpus", -1, 0); 1600fa96bd91SMichael Corcoran /* Create cpus if it doesn't exist. */ 1601fa96bd91SMichael Corcoran if (cpu_nex_devi == NULL) { 1602fa96bd91SMichael Corcoran ndi_devi_enter(ddi_root_node(), &circ); 1603fa96bd91SMichael Corcoran rv = ndi_devi_alloc(ddi_root_node(), "cpus", 1604fa96bd91SMichael Corcoran (pnode_t)DEVI_SID_NODEID, &dip); 1605fa96bd91SMichael Corcoran if (rv != NDI_SUCCESS) { 1606fa96bd91SMichael Corcoran mutex_exit(&cpu_node_lock); 1607fa96bd91SMichael Corcoran cmn_err(CE_CONT, 1608fa96bd91SMichael Corcoran "?failed to create cpu nexus device.\n"); 1609fa96bd91SMichael Corcoran return (PSM_FAILURE); 1610fa96bd91SMichael Corcoran } 1611fa96bd91SMichael Corcoran ASSERT(dip != NULL); 1612fa96bd91SMichael Corcoran (void) ndi_devi_online(dip, 0); 1613fa96bd91SMichael Corcoran ndi_devi_exit(ddi_root_node(), circ); 1614fa96bd91SMichael Corcoran cpu_nex_devi = dip; 1615fa96bd91SMichael Corcoran } 1616fa96bd91SMichael Corcoran mutex_exit(&cpu_node_lock); 1617fa96bd91SMichael Corcoran } 1618fa96bd91SMichael Corcoran 1619fa96bd91SMichael Corcoran /* 1620fa96bd91SMichael Corcoran * create a child node for cpu identified as 'cpu_id' 1621fa96bd91SMichael Corcoran */ 1622fa96bd91SMichael Corcoran ndi_devi_enter(cpu_nex_devi, &circ); 16239209759aSMichael Corcoran dip = ddi_add_child(cpu_nex_devi, "cpu", DEVI_SID_NODEID, -1); 1624fa96bd91SMichael Corcoran if (dip == NULL) { 1625fa96bd91SMichael Corcoran cmn_err(CE_CONT, 1626fa96bd91SMichael Corcoran "?failed to create device node for cpu%d.\n", cp->cpu_id); 1627fa96bd91SMichael Corcoran rv = PSM_FAILURE; 1628fa96bd91SMichael Corcoran } else { 1629fa96bd91SMichael Corcoran *dipp = dip; 1630fa96bd91SMichael Corcoran (void) ndi_hold_devi(dip); 1631fa96bd91SMichael Corcoran rv = PSM_SUCCESS; 1632fa96bd91SMichael Corcoran } 1633fa96bd91SMichael Corcoran ndi_devi_exit(cpu_nex_devi, circ); 1634fa96bd91SMichael Corcoran 1635fa96bd91SMichael Corcoran return (rv); 1636fa96bd91SMichael Corcoran } 1637fa96bd91SMichael Corcoran 1638fa96bd91SMichael Corcoran /* 1639fa96bd91SMichael Corcoran * Create cpu device node in device tree and online it. 1640fa96bd91SMichael Corcoran * Return created dip with reference count held if requested. 1641fa96bd91SMichael Corcoran */ 1642fa96bd91SMichael Corcoran int 1643fa96bd91SMichael Corcoran mach_cpu_create_device_node(struct cpu *cp, dev_info_t **dipp) 1644fa96bd91SMichael Corcoran { 1645fa96bd91SMichael Corcoran int rv; 1646fa96bd91SMichael Corcoran dev_info_t *dip = NULL; 1647fa96bd91SMichael Corcoran 1648fa96bd91SMichael Corcoran ASSERT(psm_cpu_create_devinfo != NULL); 1649fa96bd91SMichael Corcoran rv = psm_cpu_create_devinfo(cp, &dip); 1650fa96bd91SMichael Corcoran if (rv == PSM_SUCCESS) { 1651fa96bd91SMichael Corcoran cpuid_set_cpu_properties(dip, cp->cpu_id, cp->cpu_m.mcpu_cpi); 1652fa96bd91SMichael Corcoran /* Recursively attach driver for parent nexus device. */ 1653fa96bd91SMichael Corcoran if (i_ddi_attach_node_hierarchy(ddi_get_parent(dip)) == 1654fa96bd91SMichael Corcoran DDI_SUCCESS) { 1655fa96bd91SMichael Corcoran /* Configure cpu itself and descendants. */ 1656fa96bd91SMichael Corcoran (void) ndi_devi_online(dip, 1657fa96bd91SMichael Corcoran NDI_ONLINE_ATTACH | NDI_CONFIG); 1658fa96bd91SMichael Corcoran } 1659fa96bd91SMichael Corcoran if (dipp != NULL) { 1660fa96bd91SMichael Corcoran *dipp = dip; 1661fa96bd91SMichael Corcoran } else { 1662fa96bd91SMichael Corcoran (void) ndi_rele_devi(dip); 1663fa96bd91SMichael Corcoran } 1664fa96bd91SMichael Corcoran } 1665fa96bd91SMichael Corcoran 1666fa96bd91SMichael Corcoran return (rv); 1667fa96bd91SMichael Corcoran } 1668fa96bd91SMichael Corcoran 1669a3114836SGerry Liu /* 1670a3114836SGerry Liu * The dipp contains one of following values on return: 1671a3114836SGerry Liu * - NULL if no device node found 1672a3114836SGerry Liu * - pointer to device node if found 1673a3114836SGerry Liu */ 1674a3114836SGerry Liu int 1675a3114836SGerry Liu mach_cpu_get_device_node(struct cpu *cp, dev_info_t **dipp) 1676a3114836SGerry Liu { 1677a3114836SGerry Liu *dipp = NULL; 1678a3114836SGerry Liu if (psm_cpu_get_devinfo != NULL) { 1679a3114836SGerry Liu if (psm_cpu_get_devinfo(cp, dipp) == PSM_SUCCESS) { 1680a3114836SGerry Liu return (PSM_SUCCESS); 1681a3114836SGerry Liu } 1682a3114836SGerry Liu } 1683a3114836SGerry Liu 1684a3114836SGerry Liu return (PSM_FAILURE); 1685a3114836SGerry Liu } 1686a3114836SGerry Liu 16877c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 16887c478bd9Sstevel@tonic-gate static int 16897c478bd9Sstevel@tonic-gate mach_translate_irq(dev_info_t *dip, int irqno) 16907c478bd9Sstevel@tonic-gate { 16917c478bd9Sstevel@tonic-gate return (irqno); /* default to NO translation */ 16927c478bd9Sstevel@tonic-gate } 16937c478bd9Sstevel@tonic-gate 16947c478bd9Sstevel@tonic-gate static void 16957c478bd9Sstevel@tonic-gate mach_notify_error(int level, char *errmsg) 16967c478bd9Sstevel@tonic-gate { 16977c478bd9Sstevel@tonic-gate /* 16987c478bd9Sstevel@tonic-gate * SL_FATAL is pass in once panicstr is set, deliver it 16997c478bd9Sstevel@tonic-gate * as CE_PANIC. Also, translate SL_ codes back to CE_ 17007c478bd9Sstevel@tonic-gate * codes for the psmi handler 17017c478bd9Sstevel@tonic-gate */ 17027c478bd9Sstevel@tonic-gate if (level & SL_FATAL) 17037c478bd9Sstevel@tonic-gate (*notify_error)(CE_PANIC, errmsg); 17047c478bd9Sstevel@tonic-gate else if (level & SL_WARN) 17057c478bd9Sstevel@tonic-gate (*notify_error)(CE_WARN, errmsg); 17067c478bd9Sstevel@tonic-gate else if (level & SL_NOTE) 17077c478bd9Sstevel@tonic-gate (*notify_error)(CE_NOTE, errmsg); 17087c478bd9Sstevel@tonic-gate else if (level & SL_CONSOLE) 17097c478bd9Sstevel@tonic-gate (*notify_error)(CE_CONT, errmsg); 17107c478bd9Sstevel@tonic-gate } 17117c478bd9Sstevel@tonic-gate 17127c478bd9Sstevel@tonic-gate /* 17137c478bd9Sstevel@tonic-gate * It provides the default basic intr_ops interface for the new DDI 17147c478bd9Sstevel@tonic-gate * interrupt framework if the PSM doesn't have one. 17157c478bd9Sstevel@tonic-gate * 17167c478bd9Sstevel@tonic-gate * Input: 17177c478bd9Sstevel@tonic-gate * dip - pointer to the dev_info structure of the requested device 17187c478bd9Sstevel@tonic-gate * hdlp - pointer to the internal interrupt handle structure for the 17197c478bd9Sstevel@tonic-gate * requested interrupt 17207c478bd9Sstevel@tonic-gate * intr_op - opcode for this call 17217c478bd9Sstevel@tonic-gate * result - pointer to the integer that will hold the result to be 17227c478bd9Sstevel@tonic-gate * passed back if return value is PSM_SUCCESS 17237c478bd9Sstevel@tonic-gate * 17247c478bd9Sstevel@tonic-gate * Output: 17257c478bd9Sstevel@tonic-gate * return value is either PSM_SUCCESS or PSM_FAILURE 17267c478bd9Sstevel@tonic-gate */ 17277c478bd9Sstevel@tonic-gate static int 17287c478bd9Sstevel@tonic-gate mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp, 17297c478bd9Sstevel@tonic-gate psm_intr_op_t intr_op, int *result) 17307c478bd9Sstevel@tonic-gate { 17317c478bd9Sstevel@tonic-gate struct intrspec *ispec; 17327c478bd9Sstevel@tonic-gate 17337c478bd9Sstevel@tonic-gate switch (intr_op) { 17347c478bd9Sstevel@tonic-gate case PSM_INTR_OP_CHECK_MSI: 17357c478bd9Sstevel@tonic-gate *result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI | 17367c478bd9Sstevel@tonic-gate DDI_INTR_TYPE_MSIX); 17377c478bd9Sstevel@tonic-gate break; 17387c478bd9Sstevel@tonic-gate case PSM_INTR_OP_ALLOC_VECTORS: 17397c478bd9Sstevel@tonic-gate if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) 17407c478bd9Sstevel@tonic-gate *result = 1; 17417c478bd9Sstevel@tonic-gate else 17427c478bd9Sstevel@tonic-gate *result = 0; 17437c478bd9Sstevel@tonic-gate break; 17447c478bd9Sstevel@tonic-gate case PSM_INTR_OP_FREE_VECTORS: 17457c478bd9Sstevel@tonic-gate break; 17467c478bd9Sstevel@tonic-gate case PSM_INTR_OP_NAVAIL_VECTORS: 17477c478bd9Sstevel@tonic-gate if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) 17487c478bd9Sstevel@tonic-gate *result = 1; 17497c478bd9Sstevel@tonic-gate else 17507c478bd9Sstevel@tonic-gate *result = 0; 17517c478bd9Sstevel@tonic-gate break; 17527c478bd9Sstevel@tonic-gate case PSM_INTR_OP_XLATE_VECTOR: 17537a364d25Sschwartz ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp; 17547c478bd9Sstevel@tonic-gate *result = psm_translate_irq(dip, ispec->intrspec_vec); 17557c478bd9Sstevel@tonic-gate break; 17567c478bd9Sstevel@tonic-gate case PSM_INTR_OP_GET_CAP: 17577c478bd9Sstevel@tonic-gate *result = 0; 17587c478bd9Sstevel@tonic-gate break; 17597c478bd9Sstevel@tonic-gate case PSM_INTR_OP_GET_PENDING: 17607c478bd9Sstevel@tonic-gate case PSM_INTR_OP_CLEAR_MASK: 17617c478bd9Sstevel@tonic-gate case PSM_INTR_OP_SET_MASK: 17627c478bd9Sstevel@tonic-gate case PSM_INTR_OP_GET_SHARED: 17637c478bd9Sstevel@tonic-gate case PSM_INTR_OP_SET_PRI: 17647c478bd9Sstevel@tonic-gate case PSM_INTR_OP_SET_CAP: 17657a364d25Sschwartz case PSM_INTR_OP_SET_CPU: 17667a364d25Sschwartz case PSM_INTR_OP_GET_INTR: 17677c478bd9Sstevel@tonic-gate default: 17687c478bd9Sstevel@tonic-gate return (PSM_FAILURE); 17697c478bd9Sstevel@tonic-gate } 17707c478bd9Sstevel@tonic-gate return (PSM_SUCCESS); 17717c478bd9Sstevel@tonic-gate } 1772ce8eb11aSdp78419 /* 1773ce8eb11aSdp78419 * Return 1 if CMT load balancing policies should be 1774ce8eb11aSdp78419 * implemented across instances of the specified hardware 1775ce8eb11aSdp78419 * sharing relationship. 1776ce8eb11aSdp78419 */ 1777ce8eb11aSdp78419 int 1778ce8eb11aSdp78419 pg_cmt_load_bal_hw(pghw_type_t hw) 1779ce8eb11aSdp78419 { 1780ce8eb11aSdp78419 if (hw == PGHW_IPIPE || 1781ce8eb11aSdp78419 hw == PGHW_FPU || 17828031591dSSrihari Venkatesan hw == PGHW_PROCNODE || 1783ce8eb11aSdp78419 hw == PGHW_CHIP) 1784ce8eb11aSdp78419 return (1); 1785ce8eb11aSdp78419 else 1786ce8eb11aSdp78419 return (0); 1787ce8eb11aSdp78419 } 1788ce8eb11aSdp78419 /* 1789ce8eb11aSdp78419 * Return 1 if thread affinity polices should be implemented 1790ce8eb11aSdp78419 * for instances of the specifed hardware sharing relationship. 1791ce8eb11aSdp78419 */ 1792ce8eb11aSdp78419 int 1793ce8eb11aSdp78419 pg_cmt_affinity_hw(pghw_type_t hw) 1794ce8eb11aSdp78419 { 1795ce8eb11aSdp78419 if (hw == PGHW_CACHE) 1796ce8eb11aSdp78419 return (1); 1797ce8eb11aSdp78419 else 1798ce8eb11aSdp78419 return (0); 1799ce8eb11aSdp78419 } 1800b885580bSAlexander Kolbasov 1801b885580bSAlexander Kolbasov /* 1802b885580bSAlexander Kolbasov * Return number of counter events requested to measure hardware capacity and 1803b885580bSAlexander Kolbasov * utilization and setup CPC requests for specified CPU as needed 1804b885580bSAlexander Kolbasov * 1805b885580bSAlexander Kolbasov * May return 0 when platform or processor specific code knows that no CPC 1806b885580bSAlexander Kolbasov * events should be programmed on this CPU or -1 when platform or processor 1807b885580bSAlexander Kolbasov * specific code doesn't know which counter events are best to use and common 1808b885580bSAlexander Kolbasov * code should decide for itself 1809b885580bSAlexander Kolbasov */ 1810b885580bSAlexander Kolbasov int 1811b885580bSAlexander Kolbasov /* LINTED E_FUNC_ARG_UNUSED */ 1812b885580bSAlexander Kolbasov cu_plat_cpc_init(cpu_t *cp, kcpc_request_list_t *reqs, int nreqs) 1813b885580bSAlexander Kolbasov { 1814b885580bSAlexander Kolbasov const char *impl_name; 1815b885580bSAlexander Kolbasov 1816b885580bSAlexander Kolbasov /* 1817b885580bSAlexander Kolbasov * Return error if pcbe_ops not set 1818b885580bSAlexander Kolbasov */ 1819b885580bSAlexander Kolbasov if (pcbe_ops == NULL) 1820b885580bSAlexander Kolbasov return (-1); 1821b885580bSAlexander Kolbasov 1822b885580bSAlexander Kolbasov /* 1823b885580bSAlexander Kolbasov * Return that no CPC events should be programmed on hyperthreaded 1824b885580bSAlexander Kolbasov * Pentium 4 and return error for all other x86 processors to tell 1825b885580bSAlexander Kolbasov * common code to decide what counter events to program on those CPUs 1826b885580bSAlexander Kolbasov * for measuring hardware capacity and utilization 1827b885580bSAlexander Kolbasov */ 1828b885580bSAlexander Kolbasov impl_name = pcbe_ops->pcbe_impl_name(); 1829b885580bSAlexander Kolbasov if (impl_name != NULL && strcmp(impl_name, PCBE_IMPL_NAME_P4HT) == 0) 1830b885580bSAlexander Kolbasov return (0); 1831b885580bSAlexander Kolbasov else 1832b885580bSAlexander Kolbasov return (-1); 1833b885580bSAlexander Kolbasov } 1834