/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/kstat.h>
#include <sys/param.h>
#include <sys/stack.h>
#include <sys/regset.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/procfs_isa.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/machpcb.h>
#include <sys/machasi.h>
#include <sys/vis.h>
#include <sys/fpu/fpusystm.h>
#include <sys/cpu_module.h>
#include <sys/privregs.h>
#include <sys/archsystm.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/time.h>
#include <sys/clock.h>
#include <sys/cmp.h>
#include <sys/platform_module.h>
#include <sys/bl.h>
#include <sys/nvpair.h>
#include <sys/kdi_impl.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/promif.h>
#include <sys/pool_pset.h>
#include <sys/mem.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <sys/hold_page.h>
#include <sys/cpu.h>
#include <sys/ivintr.h>
#include <sys/clock_impl.h>
#include <sys/machclock.h>

int maxphys = MMU_PAGESIZE * 16;	/* 128k */
int klustsize = MMU_PAGESIZE * 16;	/* 128k */

/*
 * Initialize kernel thread's stack.
 */
caddr_t
thread_stk_init(caddr_t stk)
{
	kfpu_t *fp;
	ulong_t align;

	/* allocate extra space for floating point state */
	stk -= SA(sizeof (kfpu_t) + GSR_SIZE);
	align = (uintptr_t)stk & 0x3f;
	stk -= align;		/* force v9_fpu to be 64 byte aligned */
	fp = (kfpu_t *)stk;
	fp->fpu_fprs = 0;

	stk -= SA(MINFRAME);
	return (stk);
}

#define	WIN32_SIZE	(MAXWIN * sizeof (struct rwindow32))
#define	WIN64_SIZE	(MAXWIN * sizeof (struct rwindow64))

kmem_cache_t	*wbuf32_cache;
kmem_cache_t	*wbuf64_cache;

void
lwp_stk_cache_init(void)
{
	/*
	 * Window buffers are allocated from the static arena
	 * because they are accessed at TL>0. We also must use
	 * KMC_NOHASH to prevent them from straddling page
	 * boundaries as they are accessed by physical address.
	 */
	wbuf32_cache = kmem_cache_create("wbuf32_cache", WIN32_SIZE,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
	wbuf64_cache = kmem_cache_create("wbuf64_cache", WIN64_SIZE,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
}

/*
 * Initialize lwp's kernel stack.
 * Note that now that the floating point register save area (kfpu_t)
 * has been broken out from machpcb and aligned on a 64 byte boundary so that
 * we can do block load/stores to/from it, there are a couple of potential
 * optimizations to save stack space. 1. The floating point register save
 * area could be aligned on a 16 byte boundary, and the floating point code
 * changed to (a) check the alignment and (b) use different save/restore
 * macros depending upon the alignment. 2. The lwp_stk_init code below
 * could be changed to calculate whether less space would be wasted if
 * machpcb were first instead of second. However there is a REGOFF macro
 * used in locore, syscall_trap, machdep and mlsetup that assumes that the
 * saved register area is a fixed distance from the %sp, and it would have
 * to be changed to a pointer or something...JJ said later.
 */
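/*
 * Roughly, lwp_stk_init() below lays out the top of the kernel stack as
 * follows (a sketch only; exact offsets depend on SA() rounding and the
 * 64-byte alignment fixup):
 *
 *	+-------------------------------+  <- original stk (stack top)
 *	| kfpu_t + GSR save area	|  <- fp (64-byte aligned)
 *	+-------------------------------+
 *	| struct machpcb		|  <- mpcb
 *	+-------------------------------+  <- returned stk
 *	| rest of the kernel stack,	|
 *	| growing downward		|
 */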
caddr_t
lwp_stk_init(klwp_t *lwp, caddr_t stk)
{
	struct machpcb *mpcb;
	kfpu_t *fp;
	uintptr_t aln;

	stk -= SA(sizeof (kfpu_t) + GSR_SIZE);
	aln = (uintptr_t)stk & 0x3F;
	stk -= aln;
	fp = (kfpu_t *)stk;
	stk -= SA(sizeof (struct machpcb));
	mpcb = (struct machpcb *)stk;
	bzero(mpcb, sizeof (struct machpcb));
	bzero(fp, sizeof (kfpu_t) + GSR_SIZE);
	lwp->lwp_regs = (void *)&mpcb->mpcb_regs;
	lwp->lwp_fpu = (void *)fp;
	mpcb->mpcb_fpu = fp;
	mpcb->mpcb_fpu->fpu_q = mpcb->mpcb_fpu_q;
	mpcb->mpcb_thread = lwp->lwp_thread;
	mpcb->mpcb_wbcnt = 0;
	if (lwp->lwp_procp->p_model == DATAMODEL_ILP32) {
		mpcb->mpcb_wstate = WSTATE_USER32;
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
	} else {
		mpcb->mpcb_wstate = WSTATE_USER64;
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
	}
	ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
	mpcb->mpcb_pa = va_to_pa(mpcb);
	return (stk);
}

void
lwp_stk_fini(klwp_t *lwp)
{
	struct machpcb *mpcb = lwptompcb(lwp);

	/*
	 * there might be windows still in the wbuf due to unmapped
	 * stack, misaligned stack pointer, etc. We just free it.
	 */
	mpcb->mpcb_wbcnt = 0;
	if (mpcb->mpcb_wstate == WSTATE_USER32)
		kmem_cache_free(wbuf32_cache, mpcb->mpcb_wbuf);
	else
		kmem_cache_free(wbuf64_cache, mpcb->mpcb_wbuf);
	mpcb->mpcb_wbuf = NULL;
	mpcb->mpcb_wbuf_pa = -1;
}


/*
 * Copy regs from parent to child.
 */
void
lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
{
	kthread_t *t, *pt = lwptot(lwp);
	struct machpcb *mpcb = lwptompcb(clwp);
	struct machpcb *pmpcb = lwptompcb(lwp);
	kfpu_t *fp, *pfp = lwptofpu(lwp);
	caddr_t wbuf;
	uint_t wstate;

	t = mpcb->mpcb_thread;
	/*
	 * remember child's fp and wbuf since they will get erased during
	 * the bcopy.
	 */
	fp = mpcb->mpcb_fpu;
	wbuf = mpcb->mpcb_wbuf;
	wstate = mpcb->mpcb_wstate;
	/*
	 * Don't copy mpcb_frame since we hand-crafted it
	 * in thread_load().
	 */
	bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct machpcb) - REGOFF);
	mpcb->mpcb_thread = t;
	mpcb->mpcb_fpu = fp;
	fp->fpu_q = mpcb->mpcb_fpu_q;

	/*
	 * It is theoretically possible for the lwp's wstate to
	 * be different from its value assigned in lwp_stk_init,
	 * since lwp_stk_init assumed the data model of the process.
	 * Here, we take on the data model of the cloned lwp.
	 */
	if (mpcb->mpcb_wstate != wstate) {
		if (wstate == WSTATE_USER32) {
			kmem_cache_free(wbuf32_cache, wbuf);
			wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
			wstate = WSTATE_USER64;
		} else {
			kmem_cache_free(wbuf64_cache, wbuf);
			wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
			wstate = WSTATE_USER32;
		}
	}

	mpcb->mpcb_pa = va_to_pa(mpcb);
	mpcb->mpcb_wbuf = wbuf;
	mpcb->mpcb_wbuf_pa = va_to_pa(wbuf);

	ASSERT(mpcb->mpcb_wstate == wstate);

	if (mpcb->mpcb_wbcnt != 0) {
		bcopy(pmpcb->mpcb_wbuf, mpcb->mpcb_wbuf,
		    mpcb->mpcb_wbcnt * ((mpcb->mpcb_wstate == WSTATE_USER32) ?
		    sizeof (struct rwindow32) : sizeof (struct rwindow64)));
	}

	if (pt == curthread)
		pfp->fpu_fprs = _fp_read_fprs();
	if ((pfp->fpu_en) || (pfp->fpu_fprs & FPRS_FEF)) {
		if (pt == curthread && fpu_exists) {
			save_gsr(clwp->lwp_fpu);
		} else {
			uint64_t gsr;
			gsr = get_gsr(lwp->lwp_fpu);
			set_gsr(gsr, clwp->lwp_fpu);
		}
		fp_fork(lwp, clwp);
	}
}

/*
 * Free lwp fpu regs.
 */
void
lwp_freeregs(klwp_t *lwp, int isexec)
{
	kfpu_t *fp = lwptofpu(lwp);

	if (lwptot(lwp) == curthread)
		fp->fpu_fprs = _fp_read_fprs();
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF))
		fp_free(fp, isexec);
}

/*
 * These functions are currently unused on sparc.
 */
/*ARGSUSED*/
void
lwp_attach_brand_hdlrs(klwp_t *lwp)
{}

/*ARGSUSED*/
void
lwp_detach_brand_hdlrs(klwp_t *lwp)
{}

/*
 * fill in the extra register state area specified with the
 * specified lwp's platform-dependent non-floating-point extra
 * register state information
 */
/* ARGSUSED */
void
xregs_getgfiller(klwp_id_t lwp, caddr_t xrp)
{
	/* for sun4u nothing to do here, added for symmetry */
}

/*
 * fill in the extra register state area specified with the specified lwp's
 * platform-dependent floating-point extra register state information.
 * NOTE:  'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to get the registers of another lwp.
 */
void
xregs_getfpfiller(klwp_id_t lwp, caddr_t xrp)
{
	prxregset_t *xregs = (prxregset_t *)xrp;
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	uint64_t gsr;

	/*
	 * fp_fksave() does not flush the GSR register into
	 * the lwp area, so do it now
	 */
	kpreempt_disable();
	if (ttolwp(curthread) == lwp && fpu_exists) {
		fp->fpu_fprs = _fp_read_fprs();
		if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
			_fp_write_fprs(fprs);
			fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
		}
		save_gsr(fp);
	}
	gsr = get_gsr(fp);
	kpreempt_enable();
	PRXREG_GSR(xregs) = gsr;
}

/*
 * set the specified lwp's platform-dependent non-floating-point
 * extra register state based on the specified input
 */
/* ARGSUSED */
void
xregs_setgfiller(klwp_id_t lwp, caddr_t xrp)
{
	/* for sun4u nothing to do here, added for symmetry */
}

/*
 * set the specified lwp's platform-dependent floating-point
 * extra register state based on the specified input
 */
void
xregs_setfpfiller(klwp_id_t lwp, caddr_t xrp)
{
	prxregset_t *xregs = (prxregset_t *)xrp;
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	uint64_t gsr = PRXREG_GSR(xregs);

	kpreempt_disable();
	set_gsr(gsr, lwptofpu(lwp));

	if ((lwp == ttolwp(curthread)) && fpu_exists) {
		fp->fpu_fprs = _fp_read_fprs();
		if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
			_fp_write_fprs(fprs);
			fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
		}
		restore_gsr(lwptofpu(lwp));
	}
	kpreempt_enable();
}

/*
 * fill in the sun4u asrs, ie, the lwp's platform-dependent
 * non-floating-point extra register state information
 */
/* ARGSUSED */
void
getasrs(klwp_t *lwp, asrset_t asr)
{
	/* for sun4u nothing to do here, added for symmetry */
}

/*
 * fill in the sun4u asrs, ie, the lwp's platform-dependent
 * floating-point extra register state information
 */
void
getfpasrs(klwp_t *lwp, asrset_t asr)
{
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

	kpreempt_disable();
	if (ttolwp(curthread) == lwp)
		fp->fpu_fprs = _fp_read_fprs();
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) {
		if (fpu_exists && ttolwp(curthread) == lwp) {
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				_fp_write_fprs(fprs);
				fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
			}
			save_gsr(fp);
		}
		asr[ASR_GSR] = (int64_t)get_gsr(fp);
	}
	kpreempt_enable();
}

/*
 * set the sun4u asrs, ie, the lwp's platform-dependent
 * non-floating-point extra register state information
 */
/* ARGSUSED */
void
setasrs(klwp_t *lwp, asrset_t asr)
{
	/* for sun4u nothing to do here, added for symmetry */
}

void
setfpasrs(klwp_t *lwp, asrset_t asr)
{
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

	kpreempt_disable();
	if (ttolwp(curthread) == lwp)
		fp->fpu_fprs = _fp_read_fprs();
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) {
		set_gsr(asr[ASR_GSR], fp);
		if (fpu_exists && ttolwp(curthread) == lwp) {
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				_fp_write_fprs(fprs);
				fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
			}
			restore_gsr(fp);
		}
	}
	kpreempt_enable();
}

/*
 * Create interrupt kstats for this CPU.
 */
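/*
 * The kstat created below is published as cpu:<cpuid>:intrstat and carries
 * two named values per PIL: "level-N-time" (cumulative time spent at that
 * PIL, converted to nanoseconds by the update routine) and "level-N-count".
 * As an illustrative example, it can typically be read from userland with
 * something like "kstat -p cpu:0:intrstat".
 */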
void
cpu_create_intrstat(cpu_t *cp)
{
	int		i;
	kstat_t		*intr_ksp;
	kstat_named_t	*knp;
	char		name[KSTAT_STRLEN];
	zoneid_t	zoneid;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (pool_pset_enabled())
		zoneid = GLOBAL_ZONEID;
	else
		zoneid = ALL_ZONES;

	intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc",
	    KSTAT_TYPE_NAMED, PIL_MAX * 2, NULL, zoneid);

	/*
	 * Initialize each PIL's named kstat
	 */
	if (intr_ksp != NULL) {
		intr_ksp->ks_update = cpu_kstat_intrstat_update;
		knp = (kstat_named_t *)intr_ksp->ks_data;
		intr_ksp->ks_private = cp;
		for (i = 0; i < PIL_MAX; i++) {
			(void) snprintf(name, KSTAT_STRLEN, "level-%d-time",
			    i + 1);
			kstat_named_init(&knp[i * 2], name, KSTAT_DATA_UINT64);
			(void) snprintf(name, KSTAT_STRLEN, "level-%d-count",
			    i + 1);
			kstat_named_init(&knp[(i * 2) + 1], name,
			    KSTAT_DATA_UINT64);
		}
		kstat_install(intr_ksp);
	}
}

/*
 * Delete interrupt kstats for this CPU.
 */
void
cpu_delete_intrstat(cpu_t *cp)
{
	kstat_delete_byname_zone("cpu", cp->cpu_id, "intrstat", ALL_ZONES);
}

/*
 * Convert interrupt statistics from CPU ticks to nanoseconds and
 * update kstat.
 */
int
cpu_kstat_intrstat_update(kstat_t *ksp, int rw)
{
	kstat_named_t	*knp = ksp->ks_data;
	cpu_t		*cpup = (cpu_t *)ksp->ks_private;
	int		i;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	/*
	 * We use separate passes to copy and convert the statistics to
	 * nanoseconds. This assures that the snapshot of the data is as
	 * self-consistent as possible.
	 */

	for (i = 0; i < PIL_MAX; i++) {
		knp[i * 2].value.ui64 = cpup->cpu_m.intrstat[i + 1][0];
		knp[(i * 2) + 1].value.ui64 = cpup->cpu_stats.sys.intr[i];
	}

	for (i = 0; i < PIL_MAX; i++) {
		knp[i * 2].value.ui64 =
		    (uint64_t)tick2ns((hrtime_t)knp[i * 2].value.ui64,
		    cpup->cpu_id);
	}

	return (0);
}

/*
 * Called by common/os/cpu.c for psrinfo(1m) kstats
 */
char *
cpu_fru_fmri(cpu_t *cp)
{
	return (cpunodes[cp->cpu_id].fru_fmri);
}

/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
	uint64_t	interval;
	uint64_t	start;
	cpu_t		*cpu;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/*
	 * We could be here with a zero timestamp. This could happen if:
	 * an interrupt thread which no longer has a pinned thread underneath
	 * it (i.e. it blocked at some point in its past) has finished running
	 * its handler. intr_thread() updated the interrupt statistic for its
	 * PIL and zeroed its timestamp. Since there was no pinned thread to
	 * return to, swtch() gets called and we end up here.
	 *
	 * It can also happen if an interrupt thread in intr_thread() calls
	 * preempt. It will have already taken care of updating stats. In
	 * this event, the interrupt thread will be runnable.
	 */
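	/*
	 * The cas64() loop below atomically snapshots t_intr_start and
	 * resets it to zero: the swap only succeeds if t_intr_start still
	 * holds the value from which the interval was computed, so each
	 * interval is charged exactly once.
	 */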
	if (t->t_intr_start) {
		do {
			start = t->t_intr_start;
			interval = CLOCK_TICK_COUNTER() - start;
		} while (cas64(&t->t_intr_start, start, 0) != start);
		cpu = CPU;
		if (cpu->cpu_m.divisor > 1)
			interval *= cpu->cpu_m.divisor;
		cpu->cpu_m.intrstat[t->t_pil][0] += interval;

		atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
		    interval);
	} else
		ASSERT(t->t_intr == NULL || t->t_state == TS_RUN);
}


/*
 * An interrupt thread is returning from swtch(). Place a starting timestamp
 * in its thread structure.
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
	uint64_t ts;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	do {
		ts = t->t_intr_start;
	} while (cas64(&t->t_intr_start, ts, CLOCK_TICK_COUNTER()) != ts);
}


int
blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class)
{
	if (&plat_blacklist)
		return (plat_blacklist(cmd, scheme, fmri, class));

	return (ENOTSUP);
}

int
kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
	extern void kdi_flush_caches(void);
	size_t nread = 0;
	uint32_t word;
	int slop, i;

	kdi_flush_caches();
	membar_enter();

	/* We might not begin on a word boundary. */
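	/*
	 * Illustrative example: for addr 0x1003 and nbytes 6, slop is 3,
	 * so the loop below copies only the byte at offset 3 of the word
	 * at 0x1000; addr is then rounded up to 0x1004 and the main loop
	 * copies the remaining five bytes a word at a time.
	 */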
	if ((slop = addr & 3) != 0) {
		word = ldphys(addr & ~3);
		for (i = slop; i < 4 && nbytes > 0; i++, nbytes--, nread++)
			*buf++ = ((uchar_t *)&word)[i];
		addr = roundup(addr, 4);
	}

	while (nbytes > 0) {
		word = ldphys(addr);
		for (i = 0; i < 4 && nbytes > 0; i++, nbytes--, nread++, addr++)
			*buf++ = ((uchar_t *)&word)[i];
	}

	kdi_flush_caches();

	*ncopiedp = nread;
	return (0);
}

int
kdi_pwrite(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
	extern void kdi_flush_caches(void);
	size_t nwritten = 0;
	uint32_t word;
	int slop, i;

	kdi_flush_caches();

	/* We might not begin on a word boundary. */
	if ((slop = addr & 3) != 0) {
		word = ldphys(addr & ~3);
		for (i = slop; i < 4 && nbytes > 0; i++, nbytes--, nwritten++)
			((uchar_t *)&word)[i] = *buf++;
		stphys(addr & ~3, word);
		addr = roundup(addr, 4);
	}

	while (nbytes > 3) {
		for (word = 0, i = 0; i < 4; i++, nbytes--, nwritten++)
			((uchar_t *)&word)[i] = *buf++;
		stphys(addr, word);
		addr += 4;
	}

	/* We might not end with a whole word. */
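	/*
	 * The trailing partial word is a read-modify-write: load the
	 * existing word, overwrite only its low-order bytes from the
	 * caller's buffer, and store the whole word back so the bytes
	 * beyond the requested range are preserved.
	 */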
	if (nbytes > 0) {
		word = ldphys(addr);
		for (i = 0; nbytes > 0; i++, nbytes--, nwritten++)
			((uchar_t *)&word)[i] = *buf++;
		stphys(addr, word);
	}

	membar_enter();
	kdi_flush_caches();

	*ncopiedp = nwritten;
	return (0);
}

static void
kdi_kernpanic(struct regs *regs, uint_t tt)
{
	sync_reg_buf = *regs;
	sync_tt = tt;

	sync_handler();
}

static void
kdi_plat_call(void (*platfn)(void))
{
	if (platfn != NULL) {
		prom_suspend_prepost();
		platfn();
		prom_resume_prepost();
	}
}

/*
 * kdi_system_claim and release are defined here for all sun4 platforms and
 * pointed to by mach_kdi_init() to provide default callbacks for such systems.
 * Specific sun4u or sun4v platforms may implement their own claim and release
 * routines, at which point their respective callbacks will be updated.
 */
static void
kdi_system_claim(void)
{
	lbolt_debug_entry();
}

static void
kdi_system_release(void)
{
	lbolt_debug_return();
}

void
mach_kdi_init(kdi_t *kdi)
{
	kdi->kdi_plat_call = kdi_plat_call;
	kdi->kdi_kmdb_enter = kmdb_enter;
	kdi->pkdi_system_claim = kdi_system_claim;
	kdi->pkdi_system_release = kdi_system_release;
	kdi->mkdi_cpu_index = kdi_cpu_index;
	kdi->mkdi_trap_vatotte = kdi_trap_vatotte;
	kdi->mkdi_kernpanic = kdi_kernpanic;
}


/*
 * get_cpu_mstate() is passed an array of timestamps, NCMSTATES
 * long, and it fills in the array with the time spent on cpu in
 * each of the mstates, where time is returned in nsec.
 *
 * No guarantee is made that the returned values in times[] will
 * monotonically increase on sequential calls, although this will
 * be true in the long run. Any such guarantee must be handled by
 * the caller, if needed. This can happen if we fail to account
 * for elapsed time due to a generation counter conflict, yet we
 * did account for it on a prior call (see below).
 *
 * The complication is that the cpu in question may be updating
 * its microstate at the same time that we are reading it.
 * Because the microstate is only updated when the CPU's state
 * changes, the values in cpu_intracct[] can be indefinitely out
 * of date. To determine true current values, it is necessary to
 * compare the current time with cpu_mstate_start, and add the
 * difference to times[cpu_mstate].
 *
 * This can be a problem if those values are changing out from
 * under us. Because the code path in new_cpu_mstate() is
 * performance critical, we have not added a lock to it. Instead,
 * we have added a generation counter. Before beginning
 * modifications, the counter is set to 0. After modifications,
 * it is set to the old value plus one.
 *
 * get_cpu_mstate() will not consider the values of cpu_mstate
 * and cpu_mstate_start to be usable unless the value of
 * cpu_mstate_gen is both non-zero and unchanged, both before and
 * after reading the mstate information. Note that we must
 * protect against out-of-order loads around accesses to the
 * generation counter. Also, this is a best effort approach in
 * that we do not retry should the counter be found to have
 * changed.
 *
 * cpu_intracct[] is used to identify time spent in each CPU
 * mstate while handling interrupts. Such time should be reported
 * against system time, and so is subtracted out from its
 * corresponding cpu_acct[] time and added to
 * cpu_acct[CMS_SYSTEM]. Additionally, intracct time is stored in
 * %ticks, but acct time may be stored as %sticks, thus requiring
 * different conversions before they can be compared.
 */
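/*
 * A rough sketch of the writer-side protocol described above (the
 * authoritative implementation is new_cpu_mstate(), not this file):
 *
 *	gen = cpu->cpu_mstate_gen;
 *	cpu->cpu_mstate_gen = 0;
 *	membar_producer();
 *	... update cpu_mstate, cpu_mstate_start, cpu_acct[], ...
 *	membar_producer();
 *	cpu->cpu_mstate_gen = gen + 1;
 *
 * so a reader that observes the same non-zero generation before and after
 * its loads knows it did not race with an update.
 */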

void
get_cpu_mstate(cpu_t *cpu, hrtime_t *times)
{
	int i;
	hrtime_t now, start;
	uint16_t gen;
	uint16_t state;
	hrtime_t intracct[NCMSTATES];

	/*
	 * Load all volatile state under the protection of membar.
	 * cpu_acct[cpu_mstate] must be loaded to avoid double counting
	 * of (now - cpu_mstate_start) by a change in CPU mstate that
	 * arrives after we make our last check of cpu_mstate_gen.
	 */

	now = gethrtime_unscaled();
	gen = cpu->cpu_mstate_gen;

	membar_consumer();	/* guarantee load ordering */
	start = cpu->cpu_mstate_start;
	state = cpu->cpu_mstate;
	for (i = 0; i < NCMSTATES; i++) {
		intracct[i] = cpu->cpu_intracct[i];
		times[i] = cpu->cpu_acct[i];
	}
	membar_consumer();	/* guarantee load ordering */

	if (gen != 0 && gen == cpu->cpu_mstate_gen && now > start)
		times[state] += now - start;

	for (i = 0; i < NCMSTATES; i++) {
		scalehrtime(&times[i]);
		intracct[i] = tick2ns((hrtime_t)intracct[i], cpu->cpu_id);
	}

	for (i = 0; i < NCMSTATES; i++) {
		if (i == CMS_SYSTEM)
			continue;
		times[i] -= intracct[i];
		if (times[i] < 0) {
			intracct[i] += times[i];
			times[i] = 0;
		}
		times[CMS_SYSTEM] += intracct[i];
	}
}

void
mach_cpu_pause(volatile char *safe)
{
	/*
	 * This cpu is now safe.
	 */
	*safe = PAUSE_WAIT;
	membar_enter();	/* make sure stores are flushed */

	/*
	 * Now we wait. When we are allowed to continue, safe
	 * will be set to PAUSE_IDLE.
	 */
	while (*safe != PAUSE_IDLE)
		SMT_PAUSE();
}

/*ARGSUSED*/
int
plat_mem_do_mmio(struct uio *uio, enum uio_rw rw)
{
	return (ENOTSUP);
}

/* cpu threshold for compressed dumps */
#ifdef sun4v
uint_t dump_plat_mincpu = DUMP_PLAT_SUN4V_MINCPU;
#else
uint_t dump_plat_mincpu = DUMP_PLAT_SUN4U_MINCPU;
#endif

int
dump_plat_addr()
{
	return (0);
}

void
dump_plat_pfn()
{
}

/* ARGSUSED */
int
dump_plat_data(void *dump_cdata)
{
	return (0);
}

/* ARGSUSED */
int
plat_hold_page(pfn_t pfn, int lock, page_t **pp_ret)
{
	return (PLAT_HOLD_OK);
}

/* ARGSUSED */
void
plat_release_page(page_t *pp)
{
}

/* ARGSUSED */
void
progressbar_key_abort(ldi_ident_t li)
{
}

/*
 * We need to post a soft interrupt to reprogram the lbolt cyclic when
 * switching from event to cyclic driven lbolt. The following code adds
 * and posts the softint for sun4 platforms.
 */
static uint64_t lbolt_softint_inum;

void
lbolt_softint_add(void)
{
	lbolt_softint_inum = add_softintr(LOCK_LEVEL,
	    (softintrfunc)lbolt_ev_to_cyclic, NULL, SOFTINT_MT);
}

void
lbolt_softint_post(void)
{
	setsoftint(lbolt_softint_inum);
}