/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/inttypes.h>
#include <sys/cmn_err.h>
#include <sys/time.h>
#include <sys/ksynch.h>
#include <sys/systm.h>
#include <sys/kcpc.h>
#include <sys/cpc_impl.h>
#include <sys/cpc_pcbe.h>
#include <sys/atomic.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/sdt.h>
#include <sys/archsystm.h>
#include <sys/promif.h>
#include <sys/x_call.h>
#include <sys/cap_util.h>
#if defined(__x86)
#include <asm/clock.h>
#include <sys/xc_levels.h>
#endif

static kmutex_t kcpc_ctx_llock[CPC_HASH_BUCKETS];	/* protects ctx_list */
static kcpc_ctx_t *kcpc_ctx_list[CPC_HASH_BUCKETS];	/* head of list */

krwlock_t	kcpc_cpuctx_lock;	/* lock for 'kcpc_cpuctx' below */
int		kcpc_cpuctx;		/* number of cpu-specific contexts */

int		kcpc_counts_include_idle = 1; /* Project Private /etc/system variable */

/*
 * These are set when a PCBE module is loaded.
 */
uint_t		cpc_ncounters = 0;
pcbe_ops_t	*pcbe_ops = NULL;

/*
 * Statistics on (mis)behavior
 */
static uint32_t kcpc_intrctx_count;	/* # overflows in an interrupt handler */
static uint32_t kcpc_nullctx_count;	/* # overflows in a thread with no ctx */

/*
 * By setting 'kcpc_nullctx_panic' to 1, any overflow interrupts in a thread
 * with no valid context will result in a panic.
 */
static int kcpc_nullctx_panic = 0;

static void kcpc_lwp_create(kthread_t *t, kthread_t *ct);
static void kcpc_restore(kcpc_ctx_t *ctx);
static void kcpc_save(kcpc_ctx_t *ctx);
static void kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx);
static int kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch);
static kcpc_set_t *kcpc_dup_set(kcpc_set_t *set);
static kcpc_set_t *kcpc_set_create(kcpc_request_t *reqs, int nreqs,
    int set_flags, int kmem_flags);

/*
 * Macros to manipulate context flags. All flag updates should use one of
 * these two macros.
 *
 * Flags should always be updated atomically since some of the updates are
 * not protected by locks.
 */
#define	KCPC_CTX_FLAG_SET(ctx, flag) atomic_or_uint(&(ctx)->kc_flags, (flag))
#define	KCPC_CTX_FLAG_CLR(ctx, flag) atomic_and_uint(&(ctx)->kc_flags, ~(flag))
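/*
 * For example, the bind and overflow paths later in this file freeze and
 * thaw a context's counters with nothing more than
 *
 *	KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
 *	...
 *	KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
 *
 * (a minimal sketch; 'ctx' stands for whatever kcpc_ctx_t the caller owns).
 */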
/*
 * The IS_HIPIL() macro verifies that the code is executed either from a
 * cross-call or from a high-PIL interrupt.
 */
#ifdef DEBUG
#define	IS_HIPIL() (getpil() >= XCALL_PIL)
#else
#define	IS_HIPIL()
#endif	/* DEBUG */


extern int kcpc_hw_load_pcbe(void);

/*
 * Return value from kcpc_hw_load_pcbe()
 */
static int kcpc_pcbe_error = 0;

/*
 * Perform one-time initialization of the kcpc framework.
 * This function performs the initialization only the first time it is called.
 * It is safe to call it multiple times.
 */
int
kcpc_init(void)
{
	long hash;
	static uint32_t kcpc_initialized = 0;

	/*
	 * We already tried loading the platform pcbe module and failed.
	 */
	if (kcpc_pcbe_error != 0)
		return (-1);

	/*
	 * The kcpc framework should be initialized at most once.
	 */
	if (atomic_cas_32(&kcpc_initialized, 0, 1) != 0)
		return (0);

	rw_init(&kcpc_cpuctx_lock, NULL, RW_DEFAULT, NULL);
	for (hash = 0; hash < CPC_HASH_BUCKETS; hash++)
		mutex_init(&kcpc_ctx_llock[hash],
		    NULL, MUTEX_DRIVER, (void *)(uintptr_t)15);

	/*
	 * Load the platform-specific pcbe module.
	 */
	kcpc_pcbe_error = kcpc_hw_load_pcbe();

	return (kcpc_pcbe_error == 0 ? 0 : -1);
}

void
kcpc_register_pcbe(pcbe_ops_t *ops)
{
	pcbe_ops = ops;
	cpc_ncounters = pcbe_ops->pcbe_ncounters();
}

void
kcpc_register_dcpc(void (*func)(uint64_t))
{
	dtrace_cpc_fire = func;
}

void
kcpc_unregister_dcpc(void)
{
	dtrace_cpc_fire = NULL;
}

int
kcpc_bind_cpu(kcpc_set_t *set, processorid_t cpuid, int *subcode)
{
	cpu_t		*cp;
	kcpc_ctx_t	*ctx;
	int		error;
	int		save_spl;

	ctx = kcpc_ctx_alloc(KM_SLEEP);

	if (kcpc_assign_reqs(set, ctx) != 0) {
		kcpc_ctx_free(ctx);
		*subcode = CPC_RESOURCE_UNAVAIL;
		return (EINVAL);
	}

	ctx->kc_cpuid = cpuid;
	ctx->kc_thread = curthread;

	set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);

	if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
		kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
		kcpc_ctx_free(ctx);
		return (error);
	}

	set->ks_ctx = ctx;
	ctx->kc_set = set;

	/*
	 * We must hold cpu_lock to prevent DR, offlining, or unbinding while
	 * we are manipulating the cpu_t and programming the hardware, else
	 * the cpu_t could go away while we're looking at it.
	 */
	mutex_enter(&cpu_lock);
	cp = cpu_get(cpuid);

	if (cp == NULL)
		/*
		 * The CPU could have been DR'd out while we were getting
		 * set up.
		 */
		goto unbound;

	mutex_enter(&cp->cpu_cpc_ctxlock);
	kpreempt_disable();
	save_spl = spl_xcall();

	/*
	 * Check to see whether the counters for this CPU are already being
	 * used by someone other than the kernel for capacity and utilization
	 * (since the kernel will let go of counters for the user in
	 * kcpc_program() below).
	 */
	if (cp->cpu_cpc_ctx != NULL && !CU_CPC_ON(cp)) {
		/*
		 * If this CPU already has a bound set, return an error.
		 */
		splx(save_spl);
		kpreempt_enable();
		mutex_exit(&cp->cpu_cpc_ctxlock);
		goto unbound;
	}

	if (curthread->t_bind_cpu != cpuid) {
		splx(save_spl);
		kpreempt_enable();
		mutex_exit(&cp->cpu_cpc_ctxlock);
		goto unbound;
	}

	kcpc_program(ctx, B_FALSE, B_TRUE);

	splx(save_spl);
	kpreempt_enable();

	mutex_exit(&cp->cpu_cpc_ctxlock);
	mutex_exit(&cpu_lock);

	mutex_enter(&set->ks_lock);
	set->ks_state |= KCPC_SET_BOUND;
	cv_signal(&set->ks_condv);
	mutex_exit(&set->ks_lock);

	return (0);

unbound:
	mutex_exit(&cpu_lock);
	set->ks_ctx = NULL;
	kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
	kcpc_ctx_free(ctx);
	return (EAGAIN);
}
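/*
 * A minimal caller sketch for kcpc_bind_cpu() above, assuming the calling
 * thread has already bound itself to the target CPU:
 *
 *	int subcode;
 *
 *	if (kcpc_bind_cpu(set, cpuid, &subcode) != 0)
 *		... handle the failure ...
 *
 * EINVAL means the requests could not be assigned or configured (with the
 * detail in *subcode); EAGAIN means the CPU was DR'd out, already had a
 * bound set, or the caller was not bound to 'cpuid'.
 */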
int
kcpc_bind_thread(kcpc_set_t *set, kthread_t *t, int *subcode)
{
	kcpc_ctx_t	*ctx;
	int		error;

	/*
	 * Only one set is allowed per context, so ensure there is no
	 * existing context.
	 */

	if (t->t_cpc_ctx != NULL)
		return (EEXIST);

	ctx = kcpc_ctx_alloc(KM_SLEEP);

	/*
	 * The context must begin life frozen until it has been properly
	 * programmed onto the hardware. This prevents the context ops from
	 * worrying about it until we're ready.
	 */
	KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
	ctx->kc_hrtime = gethrtime();

	if (kcpc_assign_reqs(set, ctx) != 0) {
		kcpc_ctx_free(ctx);
		*subcode = CPC_RESOURCE_UNAVAIL;
		return (EINVAL);
	}

	ctx->kc_cpuid = -1;
	if (set->ks_flags & CPC_BIND_LWP_INHERIT)
		KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_LWPINHERIT);
	ctx->kc_thread = t;
	t->t_cpc_ctx = ctx;
	/*
	 * Permit threads to look at their own hardware counters from userland.
	 */
	KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_NONPRIV);

	/*
	 * Create the data store for this set.
	 */
	set->ks_data = kmem_alloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);

	if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
		kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
		kcpc_ctx_free(ctx);
		t->t_cpc_ctx = NULL;
		return (error);
	}

	set->ks_ctx = ctx;
	ctx->kc_set = set;

	/*
	 * Add a device context to the subject thread.
	 */
	installctx(t, ctx, kcpc_save, kcpc_restore, NULL,
	    kcpc_lwp_create, NULL, kcpc_free);

	/*
	 * Ask the backend to program the hardware.
	 */
	if (t == curthread) {
		int save_spl;

		kpreempt_disable();
		save_spl = spl_xcall();
		kcpc_program(ctx, B_TRUE, B_TRUE);
		splx(save_spl);
		kpreempt_enable();
	} else {
		/*
		 * Since we are the agent LWP, we know the victim LWP is stopped
		 * until we're done here; no need to worry about preemption or
		 * migration here. We still use an atomic op to clear the flag
		 * to ensure the flags are always self-consistent; they can
		 * still be accessed from, for instance, another CPU doing a
		 * kcpc_invalidate_all().
		 */
		KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
	}

	mutex_enter(&set->ks_lock);
	set->ks_state |= KCPC_SET_BOUND;
	cv_signal(&set->ks_condv);
	mutex_exit(&set->ks_lock);

	return (0);
}

/*
 * Walk through each request in the set and ask the PCBE to configure a
 * corresponding counter.
 */
int
kcpc_configure_reqs(kcpc_ctx_t *ctx, kcpc_set_t *set, int *subcode)
{
	int i;
	int ret;
	kcpc_request_t *rp;

	for (i = 0; i < set->ks_nreqs; i++) {
		int n;
		rp = &set->ks_req[i];

		n = rp->kr_picnum;

		ASSERT(n >= 0 && n < cpc_ncounters);

		ASSERT(ctx->kc_pics[n].kp_req == NULL);

		if (rp->kr_flags & CPC_OVF_NOTIFY_EMT) {
			if ((pcbe_ops->pcbe_caps & CPC_CAP_OVERFLOW_INTERRUPT)
			    == 0) {
				*subcode = -1;
				return (ENOTSUP);
			}
			/*
			 * If any of the counters have requested overflow
			 * notification, we flag the context as being one that
			 * cares about overflow.
			 */
			KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_SIGOVF);
		}

		rp->kr_config = NULL;
		if ((ret = pcbe_ops->pcbe_configure(n, rp->kr_event,
		    rp->kr_preset, rp->kr_flags, rp->kr_nattrs, rp->kr_attr,
		    &(rp->kr_config), (void *)ctx)) != 0) {
			kcpc_free_configs(set);
			*subcode = ret;
			switch (ret) {
			case CPC_ATTR_REQUIRES_PRIVILEGE:
			case CPC_HV_NO_ACCESS:
				return (EACCES);
			default:
				return (EINVAL);
			}
		}

		ctx->kc_pics[n].kp_req = rp;
		rp->kr_picp = &ctx->kc_pics[n];
		rp->kr_data = set->ks_data + rp->kr_index;
		*rp->kr_data = rp->kr_preset;
	}

	return (0);
}

void
kcpc_free_configs(kcpc_set_t *set)
{
	int i;

	for (i = 0; i < set->ks_nreqs; i++)
		if (set->ks_req[i].kr_config != NULL)
			pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
}
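/*
 * kcpc_configure_reqs() and kcpc_free_configs() are paired: each successful
 * pcbe_configure() call above leaves kr_config non-NULL, and on a later
 * failure or teardown kcpc_free_configs() walks the same requests and hands
 * every non-NULL config back to the backend via pcbe_free().
 */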
/*
 * buf points to a user address and the data should be copied out to that
 * address in the current process.
 */
int
kcpc_sample(kcpc_set_t *set, uint64_t *buf, hrtime_t *hrtime, uint64_t *tick)
{
	kcpc_ctx_t	*ctx = set->ks_ctx;
	int		save_spl;

	mutex_enter(&set->ks_lock);
	if ((set->ks_state & KCPC_SET_BOUND) == 0) {
		mutex_exit(&set->ks_lock);
		return (EINVAL);
	}
	mutex_exit(&set->ks_lock);

	/*
	 * Kernel preemption must be disabled while reading the hardware regs,
	 * and if this is a CPU-bound context, while checking the CPU binding
	 * of the current thread.
	 */
	kpreempt_disable();
	save_spl = spl_xcall();

	if (ctx->kc_flags & KCPC_CTX_INVALID) {
		splx(save_spl);
		kpreempt_enable();
		return (EAGAIN);
	}

	if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0) {
		if (ctx->kc_cpuid != -1) {
			if (curthread->t_bind_cpu != ctx->kc_cpuid) {
				splx(save_spl);
				kpreempt_enable();
				return (EAGAIN);
			}
		}

		if (ctx->kc_thread == curthread) {
			uint64_t curtick = KCPC_GET_TICK();

			ctx->kc_hrtime = gethrtime_waitfree();
			pcbe_ops->pcbe_sample(ctx);
			ctx->kc_vtick += curtick - ctx->kc_rawtick;
			ctx->kc_rawtick = curtick;
		}

		/*
		 * The config may have been invalidated by
		 * the pcbe_sample op.
		 */
		if (ctx->kc_flags & KCPC_CTX_INVALID) {
			splx(save_spl);
			kpreempt_enable();
			return (EAGAIN);
		}
	}

	splx(save_spl);
	kpreempt_enable();

	if (copyout(set->ks_data, buf,
	    set->ks_nreqs * sizeof (uint64_t)) == -1)
		return (EFAULT);
	if (copyout(&ctx->kc_hrtime, hrtime, sizeof (uint64_t)) == -1)
		return (EFAULT);
	if (copyout(&ctx->kc_vtick, tick, sizeof (uint64_t)) == -1)
		return (EFAULT);

	return (0);
}
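/*
 * The buffer copied out above is simply the set's ks_data array: one
 * uint64_t per request, with each request's kr_data pointing at
 * ks_data + kr_index (set up in kcpc_configure_reqs()). A caller would
 * therefore find the virtualized count for a given request at
 * buf[kr_index]; a sketch only, assuming the caller tracks the kr_index it
 * assigned to each request.
 */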
/*
 * Stop the counters on the CPU this context is bound to.
 */
static void
kcpc_stop_hw(kcpc_ctx_t *ctx)
{
	cpu_t *cp;

	kpreempt_disable();

	if (ctx->kc_cpuid == CPU->cpu_id) {
		cp = CPU;
	} else {
		cp = cpu_get(ctx->kc_cpuid);
	}

	ASSERT(cp != NULL && cp->cpu_cpc_ctx == ctx);
	kcpc_cpu_stop(cp, B_FALSE);

	kpreempt_enable();
}

int
kcpc_unbind(kcpc_set_t *set)
{
	kcpc_ctx_t	*ctx;
	kthread_t	*t;

	/*
	 * We could be racing with the process's agent thread as it
	 * binds the set; we must wait for the set to finish binding
	 * before attempting to tear it down.
	 */
	mutex_enter(&set->ks_lock);
	while ((set->ks_state & KCPC_SET_BOUND) == 0)
		cv_wait(&set->ks_condv, &set->ks_lock);
	mutex_exit(&set->ks_lock);

	ctx = set->ks_ctx;

	/*
	 * Use kc_lock to synchronize with kcpc_restore().
	 */
	mutex_enter(&ctx->kc_lock);
	KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
	mutex_exit(&ctx->kc_lock);

	if (ctx->kc_cpuid == -1) {
		t = ctx->kc_thread;
		/*
		 * The context is thread-bound and therefore has a device
		 * context. It will be freed via removectx() calling
		 * freectx() calling kcpc_free().
		 */
		if (t == curthread) {
			int save_spl;

			kpreempt_disable();
			save_spl = spl_xcall();
			if (!(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED))
				kcpc_unprogram(ctx, B_TRUE);
			splx(save_spl);
			kpreempt_enable();
		}
#ifdef DEBUG
		if (removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
		    kcpc_lwp_create, NULL, kcpc_free) == 0)
			panic("kcpc_unbind: context %p not present on thread %p",
			    (void *)ctx, (void *)t);
#else
		(void) removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
		    kcpc_lwp_create, NULL, kcpc_free);
#endif /* DEBUG */
		t->t_cpc_set = NULL;
		t->t_cpc_ctx = NULL;
	} else {
		/*
		 * If we are unbinding a CPU-bound set from a remote CPU, the
		 * native CPU's idle thread could be in the midst of programming
		 * this context onto the CPU. We grab the context's lock here to
		 * ensure that the idle thread is done with it. When we release
		 * the lock, the CPU no longer has a context and the idle thread
		 * will move on.
		 *
		 * cpu_lock must be held to prevent the CPU from being DR'd out
		 * while we disassociate the context from the cpu_t.
		 */
		cpu_t *cp;
		mutex_enter(&cpu_lock);
		cp = cpu_get(ctx->kc_cpuid);
		if (cp != NULL) {
			/*
			 * The CPU may have been DR'd out of the system.
			 */
			mutex_enter(&cp->cpu_cpc_ctxlock);
			if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0)
				kcpc_stop_hw(ctx);
			ASSERT(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED);
			mutex_exit(&cp->cpu_cpc_ctxlock);
		}
		mutex_exit(&cpu_lock);
		if (ctx->kc_thread == curthread) {
			kcpc_free(ctx, 0);
			curthread->t_cpc_set = NULL;
		}
	}

	return (0);
}

int
kcpc_preset(kcpc_set_t *set, int index, uint64_t preset)
{
	int i;

	ASSERT(set != NULL);
	ASSERT(set->ks_state & KCPC_SET_BOUND);
	ASSERT(set->ks_ctx->kc_thread == curthread);
	ASSERT(set->ks_ctx->kc_cpuid == -1);

	if (index < 0 || index >= set->ks_nreqs)
		return (EINVAL);

	for (i = 0; i < set->ks_nreqs; i++)
		if (set->ks_req[i].kr_index == index)
			break;
	ASSERT(i != set->ks_nreqs);

	set->ks_req[i].kr_preset = preset;
	return (0);
}

int
kcpc_restart(kcpc_set_t *set)
{
	kcpc_ctx_t	*ctx = set->ks_ctx;
	int		i;
	int		save_spl;

	ASSERT(set->ks_state & KCPC_SET_BOUND);
	ASSERT(ctx->kc_thread == curthread);
	ASSERT(ctx->kc_cpuid == -1);

	for (i = 0; i < set->ks_nreqs; i++) {
		*(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset;
		pcbe_ops->pcbe_configure(0, NULL, set->ks_req[i].kr_preset,
		    0, 0, NULL, &set->ks_req[i].kr_config, NULL);
	}

	kpreempt_disable();
	save_spl = spl_xcall();

	/*
	 * If the user is doing this on a running set, make sure the counters
	 * are stopped first.
	 */
	if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
		pcbe_ops->pcbe_allstop();

	/*
	 * Ask the backend to program the hardware.
	 */
	ctx->kc_rawtick = KCPC_GET_TICK();
	KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
	pcbe_ops->pcbe_program(ctx);
	splx(save_spl);
	kpreempt_enable();

	return (0);
}

/*
 * Caller must hold kcpc_cpuctx_lock.
 */
int
kcpc_enable(kthread_t *t, int cmd, int enable)
{
	kcpc_ctx_t	*ctx = t->t_cpc_ctx;
	kcpc_set_t	*set = t->t_cpc_set;
	kcpc_set_t	*newset;
	int		i;
	int		flag;
	int		err;

	ASSERT(RW_READ_HELD(&kcpc_cpuctx_lock));

	if (ctx == NULL) {
		/*
		 * This thread has a set but no context; it must be a
		 * CPU-bound set.
		 */
		ASSERT(t->t_cpc_set != NULL);
		ASSERT(t->t_cpc_set->ks_ctx->kc_cpuid != -1);
		return (EINVAL);
	} else if (ctx->kc_flags & KCPC_CTX_INVALID)
		return (EAGAIN);

	if (cmd == CPC_ENABLE) {
		if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
			return (EINVAL);
		kpreempt_disable();
		KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
		kcpc_restore(ctx);
		kpreempt_enable();
	} else if (cmd == CPC_DISABLE) {
		if (ctx->kc_flags & KCPC_CTX_FREEZE)
			return (EINVAL);
		kpreempt_disable();
		kcpc_save(ctx);
		KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
		kpreempt_enable();
	} else if (cmd == CPC_USR_EVENTS || cmd == CPC_SYS_EVENTS) {
		/*
		 * Strategy for usr/sys: stop counters and update set's presets
		 * with current counter values, unbind, update requests with
		 * new config, then re-bind.
		 */
		flag = (cmd == CPC_USR_EVENTS) ?
		    CPC_COUNT_USER: CPC_COUNT_SYSTEM;

		kpreempt_disable();
		KCPC_CTX_FLAG_SET(ctx,
		    KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
		pcbe_ops->pcbe_allstop();
		kpreempt_enable();

		for (i = 0; i < set->ks_nreqs; i++) {
			set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data);
			if (enable)
				set->ks_req[i].kr_flags |= flag;
			else
				set->ks_req[i].kr_flags &= ~flag;
		}
		newset = kcpc_dup_set(set);
		if (kcpc_unbind(set) != 0)
			return (EINVAL);
		t->t_cpc_set = newset;
		if (kcpc_bind_thread(newset, t, &err) != 0) {
			t->t_cpc_set = NULL;
			kcpc_free_set(newset);
			return (EINVAL);
		}
	} else
		return (EINVAL);

	return (0);
}

/*
 * Provide PCBEs with a way of obtaining the configs of every counter which will
 * be programmed together.
 *
 * If current is NULL, provide the first config.
 *
 * If data != NULL, caller wants to know where the data store associated with
 * the config we return is located.
 */
void *
kcpc_next_config(void *token, void *current, uint64_t **data)
{
	int i;
	kcpc_pic_t *pic;
	kcpc_ctx_t *ctx = (kcpc_ctx_t *)token;

	if (current == NULL) {
		/*
		 * Client would like the first config, which may not be in
		 * counter 0; we need to search through the counters for the
		 * first config.
		 */
		for (i = 0; i < cpc_ncounters; i++)
			if (ctx->kc_pics[i].kp_req != NULL)
				break;
		/*
		 * There are no counters configured for the given context.
		 */
		if (i == cpc_ncounters)
			return (NULL);
	} else {
		/*
		 * There surely is a faster way to do this.
		 */
		for (i = 0; i < cpc_ncounters; i++) {
			pic = &ctx->kc_pics[i];

			if (pic->kp_req != NULL &&
			    current == pic->kp_req->kr_config)
				break;
		}

		/*
		 * We found the current config at picnum i. Now search for the
		 * next configured PIC.
		 */
		for (i++; i < cpc_ncounters; i++) {
			pic = &ctx->kc_pics[i];
			if (pic->kp_req != NULL)
				break;
		}

		if (i == cpc_ncounters)
			return (NULL);
	}

	if (data != NULL) {
		*data = ctx->kc_pics[i].kp_req->kr_data;
	}

	return (ctx->kc_pics[i].kp_req->kr_config);
}


kcpc_ctx_t *
kcpc_ctx_alloc(int kmem_flags)
{
	kcpc_ctx_t	*ctx;
	long		hash;

	ctx = (kcpc_ctx_t *)kmem_zalloc(sizeof (kcpc_ctx_t), kmem_flags);
	if (ctx == NULL)
		return (NULL);

	hash = CPC_HASH_CTX(ctx);
	mutex_enter(&kcpc_ctx_llock[hash]);
	ctx->kc_next = kcpc_ctx_list[hash];
	kcpc_ctx_list[hash] = ctx;
	mutex_exit(&kcpc_ctx_llock[hash]);

	ctx->kc_pics = (kcpc_pic_t *)kmem_zalloc(sizeof (kcpc_pic_t) *
	    cpc_ncounters, KM_SLEEP);

	ctx->kc_cpuid = -1;

	return (ctx);
}
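/*
 * Every context allocated above is threaded onto the hash bucket chosen by
 * CPC_HASH_CTX() under the matching kcpc_ctx_llock[] mutex, and
 * kcpc_ctx_free() below unlinks it from the same bucket. A walker over a
 * bucket (such as the kcpc_invalidate_all() mentioned earlier) would take
 * roughly this shape (a sketch only):
 *
 *	mutex_enter(&kcpc_ctx_llock[hash]);
 *	for (ctx = kcpc_ctx_list[hash]; ctx != NULL; ctx = ctx->kc_next)
 *		...
 *	mutex_exit(&kcpc_ctx_llock[hash]);
 */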
/*
 * Copy set from ctx to the child context, cctx, if it has CPC_BIND_LWP_INHERIT
 * in the flags.
 */
static void
kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx)
{
	kcpc_set_t	*ks = ctx->kc_set, *cks;
	int		i, j;
	int		code;

	ASSERT(ks != NULL);

	if ((ks->ks_flags & CPC_BIND_LWP_INHERIT) == 0)
		return;

	cks = kmem_zalloc(sizeof (*cks), KM_SLEEP);
	cks->ks_state &= ~KCPC_SET_BOUND;
	cctx->kc_set = cks;
	cks->ks_flags = ks->ks_flags;
	cks->ks_nreqs = ks->ks_nreqs;
	cks->ks_req = kmem_alloc(cks->ks_nreqs *
	    sizeof (kcpc_request_t), KM_SLEEP);
	cks->ks_data = kmem_alloc(cks->ks_nreqs * sizeof (uint64_t),
	    KM_SLEEP);
	cks->ks_ctx = cctx;

	for (i = 0; i < cks->ks_nreqs; i++) {
		cks->ks_req[i].kr_index = ks->ks_req[i].kr_index;
		cks->ks_req[i].kr_picnum = ks->ks_req[i].kr_picnum;
		(void) strncpy(cks->ks_req[i].kr_event,
		    ks->ks_req[i].kr_event, CPC_MAX_EVENT_LEN);
		cks->ks_req[i].kr_preset = ks->ks_req[i].kr_preset;
		cks->ks_req[i].kr_flags = ks->ks_req[i].kr_flags;
		cks->ks_req[i].kr_nattrs = ks->ks_req[i].kr_nattrs;
		if (ks->ks_req[i].kr_nattrs > 0) {
			cks->ks_req[i].kr_attr =
			    kmem_alloc(ks->ks_req[i].kr_nattrs *
			    sizeof (kcpc_attr_t), KM_SLEEP);
		}
		for (j = 0; j < ks->ks_req[i].kr_nattrs; j++) {
			(void) strncpy(cks->ks_req[i].kr_attr[j].ka_name,
			    ks->ks_req[i].kr_attr[j].ka_name,
			    CPC_MAX_ATTR_LEN);
			cks->ks_req[i].kr_attr[j].ka_val =
			    ks->ks_req[i].kr_attr[j].ka_val;
		}
	}
	if (kcpc_configure_reqs(cctx, cks, &code) != 0)
		kcpc_invalidate_config(cctx);

	mutex_enter(&cks->ks_lock);
	cks->ks_state |= KCPC_SET_BOUND;
	cv_signal(&cks->ks_condv);
	mutex_exit(&cks->ks_lock);
}


void
kcpc_ctx_free(kcpc_ctx_t *ctx)
{
	kcpc_ctx_t	**loc;
	long		hash = CPC_HASH_CTX(ctx);

	mutex_enter(&kcpc_ctx_llock[hash]);
	loc = &kcpc_ctx_list[hash];
	ASSERT(*loc != NULL);
	while (*loc != ctx)
		loc = &(*loc)->kc_next;
	*loc = ctx->kc_next;
	mutex_exit(&kcpc_ctx_llock[hash]);

	kmem_free(ctx->kc_pics, cpc_ncounters * sizeof (kcpc_pic_t));
	cv_destroy(&ctx->kc_condv);
	mutex_destroy(&ctx->kc_lock);
	kmem_free(ctx, sizeof (*ctx));
}

/*
 * Generic interrupt handler used on hardware that generates
 * overflow interrupts.
 *
 * Note: executed at high-level interrupt context!
 */
/*ARGSUSED*/
kcpc_ctx_t *
kcpc_overflow_intr(caddr_t arg, uint64_t bitmap)
{
	kcpc_ctx_t	*ctx;
	kthread_t	*t = curthread;
	int		i;

	/*
	 * On both x86 and UltraSPARC, we may deliver the high-level
	 * interrupt in kernel mode, just after we've started to run an
	 * interrupt thread. (That's because the hardware helpfully
	 * delivers the overflow interrupt some random number of cycles
	 * after the instruction that caused the overflow by which time
	 * we're in some part of the kernel, not necessarily running on
	 * the right thread).
	 *
	 * Check for this case here -- find the pinned thread
	 * that was running when the interrupt went off.
	 */
	if (t->t_flag & T_INTR_THREAD) {
		klwp_t *lwp;

		atomic_add_32(&kcpc_intrctx_count, 1);

		/*
		 * Note that t_lwp is always set to point at the underlying
		 * thread, thus this will work in the presence of nested
		 * interrupts.
		 */
		ctx = NULL;
		if ((lwp = t->t_lwp) != NULL) {
			t = lwptot(lwp);
			ctx = t->t_cpc_ctx;
		}
	} else
		ctx = t->t_cpc_ctx;

	if (ctx == NULL) {
		/*
		 * This can easily happen if we're using the counters in
		 * "shared" mode, for example, and an overflow interrupt
		 * occurs while we are running cpustat. In that case, the
		 * bound thread that has the context that belongs to this
		 * CPU is almost certainly sleeping (if it was running on
		 * the CPU we'd have found it above), and the actual
		 * interrupted thread has no knowledge of performance counters!
		 */
		ctx = curthread->t_cpu->cpu_cpc_ctx;
		if (ctx != NULL) {
			/*
			 * Return the bound context for this CPU to
			 * the interrupt handler so that it can synchronously
			 * sample the hardware counters and restart them.
			 */
			return (ctx);
		}

		/*
		 * As long as the overflow interrupt really is delivered early
		 * enough after trapping into the kernel to avoid switching
		 * threads, we must always be able to find the cpc context,
		 * or something went terribly wrong, i.e. we ended up
		 * running a passivated interrupt thread, a kernel
		 * thread or we interrupted idle, all of which are Very Bad.
		 *
		 * We also could end up here owing to an incredibly unlikely
		 * race condition that exists on x86 based architectures when
		 * the cpc provider is in use; overflow interrupts are directed
		 * to the cpc provider if the 'dtrace_cpc_in_use' variable is
		 * set when we enter the handler. This variable is unset after
		 * overflow interrupts have been disabled on all CPUs and all
		 * contexts have been torn down. To stop interrupts, the cpc
		 * provider issues an xcall to the remote CPU before it tears
		 * down that CPU's context. As high priority xcalls, on an x86
		 * architecture, execute at a higher PIL than this handler, it
		 * is possible (though extremely unlikely) that the xcall could
		 * interrupt the overflow handler before the handler has
		 * checked the 'dtrace_cpc_in_use' variable, stop the counters,
		 * and return to the cpc provider, which could then rip down
		 * contexts and unset 'dtrace_cpc_in_use' *before* the CPU's
		 * overflow handler has had a chance to check the variable. In
		 * that case, the handler would direct the overflow into this
		 * code and no valid context will be found. The default behavior
		 * when no valid context is found is now to shout a warning to
		 * the console and bump the 'kcpc_nullctx_count' variable.
		 */
		if (kcpc_nullctx_panic)
			panic("null cpc context, thread %p", (void *)t);
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "null cpc context found in overflow handler!\n");
#endif
		atomic_add_32(&kcpc_nullctx_count, 1);
	} else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
		/*
		 * Schedule an ast to sample the counters, which will
		 * propagate any overflow into the virtualized performance
		 * counter(s), and may deliver a signal.
		 */
		ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
		/*
		 * If a counter has overflowed which was counting on behalf of
		 * a request which specified CPC_OVF_NOTIFY_EMT, send the
		 * process a signal.
		 */
		for (i = 0; i < cpc_ncounters; i++) {
			if (ctx->kc_pics[i].kp_req != NULL &&
			    bitmap & (1 << i) &&
			    ctx->kc_pics[i].kp_req->kr_flags &
			    CPC_OVF_NOTIFY_EMT) {
				/*
				 * A signal has been requested for this PIC,
				 * so freeze the context. The interrupt handler
				 * has already stopped the counter hardware.
				 */
				KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
				atomic_or_uint(&ctx->kc_pics[i].kp_flags,
				    KCPC_PIC_OVERFLOWED);
			}
		}
		aston(t);
	} else if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) {
		/*
		 * Thread context is no longer valid, but there may be a valid
		 * CPU context.
		 */
		return (curthread->t_cpu->cpu_cpc_ctx);
	}

	return (NULL);
}
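/*
 * To summarize the handler above: kcpc_overflow_intr() returns a context
 * pointer when the caller should sample and re-program the counters
 * synchronously (a CPU-bound context, or the CPU's context standing in for
 * an invalidated thread context), and returns NULL when an AST has been
 * posted to the owning lwp (or no usable context was found), in which case
 * the caller need only claim the interrupt.
 */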
/*
 * The current thread context had an overflow interrupt; we're
 * executing here in high-level interrupt context.
 */
/*ARGSUSED*/
uint_t
kcpc_hw_overflow_intr(caddr_t arg1, caddr_t arg2)
{
	kcpc_ctx_t	*ctx;
	uint64_t	bitmap;
	uint8_t		*state;
	int		save_spl;

	if (pcbe_ops == NULL ||
	    (bitmap = pcbe_ops->pcbe_overflow_bitmap()) == 0)
		return (DDI_INTR_UNCLAIMED);

	/*
	 * Prevent any further interrupts.
	 */
	pcbe_ops->pcbe_allstop();

	if (dtrace_cpc_in_use) {
		state = &cpu_core[CPU->cpu_id].cpuc_dcpc_intr_state;

		/*
		 * Set the per-CPU state bit to indicate that we are currently
		 * processing an interrupt if it is currently free. Drop the
		 * interrupt if the state isn't free (i.e. a configuration
		 * event is taking place).
		 */
		if (atomic_cas_8(state, DCPC_INTR_FREE,
		    DCPC_INTR_PROCESSING) == DCPC_INTR_FREE) {
			int i;
			kcpc_request_t req;

			ASSERT(dtrace_cpc_fire != NULL);

			(*dtrace_cpc_fire)(bitmap);

			ctx = curthread->t_cpu->cpu_cpc_ctx;
			if (ctx == NULL) {
#ifdef DEBUG
				cmn_err(CE_NOTE, "null cpc context in "
				    "hardware overflow handler!\n");
#endif
				return (DDI_INTR_CLAIMED);
			}

			/* Reset any counters that have overflowed */
			for (i = 0; i < ctx->kc_set->ks_nreqs; i++) {
				req = ctx->kc_set->ks_req[i];

				if (bitmap & (1 << req.kr_picnum)) {
					pcbe_ops->pcbe_configure(req.kr_picnum,
					    req.kr_event, req.kr_preset,
					    req.kr_flags, req.kr_nattrs,
					    req.kr_attr, &(req.kr_config),
					    (void *)ctx);
				}
			}
			pcbe_ops->pcbe_program(ctx);

			/*
			 * We've finished processing the interrupt so set
			 * the state back to free.
1118b9e93c10SJonathan Haslam */ 1119b9e93c10SJonathan Haslam cpu_core[CPU->cpu_id].cpuc_dcpc_intr_state = 1120b9e93c10SJonathan Haslam DCPC_INTR_FREE; 1121b9e93c10SJonathan Haslam membar_producer(); 1122b9e93c10SJonathan Haslam } 1123b9e93c10SJonathan Haslam return (DDI_INTR_CLAIMED); 1124b9e93c10SJonathan Haslam } 1125b9e93c10SJonathan Haslam 1126b9e93c10SJonathan Haslam /* 1127b9e93c10SJonathan Haslam * DTrace isn't involved so pass on accordingly. 11287c478bd9Sstevel@tonic-gate * 11297c478bd9Sstevel@tonic-gate * If the interrupt has occurred in the context of an lwp owning 11307c478bd9Sstevel@tonic-gate * the counters, then the handler posts an AST to the lwp to 11317c478bd9Sstevel@tonic-gate * trigger the actual sampling, and optionally deliver a signal or 11327c478bd9Sstevel@tonic-gate * restart the counters, on the way out of the kernel using 11337c478bd9Sstevel@tonic-gate * kcpc_hw_overflow_ast() (see below). 11347c478bd9Sstevel@tonic-gate * 11357c478bd9Sstevel@tonic-gate * On the other hand, if the handler returns the context to us 11367c478bd9Sstevel@tonic-gate * directly, then it means that there are no other threads in 11377c478bd9Sstevel@tonic-gate * the middle of updating it, no AST has been posted, and so we 11387c478bd9Sstevel@tonic-gate * should sample the counters here, and restart them with no 11397c478bd9Sstevel@tonic-gate * further fuss. 1140*b885580bSAlexander Kolbasov * 1141*b885580bSAlexander Kolbasov * The CPU's CPC context may disappear as a result of cross-call which 1142*b885580bSAlexander Kolbasov * has higher PIL on x86, so protect the context by raising PIL to the 1143*b885580bSAlexander Kolbasov * cross-call level. 11447c478bd9Sstevel@tonic-gate */ 1145*b885580bSAlexander Kolbasov save_spl = spl_xcall(); 11467c478bd9Sstevel@tonic-gate if ((ctx = kcpc_overflow_intr(arg1, bitmap)) != NULL) { 11477c478bd9Sstevel@tonic-gate uint64_t curtick = KCPC_GET_TICK(); 11487c478bd9Sstevel@tonic-gate 11497c478bd9Sstevel@tonic-gate ctx->kc_hrtime = gethrtime_waitfree(); 11507c478bd9Sstevel@tonic-gate ctx->kc_vtick += curtick - ctx->kc_rawtick; 11517c478bd9Sstevel@tonic-gate ctx->kc_rawtick = curtick; 11527c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_sample(ctx); 11537c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_program(ctx); 11547c478bd9Sstevel@tonic-gate } 1155*b885580bSAlexander Kolbasov splx(save_spl); 11567c478bd9Sstevel@tonic-gate 11577c478bd9Sstevel@tonic-gate return (DDI_INTR_CLAIMED); 11587c478bd9Sstevel@tonic-gate } 11597c478bd9Sstevel@tonic-gate 11607c478bd9Sstevel@tonic-gate /* 11617c478bd9Sstevel@tonic-gate * Called from trap() when processing the ast posted by the high-level 11627c478bd9Sstevel@tonic-gate * interrupt handler. 11637c478bd9Sstevel@tonic-gate */ 11647c478bd9Sstevel@tonic-gate int 11657c478bd9Sstevel@tonic-gate kcpc_overflow_ast() 11667c478bd9Sstevel@tonic-gate { 11677c478bd9Sstevel@tonic-gate kcpc_ctx_t *ctx = curthread->t_cpc_ctx; 11687c478bd9Sstevel@tonic-gate int i; 11697c478bd9Sstevel@tonic-gate int found = 0; 11707c478bd9Sstevel@tonic-gate uint64_t curtick = KCPC_GET_TICK(); 11717c478bd9Sstevel@tonic-gate 11727c478bd9Sstevel@tonic-gate ASSERT(ctx != NULL); /* Beware of interrupt skid. */ 11737c478bd9Sstevel@tonic-gate 11747c478bd9Sstevel@tonic-gate /* 11757c478bd9Sstevel@tonic-gate * An overflow happened: sample the context to ensure that 11767c478bd9Sstevel@tonic-gate * the overflow is propagated into the upper bits of the 11777c478bd9Sstevel@tonic-gate * virtualized 64-bit counter(s). 
11787c478bd9Sstevel@tonic-gate */ 11797c478bd9Sstevel@tonic-gate kpreempt_disable(); 11807c478bd9Sstevel@tonic-gate ctx->kc_hrtime = gethrtime_waitfree(); 11817c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_sample(ctx); 11827c478bd9Sstevel@tonic-gate kpreempt_enable(); 11837c478bd9Sstevel@tonic-gate 11847c478bd9Sstevel@tonic-gate ctx->kc_vtick += curtick - ctx->kc_rawtick; 11857c478bd9Sstevel@tonic-gate 11867c478bd9Sstevel@tonic-gate /* 11877c478bd9Sstevel@tonic-gate * The interrupt handler has marked any pics with KCPC_PIC_OVERFLOWED 11887c478bd9Sstevel@tonic-gate * if that pic generated an overflow and if the request it was counting 11897c478bd9Sstevel@tonic-gate * on behalf of had CPC_OVERFLOW_REQUEST specified. We go through all 11907c478bd9Sstevel@tonic-gate * pics in the context and clear the KCPC_PIC_OVERFLOWED flags. If we 11917c478bd9Sstevel@tonic-gate * found any overflowed pics, keep the context frozen and return true 11927c478bd9Sstevel@tonic-gate * (thus causing a signal to be sent). 11937c478bd9Sstevel@tonic-gate */ 11947c478bd9Sstevel@tonic-gate for (i = 0; i < cpc_ncounters; i++) { 11957c478bd9Sstevel@tonic-gate if (ctx->kc_pics[i].kp_flags & KCPC_PIC_OVERFLOWED) { 11967c478bd9Sstevel@tonic-gate atomic_and_uint(&ctx->kc_pics[i].kp_flags, 11977c478bd9Sstevel@tonic-gate ~KCPC_PIC_OVERFLOWED); 11987c478bd9Sstevel@tonic-gate found = 1; 11997c478bd9Sstevel@tonic-gate } 12007c478bd9Sstevel@tonic-gate } 12017c478bd9Sstevel@tonic-gate if (found) 12027c478bd9Sstevel@tonic-gate return (1); 12037c478bd9Sstevel@tonic-gate 12047c478bd9Sstevel@tonic-gate /* 12057c478bd9Sstevel@tonic-gate * Otherwise, re-enable the counters and continue life as before. 12067c478bd9Sstevel@tonic-gate */ 12077c478bd9Sstevel@tonic-gate kpreempt_disable(); 1208*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE); 12097c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_program(ctx); 12107c478bd9Sstevel@tonic-gate kpreempt_enable(); 12117c478bd9Sstevel@tonic-gate return (0); 12127c478bd9Sstevel@tonic-gate } 12137c478bd9Sstevel@tonic-gate 12147c478bd9Sstevel@tonic-gate /* 12157c478bd9Sstevel@tonic-gate * Called when switching away from current thread. 12167c478bd9Sstevel@tonic-gate */ 12177c478bd9Sstevel@tonic-gate static void 12187c478bd9Sstevel@tonic-gate kcpc_save(kcpc_ctx_t *ctx) 12197c478bd9Sstevel@tonic-gate { 1220*b885580bSAlexander Kolbasov int err; 1221*b885580bSAlexander Kolbasov int save_spl; 1222*b885580bSAlexander Kolbasov 1223*b885580bSAlexander Kolbasov kpreempt_disable(); 1224*b885580bSAlexander Kolbasov save_spl = spl_xcall(); 1225*b885580bSAlexander Kolbasov 12267c478bd9Sstevel@tonic-gate if (ctx->kc_flags & KCPC_CTX_INVALID) { 1227*b885580bSAlexander Kolbasov if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) { 1228*b885580bSAlexander Kolbasov splx(save_spl); 1229*b885580bSAlexander Kolbasov kpreempt_enable(); 12307c478bd9Sstevel@tonic-gate return; 1231*b885580bSAlexander Kolbasov } 12327c478bd9Sstevel@tonic-gate /* 12337c478bd9Sstevel@tonic-gate * This context has been invalidated but the counters have not 12347c478bd9Sstevel@tonic-gate * been stopped. Stop them here and mark the context stopped. 
12357c478bd9Sstevel@tonic-gate */ 1236*b885580bSAlexander Kolbasov kcpc_unprogram(ctx, B_TRUE); 1237*b885580bSAlexander Kolbasov splx(save_spl); 1238*b885580bSAlexander Kolbasov kpreempt_enable(); 12397c478bd9Sstevel@tonic-gate return; 12407c478bd9Sstevel@tonic-gate } 12417c478bd9Sstevel@tonic-gate 12427c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_allstop(); 1243*b885580bSAlexander Kolbasov if (ctx->kc_flags & KCPC_CTX_FREEZE) { 1244*b885580bSAlexander Kolbasov splx(save_spl); 1245*b885580bSAlexander Kolbasov kpreempt_enable(); 12467c478bd9Sstevel@tonic-gate return; 1247*b885580bSAlexander Kolbasov } 12487c478bd9Sstevel@tonic-gate 12497c478bd9Sstevel@tonic-gate /* 12507c478bd9Sstevel@tonic-gate * Need to sample for all reqs into each req's current mpic. 12517c478bd9Sstevel@tonic-gate */ 1252*b885580bSAlexander Kolbasov ctx->kc_hrtime = gethrtime_waitfree(); 12537c478bd9Sstevel@tonic-gate ctx->kc_vtick += KCPC_GET_TICK() - ctx->kc_rawtick; 12547c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_sample(ctx); 1255*b885580bSAlexander Kolbasov 1256*b885580bSAlexander Kolbasov /* 1257*b885580bSAlexander Kolbasov * Program counter for measuring capacity and utilization since user 1258*b885580bSAlexander Kolbasov * thread isn't using counter anymore 1259*b885580bSAlexander Kolbasov */ 1260*b885580bSAlexander Kolbasov ASSERT(ctx->kc_cpuid == -1); 1261*b885580bSAlexander Kolbasov cu_cpc_program(CPU, &err); 1262*b885580bSAlexander Kolbasov splx(save_spl); 1263*b885580bSAlexander Kolbasov kpreempt_enable(); 12647c478bd9Sstevel@tonic-gate } 12657c478bd9Sstevel@tonic-gate 12667c478bd9Sstevel@tonic-gate static void 12677c478bd9Sstevel@tonic-gate kcpc_restore(kcpc_ctx_t *ctx) 12687c478bd9Sstevel@tonic-gate { 1269*b885580bSAlexander Kolbasov int save_spl; 1270*b885580bSAlexander Kolbasov 12714568bee7Strevtom mutex_enter(&ctx->kc_lock); 1272*b885580bSAlexander Kolbasov 12737c478bd9Sstevel@tonic-gate if ((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED)) == 1274*b885580bSAlexander Kolbasov KCPC_CTX_INVALID) { 12757c478bd9Sstevel@tonic-gate /* 12767c478bd9Sstevel@tonic-gate * The context is invalidated but has not been marked stopped. 12777c478bd9Sstevel@tonic-gate * We mark it as such here because we will not start the 12787c478bd9Sstevel@tonic-gate * counters during this context switch. 12797c478bd9Sstevel@tonic-gate */ 1280*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID_STOPPED); 1281*b885580bSAlexander Kolbasov } 12827c478bd9Sstevel@tonic-gate 12834568bee7Strevtom if (ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_FREEZE)) { 12844568bee7Strevtom mutex_exit(&ctx->kc_lock); 12857c478bd9Sstevel@tonic-gate return; 12864568bee7Strevtom } 12874568bee7Strevtom 12884568bee7Strevtom /* 12894568bee7Strevtom * Set kc_flags to show that a kcpc_restore() is in progress to avoid 12904568bee7Strevtom * ctx & set related memory objects being freed without us knowing. 12914568bee7Strevtom * This can happen if an agent thread is executing a kcpc_unbind(), 12924568bee7Strevtom * with this thread as the target, whilst we're concurrently doing a 12934568bee7Strevtom * restorectx() during, for example, a proc_exit(). Effectively, by 12944568bee7Strevtom * doing this, we're asking kcpc_free() to cv_wait() until 12954568bee7Strevtom * kcpc_restore() has completed. 
12964568bee7Strevtom */ 1297*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_RESTORE); 12984568bee7Strevtom mutex_exit(&ctx->kc_lock); 12997c478bd9Sstevel@tonic-gate 13007c478bd9Sstevel@tonic-gate /* 13017c478bd9Sstevel@tonic-gate * While programming the hardware, the counters should be stopped. We 13027c478bd9Sstevel@tonic-gate * don't do an explicit pcbe_allstop() here because they should have 13037c478bd9Sstevel@tonic-gate * been stopped already by the last consumer. 13047c478bd9Sstevel@tonic-gate */ 1305*b885580bSAlexander Kolbasov kpreempt_disable(); 1306*b885580bSAlexander Kolbasov save_spl = spl_xcall(); 1307*b885580bSAlexander Kolbasov kcpc_program(ctx, B_TRUE, B_TRUE); 1308*b885580bSAlexander Kolbasov splx(save_spl); 1309*b885580bSAlexander Kolbasov kpreempt_enable(); 13104568bee7Strevtom 13114568bee7Strevtom /* 13124568bee7Strevtom * Wake the agent thread if it's waiting in kcpc_free(). 13134568bee7Strevtom */ 13144568bee7Strevtom mutex_enter(&ctx->kc_lock); 1315*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_RESTORE); 13164568bee7Strevtom cv_signal(&ctx->kc_condv); 13174568bee7Strevtom mutex_exit(&ctx->kc_lock); 13187c478bd9Sstevel@tonic-gate } 13197c478bd9Sstevel@tonic-gate 13207c478bd9Sstevel@tonic-gate /* 13217c478bd9Sstevel@tonic-gate * If kcpc_counts_include_idle is set to 0 by the sys admin, we add the 13227c478bd9Sstevel@tonic-gate * following context operators to the idle thread on each CPU. They stop the 13237c478bd9Sstevel@tonic-gate * counters when the idle thread is switched on, and they start them again when 13247c478bd9Sstevel@tonic-gate * it is switched off. 13257c478bd9Sstevel@tonic-gate */ 13267c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 13277c478bd9Sstevel@tonic-gate void 13287c478bd9Sstevel@tonic-gate kcpc_idle_save(struct cpu *cp) 13297c478bd9Sstevel@tonic-gate { 13307c478bd9Sstevel@tonic-gate /* 13317c478bd9Sstevel@tonic-gate * The idle thread shouldn't be run anywhere else. 13327c478bd9Sstevel@tonic-gate */ 13337c478bd9Sstevel@tonic-gate ASSERT(CPU == cp); 13347c478bd9Sstevel@tonic-gate 13357c478bd9Sstevel@tonic-gate /* 13367c478bd9Sstevel@tonic-gate * We must hold the CPU's context lock to ensure the context isn't freed 13377c478bd9Sstevel@tonic-gate * while we're looking at it. 13387c478bd9Sstevel@tonic-gate */ 13397c478bd9Sstevel@tonic-gate mutex_enter(&cp->cpu_cpc_ctxlock); 13407c478bd9Sstevel@tonic-gate 13417c478bd9Sstevel@tonic-gate if ((cp->cpu_cpc_ctx == NULL) || 13427c478bd9Sstevel@tonic-gate (cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) { 13437c478bd9Sstevel@tonic-gate mutex_exit(&cp->cpu_cpc_ctxlock); 13447c478bd9Sstevel@tonic-gate return; 13457c478bd9Sstevel@tonic-gate } 13467c478bd9Sstevel@tonic-gate 13477c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_program(cp->cpu_cpc_ctx); 13487c478bd9Sstevel@tonic-gate mutex_exit(&cp->cpu_cpc_ctxlock); 13497c478bd9Sstevel@tonic-gate } 13507c478bd9Sstevel@tonic-gate 13517c478bd9Sstevel@tonic-gate void 13527c478bd9Sstevel@tonic-gate kcpc_idle_restore(struct cpu *cp) 13537c478bd9Sstevel@tonic-gate { 13547c478bd9Sstevel@tonic-gate /* 13557c478bd9Sstevel@tonic-gate * The idle thread shouldn't be run anywhere else. 13567c478bd9Sstevel@tonic-gate */ 13577c478bd9Sstevel@tonic-gate ASSERT(CPU == cp); 13587c478bd9Sstevel@tonic-gate 13597c478bd9Sstevel@tonic-gate /* 13607c478bd9Sstevel@tonic-gate * We must hold the CPU's context lock to ensure the context isn't freed 13617c478bd9Sstevel@tonic-gate * while we're looking at it.
13627c478bd9Sstevel@tonic-gate */ 13637c478bd9Sstevel@tonic-gate mutex_enter(&cp->cpu_cpc_ctxlock); 13647c478bd9Sstevel@tonic-gate 13657c478bd9Sstevel@tonic-gate if ((cp->cpu_cpc_ctx == NULL) || 13667c478bd9Sstevel@tonic-gate (cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) { 13677c478bd9Sstevel@tonic-gate mutex_exit(&cp->cpu_cpc_ctxlock); 13687c478bd9Sstevel@tonic-gate return; 13697c478bd9Sstevel@tonic-gate } 13707c478bd9Sstevel@tonic-gate 13717c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_allstop(); 13727c478bd9Sstevel@tonic-gate mutex_exit(&cp->cpu_cpc_ctxlock); 13737c478bd9Sstevel@tonic-gate } 13747c478bd9Sstevel@tonic-gate 13757c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 13767c478bd9Sstevel@tonic-gate static void 13777c478bd9Sstevel@tonic-gate kcpc_lwp_create(kthread_t *t, kthread_t *ct) 13787c478bd9Sstevel@tonic-gate { 13797c478bd9Sstevel@tonic-gate kcpc_ctx_t *ctx = t->t_cpc_ctx, *cctx; 13807c478bd9Sstevel@tonic-gate int i; 13817c478bd9Sstevel@tonic-gate 13827c478bd9Sstevel@tonic-gate if (ctx == NULL || (ctx->kc_flags & KCPC_CTX_LWPINHERIT) == 0) 13837c478bd9Sstevel@tonic-gate return; 13847c478bd9Sstevel@tonic-gate 13857c478bd9Sstevel@tonic-gate rw_enter(&kcpc_cpuctx_lock, RW_READER); 13867c478bd9Sstevel@tonic-gate if (ctx->kc_flags & KCPC_CTX_INVALID) { 13877c478bd9Sstevel@tonic-gate rw_exit(&kcpc_cpuctx_lock); 13887c478bd9Sstevel@tonic-gate return; 13897c478bd9Sstevel@tonic-gate } 1390*b885580bSAlexander Kolbasov cctx = kcpc_ctx_alloc(KM_SLEEP); 13917c478bd9Sstevel@tonic-gate kcpc_ctx_clone(ctx, cctx); 13927c478bd9Sstevel@tonic-gate rw_exit(&kcpc_cpuctx_lock); 13937c478bd9Sstevel@tonic-gate 13948d4e547dSae112802 /* 13958d4e547dSae112802 * Copy the parent context's kc_flags field, but don't overwrite 13968d4e547dSae112802 * the child's in case it was modified during kcpc_ctx_clone. 13978d4e547dSae112802 */ 1398*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(cctx, ctx->kc_flags); 13997c478bd9Sstevel@tonic-gate cctx->kc_thread = ct; 14007c478bd9Sstevel@tonic-gate cctx->kc_cpuid = -1; 14017c478bd9Sstevel@tonic-gate ct->t_cpc_set = cctx->kc_set; 14027c478bd9Sstevel@tonic-gate ct->t_cpc_ctx = cctx; 14037c478bd9Sstevel@tonic-gate 14047c478bd9Sstevel@tonic-gate if (cctx->kc_flags & KCPC_CTX_SIGOVF) { 14057c478bd9Sstevel@tonic-gate kcpc_set_t *ks = cctx->kc_set; 14067c478bd9Sstevel@tonic-gate /* 14077c478bd9Sstevel@tonic-gate * Our contract with the user requires us to immediately send an 14087c478bd9Sstevel@tonic-gate * overflow signal to all children if we have the LWPINHERIT 14097c478bd9Sstevel@tonic-gate * and SIGOVF flags set. In addition, all counters should be 14107c478bd9Sstevel@tonic-gate * set to UINT64_MAX, and their pic's overflow flag turned on 14117c478bd9Sstevel@tonic-gate * so that our trap() processing knows to send a signal. 
14127c478bd9Sstevel@tonic-gate */ 1413*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE); 14147c478bd9Sstevel@tonic-gate for (i = 0; i < ks->ks_nreqs; i++) { 14157c478bd9Sstevel@tonic-gate kcpc_request_t *kr = &ks->ks_req[i]; 14167c478bd9Sstevel@tonic-gate 14177c478bd9Sstevel@tonic-gate if (kr->kr_flags & CPC_OVF_NOTIFY_EMT) { 14187c478bd9Sstevel@tonic-gate *(kr->kr_data) = UINT64_MAX; 1419*b885580bSAlexander Kolbasov atomic_or_uint(&kr->kr_picp->kp_flags, 1420*b885580bSAlexander Kolbasov KCPC_PIC_OVERFLOWED); 14217c478bd9Sstevel@tonic-gate } 14227c478bd9Sstevel@tonic-gate } 14237c478bd9Sstevel@tonic-gate ttolwp(ct)->lwp_pcb.pcb_flags |= CPC_OVERFLOW; 14247c478bd9Sstevel@tonic-gate aston(ct); 14257c478bd9Sstevel@tonic-gate } 14267c478bd9Sstevel@tonic-gate 14277c478bd9Sstevel@tonic-gate installctx(ct, cctx, kcpc_save, kcpc_restore, 14287c478bd9Sstevel@tonic-gate NULL, kcpc_lwp_create, NULL, kcpc_free); 14297c478bd9Sstevel@tonic-gate } 14307c478bd9Sstevel@tonic-gate 14317c478bd9Sstevel@tonic-gate /* 14327c478bd9Sstevel@tonic-gate * Counter Stoppage Theory 14337c478bd9Sstevel@tonic-gate * 14347c478bd9Sstevel@tonic-gate * The counters may need to be stopped properly at the following occasions: 14357c478bd9Sstevel@tonic-gate * 14367c478bd9Sstevel@tonic-gate * 1) An LWP exits. 14377c478bd9Sstevel@tonic-gate * 2) A thread exits. 14387c478bd9Sstevel@tonic-gate * 3) An LWP performs an exec(). 14397c478bd9Sstevel@tonic-gate * 4) A bound set is unbound. 14407c478bd9Sstevel@tonic-gate * 14417c478bd9Sstevel@tonic-gate * In addition to stopping the counters, the CPC context (a kcpc_ctx_t) may need 14427c478bd9Sstevel@tonic-gate * to be freed as well. 14437c478bd9Sstevel@tonic-gate * 14447c478bd9Sstevel@tonic-gate * Case 1: kcpc_passivate(), called via lwp_exit(), stops the counters. Later on 14457c478bd9Sstevel@tonic-gate * when the thread is freed, kcpc_free(), called by freectx(), frees the 14467c478bd9Sstevel@tonic-gate * context. 14477c478bd9Sstevel@tonic-gate * 14487c478bd9Sstevel@tonic-gate * Case 2: same as case 1 except kcpc_passivate is called from thread_exit(). 14497c478bd9Sstevel@tonic-gate * 14507c478bd9Sstevel@tonic-gate * Case 3: kcpc_free(), called via freectx() via exec(), recognizes that it has 14517c478bd9Sstevel@tonic-gate * been called from exec. It stops the counters _and_ frees the context. 14527c478bd9Sstevel@tonic-gate * 14537c478bd9Sstevel@tonic-gate * Case 4: kcpc_unbind() stops the hardware _and_ frees the context. 14547c478bd9Sstevel@tonic-gate * 14557c478bd9Sstevel@tonic-gate * CPU-bound counters are always stopped via kcpc_unbind(). 14567c478bd9Sstevel@tonic-gate */ 14577c478bd9Sstevel@tonic-gate 14587c478bd9Sstevel@tonic-gate /* 14597c478bd9Sstevel@tonic-gate * We're being called to delete the context; we ensure that all associated data 14607c478bd9Sstevel@tonic-gate * structures are freed, and that the hardware is passivated if this is an exec. 14617c478bd9Sstevel@tonic-gate */ 14627c478bd9Sstevel@tonic-gate 14637c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 1464*b885580bSAlexander Kolbasov void 14657c478bd9Sstevel@tonic-gate kcpc_free(kcpc_ctx_t *ctx, int isexec) 14667c478bd9Sstevel@tonic-gate { 14677c478bd9Sstevel@tonic-gate int i; 14687c478bd9Sstevel@tonic-gate kcpc_set_t *set = ctx->kc_set; 14697c478bd9Sstevel@tonic-gate 14707c478bd9Sstevel@tonic-gate ASSERT(set != NULL); 14717c478bd9Sstevel@tonic-gate 14724568bee7Strevtom /* 14734568bee7Strevtom * Wait for kcpc_restore() to finish before we tear things down. 
14744568bee7Strevtom */ 14754568bee7Strevtom mutex_enter(&ctx->kc_lock); 14764568bee7Strevtom while (ctx->kc_flags & KCPC_CTX_RESTORE) 14774568bee7Strevtom cv_wait(&ctx->kc_condv, &ctx->kc_lock); 1478*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID); 14794568bee7Strevtom mutex_exit(&ctx->kc_lock); 14807c478bd9Sstevel@tonic-gate 14817c478bd9Sstevel@tonic-gate if (isexec) { 14827c478bd9Sstevel@tonic-gate /* 14837c478bd9Sstevel@tonic-gate * This thread is execing, and after the exec it should not have 14847c478bd9Sstevel@tonic-gate * any performance counter context. Stop the counters properly 14857c478bd9Sstevel@tonic-gate * here so the system isn't surprised by an overflow interrupt 14867c478bd9Sstevel@tonic-gate * later. 14877c478bd9Sstevel@tonic-gate */ 14887c478bd9Sstevel@tonic-gate if (ctx->kc_cpuid != -1) { 14897c478bd9Sstevel@tonic-gate cpu_t *cp; 14907c478bd9Sstevel@tonic-gate /* 14917c478bd9Sstevel@tonic-gate * CPU-bound context; stop the appropriate CPU's ctrs. 14927c478bd9Sstevel@tonic-gate * Hold cpu_lock while examining the CPU to ensure it 14937c478bd9Sstevel@tonic-gate * doesn't go away. 14947c478bd9Sstevel@tonic-gate */ 14957c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock); 14967c478bd9Sstevel@tonic-gate cp = cpu_get(ctx->kc_cpuid); 14977c478bd9Sstevel@tonic-gate /* 14987c478bd9Sstevel@tonic-gate * The CPU could have been DR'd out, so only stop the 14997c478bd9Sstevel@tonic-gate * CPU and clear its context pointer if the CPU still 15007c478bd9Sstevel@tonic-gate * exists. 15017c478bd9Sstevel@tonic-gate */ 15027c478bd9Sstevel@tonic-gate if (cp != NULL) { 15037c478bd9Sstevel@tonic-gate mutex_enter(&cp->cpu_cpc_ctxlock); 15047c478bd9Sstevel@tonic-gate kcpc_stop_hw(ctx); 15057c478bd9Sstevel@tonic-gate mutex_exit(&cp->cpu_cpc_ctxlock); 15067c478bd9Sstevel@tonic-gate } 15077c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock); 15087c478bd9Sstevel@tonic-gate ASSERT(curthread->t_cpc_ctx == NULL); 15097c478bd9Sstevel@tonic-gate } else { 1510*b885580bSAlexander Kolbasov int save_spl; 1511*b885580bSAlexander Kolbasov 15127c478bd9Sstevel@tonic-gate /* 15137c478bd9Sstevel@tonic-gate * Thread-bound context; stop _this_ CPU's counters. 15147c478bd9Sstevel@tonic-gate */ 15157c478bd9Sstevel@tonic-gate kpreempt_disable(); 1516*b885580bSAlexander Kolbasov save_spl = spl_xcall(); 1517*b885580bSAlexander Kolbasov kcpc_unprogram(ctx, B_TRUE); 15187c478bd9Sstevel@tonic-gate curthread->t_cpc_ctx = NULL; 1519*b885580bSAlexander Kolbasov splx(save_spl); 1520*b885580bSAlexander Kolbasov kpreempt_enable(); 15217c478bd9Sstevel@tonic-gate } 15227c478bd9Sstevel@tonic-gate 15237c478bd9Sstevel@tonic-gate /* 15247c478bd9Sstevel@tonic-gate * Since we are being called from an exec and we know that 15257c478bd9Sstevel@tonic-gate * exec is not permitted via the agent thread, we should clean 15267c478bd9Sstevel@tonic-gate * up this thread's CPC state completely, and not leave dangling 15277c478bd9Sstevel@tonic-gate * CPC pointers behind. 15287c478bd9Sstevel@tonic-gate */ 15297c478bd9Sstevel@tonic-gate ASSERT(ctx->kc_thread == curthread); 15307c478bd9Sstevel@tonic-gate curthread->t_cpc_set = NULL; 15317c478bd9Sstevel@tonic-gate } 15327c478bd9Sstevel@tonic-gate 15337c478bd9Sstevel@tonic-gate /* 15347c478bd9Sstevel@tonic-gate * Walk through each request in this context's set and free the PCBE's 15357c478bd9Sstevel@tonic-gate * configuration if it exists. 
15367c478bd9Sstevel@tonic-gate */ 15377c478bd9Sstevel@tonic-gate for (i = 0; i < set->ks_nreqs; i++) { 15387c478bd9Sstevel@tonic-gate if (set->ks_req[i].kr_config != NULL) 15397c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_free(set->ks_req[i].kr_config); 15407c478bd9Sstevel@tonic-gate } 15417c478bd9Sstevel@tonic-gate 15427c478bd9Sstevel@tonic-gate kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t)); 15437c478bd9Sstevel@tonic-gate kcpc_ctx_free(ctx); 15447c478bd9Sstevel@tonic-gate kcpc_free_set(set); 15457c478bd9Sstevel@tonic-gate } 15467c478bd9Sstevel@tonic-gate 15477c478bd9Sstevel@tonic-gate /* 15487c478bd9Sstevel@tonic-gate * Free the memory associated with a request set. 15497c478bd9Sstevel@tonic-gate */ 15507c478bd9Sstevel@tonic-gate void 15517c478bd9Sstevel@tonic-gate kcpc_free_set(kcpc_set_t *set) 15527c478bd9Sstevel@tonic-gate { 15537c478bd9Sstevel@tonic-gate int i; 15547c478bd9Sstevel@tonic-gate kcpc_request_t *req; 15557c478bd9Sstevel@tonic-gate 15567c478bd9Sstevel@tonic-gate ASSERT(set->ks_req != NULL); 15577c478bd9Sstevel@tonic-gate 15587c478bd9Sstevel@tonic-gate for (i = 0; i < set->ks_nreqs; i++) { 15597c478bd9Sstevel@tonic-gate req = &set->ks_req[i]; 15607c478bd9Sstevel@tonic-gate 15617c478bd9Sstevel@tonic-gate if (req->kr_nattrs != 0) { 15627c478bd9Sstevel@tonic-gate kmem_free(req->kr_attr, 15637c478bd9Sstevel@tonic-gate req->kr_nattrs * sizeof (kcpc_attr_t)); 15647c478bd9Sstevel@tonic-gate } 15657c478bd9Sstevel@tonic-gate } 15667c478bd9Sstevel@tonic-gate 15677c478bd9Sstevel@tonic-gate kmem_free(set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs); 15684568bee7Strevtom cv_destroy(&set->ks_condv); 15694568bee7Strevtom mutex_destroy(&set->ks_lock); 15707c478bd9Sstevel@tonic-gate kmem_free(set, sizeof (kcpc_set_t)); 15717c478bd9Sstevel@tonic-gate } 15727c478bd9Sstevel@tonic-gate 15737c478bd9Sstevel@tonic-gate /* 15747c478bd9Sstevel@tonic-gate * Grab every existing context and mark it as invalid. 15757c478bd9Sstevel@tonic-gate */ 15767c478bd9Sstevel@tonic-gate void 15777c478bd9Sstevel@tonic-gate kcpc_invalidate_all(void) 15787c478bd9Sstevel@tonic-gate { 15797c478bd9Sstevel@tonic-gate kcpc_ctx_t *ctx; 15807c478bd9Sstevel@tonic-gate long hash; 15817c478bd9Sstevel@tonic-gate 15827c478bd9Sstevel@tonic-gate for (hash = 0; hash < CPC_HASH_BUCKETS; hash++) { 15837c478bd9Sstevel@tonic-gate mutex_enter(&kcpc_ctx_llock[hash]); 15847c478bd9Sstevel@tonic-gate for (ctx = kcpc_ctx_list[hash]; ctx; ctx = ctx->kc_next) 1585*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID); 15867c478bd9Sstevel@tonic-gate mutex_exit(&kcpc_ctx_llock[hash]); 15877c478bd9Sstevel@tonic-gate } 15887c478bd9Sstevel@tonic-gate } 15897c478bd9Sstevel@tonic-gate 15907c478bd9Sstevel@tonic-gate /* 15918d4e547dSae112802 * Interface for PCBEs to signal that an existing configuration has suddenly 15928d4e547dSae112802 * become invalid. 
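 */

/*
 * Hypothetical sketch (added for exposition, not part of the original
 * source): a PCBE can remember the opaque token it was handed as the last
 * argument of its pcbe_configure() entry point and hand that token back to
 * kcpc_invalidate_config() if its saved configuration ever becomes
 * meaningless.  The structure and function names below are invented for
 * illustration only.
 */
#if 0	/* example only; never compiled as part of this file */
/* per-request state a PCBE might keep alongside its control-register value */
typedef struct example_pcbe_config {
	uint64_t	epc_ctl;	/* value programmed into the control register */
	void		*epc_token;	/* token saved from pcbe_configure() */
} example_pcbe_config_t;

static void
example_pcbe_config_gone_stale(example_pcbe_config_t *cfg)
{
	/* tell the framework to stop trusting any set bound to this config */
	kcpc_invalidate_config(cfg->epc_token);
}
#endif

/* end of illustrative sketch; the original code resumes below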
15938d4e547dSae112802 */ 15948d4e547dSae112802 void 15958d4e547dSae112802 kcpc_invalidate_config(void *token) 15968d4e547dSae112802 { 15978d4e547dSae112802 kcpc_ctx_t *ctx = token; 15988d4e547dSae112802 15998d4e547dSae112802 ASSERT(ctx != NULL); 16008d4e547dSae112802 1601*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID); 16028d4e547dSae112802 } 16038d4e547dSae112802 16048d4e547dSae112802 /* 16057c478bd9Sstevel@tonic-gate * Called from lwp_exit() and thread_exit() 16067c478bd9Sstevel@tonic-gate */ 16077c478bd9Sstevel@tonic-gate void 16087c478bd9Sstevel@tonic-gate kcpc_passivate(void) 16097c478bd9Sstevel@tonic-gate { 16107c478bd9Sstevel@tonic-gate kcpc_ctx_t *ctx = curthread->t_cpc_ctx; 16117c478bd9Sstevel@tonic-gate kcpc_set_t *set = curthread->t_cpc_set; 1612*b885580bSAlexander Kolbasov int save_spl; 16137c478bd9Sstevel@tonic-gate 16147c478bd9Sstevel@tonic-gate if (set == NULL) 16157c478bd9Sstevel@tonic-gate return; 16167c478bd9Sstevel@tonic-gate 16177c478bd9Sstevel@tonic-gate if (ctx == NULL) { 16187c478bd9Sstevel@tonic-gate /* 16197c478bd9Sstevel@tonic-gate * This thread has a set but no context; it must be a CPU-bound 16207c478bd9Sstevel@tonic-gate * set. The hardware will be stopped via kcpc_unbind() when the 16217c478bd9Sstevel@tonic-gate * process exits and closes its file descriptors with 16227c478bd9Sstevel@tonic-gate * kcpc_close(). Our only job here is to clean up this thread's 16237c478bd9Sstevel@tonic-gate * state; the set will be freed with the unbind(). 16247c478bd9Sstevel@tonic-gate */ 16257c478bd9Sstevel@tonic-gate (void) kcpc_unbind(set); 16267c478bd9Sstevel@tonic-gate /* 16277c478bd9Sstevel@tonic-gate * Unbinding a set belonging to the current thread should clear 16287c478bd9Sstevel@tonic-gate * its set pointer. 16297c478bd9Sstevel@tonic-gate */ 16307c478bd9Sstevel@tonic-gate ASSERT(curthread->t_cpc_set == NULL); 16317c478bd9Sstevel@tonic-gate return; 16327c478bd9Sstevel@tonic-gate } 16337c478bd9Sstevel@tonic-gate 1634*b885580bSAlexander Kolbasov kpreempt_disable(); 1635*b885580bSAlexander Kolbasov save_spl = spl_xcall(); 16367c478bd9Sstevel@tonic-gate curthread->t_cpc_set = NULL; 16377c478bd9Sstevel@tonic-gate 16387c478bd9Sstevel@tonic-gate /* 16397c478bd9Sstevel@tonic-gate * This thread/LWP is exiting but context switches will continue to 16407c478bd9Sstevel@tonic-gate * happen for a bit as the exit proceeds. Kernel preemption must be 16417c478bd9Sstevel@tonic-gate * disabled here to prevent a race between checking or setting the 16427c478bd9Sstevel@tonic-gate * INVALID_STOPPED flag here and kcpc_restore() setting the flag during 16437c478bd9Sstevel@tonic-gate * a context switch. 16447c478bd9Sstevel@tonic-gate */ 16457c478bd9Sstevel@tonic-gate if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) { 1646*b885580bSAlexander Kolbasov kcpc_unprogram(ctx, B_TRUE); 1647*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, 16487c478bd9Sstevel@tonic-gate KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED); 16497c478bd9Sstevel@tonic-gate } 1650*b885580bSAlexander Kolbasov 1651*b885580bSAlexander Kolbasov /* 1652*b885580bSAlexander Kolbasov * We're cleaning up after this thread; ensure there are no dangling 1653*b885580bSAlexander Kolbasov * CPC pointers left behind. The context and set will be freed by 1654*b885580bSAlexander Kolbasov * freectx(). 
1655*b885580bSAlexander Kolbasov */ 1656*b885580bSAlexander Kolbasov curthread->t_cpc_ctx = NULL; 1657*b885580bSAlexander Kolbasov 1658*b885580bSAlexander Kolbasov splx(save_spl); 16597c478bd9Sstevel@tonic-gate kpreempt_enable(); 16607c478bd9Sstevel@tonic-gate } 16617c478bd9Sstevel@tonic-gate 16627c478bd9Sstevel@tonic-gate /* 16637c478bd9Sstevel@tonic-gate * Assign the requests in the given set to the PICs in the context. 16647c478bd9Sstevel@tonic-gate * Returns 0 if successful, -1 on failure. 16657c478bd9Sstevel@tonic-gate */ 16667c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 1667b9e93c10SJonathan Haslam int 16687c478bd9Sstevel@tonic-gate kcpc_assign_reqs(kcpc_set_t *set, kcpc_ctx_t *ctx) 16697c478bd9Sstevel@tonic-gate { 16707c478bd9Sstevel@tonic-gate int i; 16717c478bd9Sstevel@tonic-gate int *picnum_save; 16727c478bd9Sstevel@tonic-gate 16737c478bd9Sstevel@tonic-gate ASSERT(set->ks_nreqs <= cpc_ncounters); 16747c478bd9Sstevel@tonic-gate 16757c478bd9Sstevel@tonic-gate /* 16767c478bd9Sstevel@tonic-gate * Provide kcpc_tryassign() with scratch space to avoid doing an 16777c478bd9Sstevel@tonic-gate * alloc/free with every invocation. 16787c478bd9Sstevel@tonic-gate */ 16797c478bd9Sstevel@tonic-gate picnum_save = kmem_alloc(set->ks_nreqs * sizeof (int), KM_SLEEP); 16807c478bd9Sstevel@tonic-gate /* 16817c478bd9Sstevel@tonic-gate * kcpc_tryassign() blindly walks through each request in the set, 16827c478bd9Sstevel@tonic-gate * seeing if a counter can count its event. If yes, it assigns that 16837c478bd9Sstevel@tonic-gate * counter. However, that counter may have been the only capable counter 16847c478bd9Sstevel@tonic-gate * for _another_ request's event. The solution is to try every possible 16857c478bd9Sstevel@tonic-gate * request first. Note that this does not cover all solutions, as 16867c478bd9Sstevel@tonic-gate * that would require all unique orderings of requests, an n^n operation 16877c478bd9Sstevel@tonic-gate * which would be unacceptable for architectures with many counters. 16887c478bd9Sstevel@tonic-gate */ 16897c478bd9Sstevel@tonic-gate for (i = 0; i < set->ks_nreqs; i++) 16907c478bd9Sstevel@tonic-gate if (kcpc_tryassign(set, i, picnum_save) == 0) 16917c478bd9Sstevel@tonic-gate break; 16927c478bd9Sstevel@tonic-gate 16937c478bd9Sstevel@tonic-gate kmem_free(picnum_save, set->ks_nreqs * sizeof (int)); 16947c478bd9Sstevel@tonic-gate if (i == set->ks_nreqs) 16957c478bd9Sstevel@tonic-gate return (-1); 16967c478bd9Sstevel@tonic-gate return (0); 16977c478bd9Sstevel@tonic-gate } 16987c478bd9Sstevel@tonic-gate 16997c478bd9Sstevel@tonic-gate static int 17007c478bd9Sstevel@tonic-gate kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch) 17017c478bd9Sstevel@tonic-gate { 17027c478bd9Sstevel@tonic-gate int i; 17037c478bd9Sstevel@tonic-gate int j; 17047c478bd9Sstevel@tonic-gate uint64_t bitmap = 0, resmap = 0; 17057c478bd9Sstevel@tonic-gate uint64_t ctrmap; 17067c478bd9Sstevel@tonic-gate 17077c478bd9Sstevel@tonic-gate /* 17087c478bd9Sstevel@tonic-gate * We are attempting to assign the reqs to pics, but we may fail. If we 17097c478bd9Sstevel@tonic-gate * fail, we need to restore the state of the requests to what it was 17107c478bd9Sstevel@tonic-gate * when we found it, as some reqs may have been explicitly assigned to 17117c478bd9Sstevel@tonic-gate * a specific PIC beforehand. We do this by snapshotting the assignments 17127c478bd9Sstevel@tonic-gate * now and restoring from it later if we fail. 
17137c478bd9Sstevel@tonic-gate * 17147c478bd9Sstevel@tonic-gate * Also we note here which counters have already been claimed by 17157c478bd9Sstevel@tonic-gate * requests with explicit counter assignments. 17167c478bd9Sstevel@tonic-gate */ 17177c478bd9Sstevel@tonic-gate for (i = 0; i < set->ks_nreqs; i++) { 17187c478bd9Sstevel@tonic-gate scratch[i] = set->ks_req[i].kr_picnum; 17197c478bd9Sstevel@tonic-gate if (set->ks_req[i].kr_picnum != -1) 17207c478bd9Sstevel@tonic-gate resmap |= (1 << set->ks_req[i].kr_picnum); 17217c478bd9Sstevel@tonic-gate } 17227c478bd9Sstevel@tonic-gate 17237c478bd9Sstevel@tonic-gate /* 17247c478bd9Sstevel@tonic-gate * Walk through requests assigning them to the first PIC that is 17257c478bd9Sstevel@tonic-gate * capable. 17267c478bd9Sstevel@tonic-gate */ 17277c478bd9Sstevel@tonic-gate i = starting_req; 17287c478bd9Sstevel@tonic-gate do { 17297c478bd9Sstevel@tonic-gate if (set->ks_req[i].kr_picnum != -1) { 17307c478bd9Sstevel@tonic-gate ASSERT((bitmap & (1 << set->ks_req[i].kr_picnum)) == 0); 17317c478bd9Sstevel@tonic-gate bitmap |= (1 << set->ks_req[i].kr_picnum); 17327c478bd9Sstevel@tonic-gate if (++i == set->ks_nreqs) 17337c478bd9Sstevel@tonic-gate i = 0; 17347c478bd9Sstevel@tonic-gate continue; 17357c478bd9Sstevel@tonic-gate } 17367c478bd9Sstevel@tonic-gate 17377c478bd9Sstevel@tonic-gate ctrmap = pcbe_ops->pcbe_event_coverage(set->ks_req[i].kr_event); 17387c478bd9Sstevel@tonic-gate for (j = 0; j < cpc_ncounters; j++) { 17397c478bd9Sstevel@tonic-gate if (ctrmap & (1 << j) && (bitmap & (1 << j)) == 0 && 17407c478bd9Sstevel@tonic-gate (resmap & (1 << j)) == 0) { 17417c478bd9Sstevel@tonic-gate /* 17427c478bd9Sstevel@tonic-gate * We can assign this counter because: 17437c478bd9Sstevel@tonic-gate * 17447c478bd9Sstevel@tonic-gate * 1. It can count the event (ctrmap) 17457c478bd9Sstevel@tonic-gate * 2. It hasn't been assigned yet (bitmap) 17467c478bd9Sstevel@tonic-gate * 3. 
It wasn't reserved by a request (resmap) 17477c478bd9Sstevel@tonic-gate */ 17487c478bd9Sstevel@tonic-gate bitmap |= (1 << j); 17497c478bd9Sstevel@tonic-gate break; 17507c478bd9Sstevel@tonic-gate } 17517c478bd9Sstevel@tonic-gate } 17527c478bd9Sstevel@tonic-gate if (j == cpc_ncounters) { 17537c478bd9Sstevel@tonic-gate for (i = 0; i < set->ks_nreqs; i++) 17547c478bd9Sstevel@tonic-gate set->ks_req[i].kr_picnum = scratch[i]; 17557c478bd9Sstevel@tonic-gate return (-1); 17567c478bd9Sstevel@tonic-gate } 17577c478bd9Sstevel@tonic-gate set->ks_req[i].kr_picnum = j; 17587c478bd9Sstevel@tonic-gate 17597c478bd9Sstevel@tonic-gate if (++i == set->ks_nreqs) 17607c478bd9Sstevel@tonic-gate i = 0; 17617c478bd9Sstevel@tonic-gate } while (i != starting_req); 17627c478bd9Sstevel@tonic-gate 17637c478bd9Sstevel@tonic-gate return (0); 17647c478bd9Sstevel@tonic-gate } 17657c478bd9Sstevel@tonic-gate 17667c478bd9Sstevel@tonic-gate kcpc_set_t * 17677c478bd9Sstevel@tonic-gate kcpc_dup_set(kcpc_set_t *set) 17687c478bd9Sstevel@tonic-gate { 17697c478bd9Sstevel@tonic-gate kcpc_set_t *new; 17707c478bd9Sstevel@tonic-gate int i; 17717c478bd9Sstevel@tonic-gate int j; 17727c478bd9Sstevel@tonic-gate 17734568bee7Strevtom new = kmem_zalloc(sizeof (*new), KM_SLEEP); 17744568bee7Strevtom new->ks_state &= ~KCPC_SET_BOUND; 17757c478bd9Sstevel@tonic-gate new->ks_flags = set->ks_flags; 17767c478bd9Sstevel@tonic-gate new->ks_nreqs = set->ks_nreqs; 17777c478bd9Sstevel@tonic-gate new->ks_req = kmem_alloc(set->ks_nreqs * sizeof (kcpc_request_t), 17787c478bd9Sstevel@tonic-gate KM_SLEEP); 17797c478bd9Sstevel@tonic-gate new->ks_data = NULL; 17807c478bd9Sstevel@tonic-gate new->ks_ctx = NULL; 17817c478bd9Sstevel@tonic-gate 17827c478bd9Sstevel@tonic-gate for (i = 0; i < new->ks_nreqs; i++) { 17837c478bd9Sstevel@tonic-gate new->ks_req[i].kr_config = NULL; 17847c478bd9Sstevel@tonic-gate new->ks_req[i].kr_index = set->ks_req[i].kr_index; 17857c478bd9Sstevel@tonic-gate new->ks_req[i].kr_picnum = set->ks_req[i].kr_picnum; 17867c478bd9Sstevel@tonic-gate new->ks_req[i].kr_picp = NULL; 17877c478bd9Sstevel@tonic-gate new->ks_req[i].kr_data = NULL; 17887c478bd9Sstevel@tonic-gate (void) strncpy(new->ks_req[i].kr_event, set->ks_req[i].kr_event, 17897c478bd9Sstevel@tonic-gate CPC_MAX_EVENT_LEN); 17907c478bd9Sstevel@tonic-gate new->ks_req[i].kr_preset = set->ks_req[i].kr_preset; 17917c478bd9Sstevel@tonic-gate new->ks_req[i].kr_flags = set->ks_req[i].kr_flags; 17927c478bd9Sstevel@tonic-gate new->ks_req[i].kr_nattrs = set->ks_req[i].kr_nattrs; 17937c478bd9Sstevel@tonic-gate new->ks_req[i].kr_attr = kmem_alloc(new->ks_req[i].kr_nattrs * 17947c478bd9Sstevel@tonic-gate sizeof (kcpc_attr_t), KM_SLEEP); 17957c478bd9Sstevel@tonic-gate for (j = 0; j < new->ks_req[i].kr_nattrs; j++) { 17967c478bd9Sstevel@tonic-gate new->ks_req[i].kr_attr[j].ka_val = 17977c478bd9Sstevel@tonic-gate set->ks_req[i].kr_attr[j].ka_val; 17987c478bd9Sstevel@tonic-gate (void) strncpy(new->ks_req[i].kr_attr[j].ka_name, 17997c478bd9Sstevel@tonic-gate set->ks_req[i].kr_attr[j].ka_name, 18007c478bd9Sstevel@tonic-gate CPC_MAX_ATTR_LEN); 18017c478bd9Sstevel@tonic-gate } 18027c478bd9Sstevel@tonic-gate } 18037c478bd9Sstevel@tonic-gate 18047c478bd9Sstevel@tonic-gate return (new); 18057c478bd9Sstevel@tonic-gate } 18067c478bd9Sstevel@tonic-gate 18077c478bd9Sstevel@tonic-gate int 18087c478bd9Sstevel@tonic-gate kcpc_allow_nonpriv(void *token) 18097c478bd9Sstevel@tonic-gate { 18107c478bd9Sstevel@tonic-gate return (((kcpc_ctx_t *)token)->kc_flags & KCPC_CTX_NONPRIV); 18117c478bd9Sstevel@tonic-gate } 
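/*
 * Worked example (added for exposition, not part of the original source) of
 * why kcpc_assign_reqs() retries the greedy pass from every starting
 * request: suppose counter 0 can count events A and B but counter 1 can only
 * count A.  A pass that starts with the request for A grabs counter 0 and
 * then has nowhere to put B; a pass that starts with B succeeds.  The toy
 * program below, with made-up coverage masks and names, demonstrates exactly
 * that.
 */
#if 0	/* example only; never compiled as part of this file */
#include <stdio.h>

#define	EX_NCTRS	2
#define	EX_NREQS	2

/* request 0 wants event A (counters 0 and 1), request 1 wants B (counter 0) */
static const unsigned int ex_coverage[EX_NREQS] = { 0x3, 0x1 };

static int
ex_greedy(int start)
{
	unsigned int taken = 0;
	int n, i, j;

	for (n = 0, i = start; n < EX_NREQS; n++, i = (i + 1) % EX_NREQS) {
		for (j = 0; j < EX_NCTRS; j++)
			if ((ex_coverage[i] & (1 << j)) && !(taken & (1 << j)))
				break;
		if (j == EX_NCTRS)
			return (-1);	/* no free counter can count this event */
		taken |= (1 << j);
	}
	return (0);
}

int
main(void)
{
	/* prints "start 0: -1, start 1: 0" */
	(void) printf("start 0: %d, start 1: %d\n", ex_greedy(0), ex_greedy(1));
	return (0);
}
#endif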
18127c478bd9Sstevel@tonic-gate 18137c478bd9Sstevel@tonic-gate void 18147c478bd9Sstevel@tonic-gate kcpc_invalidate(kthread_t *t) 18157c478bd9Sstevel@tonic-gate { 18167c478bd9Sstevel@tonic-gate kcpc_ctx_t *ctx = t->t_cpc_ctx; 18177c478bd9Sstevel@tonic-gate 18187c478bd9Sstevel@tonic-gate if (ctx != NULL) 1819*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID); 18207c478bd9Sstevel@tonic-gate } 18217c478bd9Sstevel@tonic-gate 18227c478bd9Sstevel@tonic-gate /* 18237c478bd9Sstevel@tonic-gate * Given a PCBE ID, attempt to load a matching PCBE module. The strings given 18247c478bd9Sstevel@tonic-gate * are used to construct PCBE names, starting with the most specific, 18257c478bd9Sstevel@tonic-gate * "pcbe.first.second.third.fourth" and ending with the least specific, 18267c478bd9Sstevel@tonic-gate * "pcbe.first". 18277c478bd9Sstevel@tonic-gate * 18287c478bd9Sstevel@tonic-gate * Returns 0 if a PCBE was successfully loaded and -1 upon error. 18297c478bd9Sstevel@tonic-gate */ 18307c478bd9Sstevel@tonic-gate int 18317c478bd9Sstevel@tonic-gate kcpc_pcbe_tryload(const char *prefix, uint_t first, uint_t second, uint_t third) 18327c478bd9Sstevel@tonic-gate { 18337aec1d6eScindi uint_t s[3]; 18347c478bd9Sstevel@tonic-gate 18357aec1d6eScindi s[0] = first; 18367aec1d6eScindi s[1] = second; 18377aec1d6eScindi s[2] = third; 18387c478bd9Sstevel@tonic-gate 18397aec1d6eScindi return (modload_qualified("pcbe", 184020c794b3Sgavinm "pcbe", prefix, ".", s, 3, NULL) < 0 ? -1 : 0); 18417c478bd9Sstevel@tonic-gate } 1842b9e93c10SJonathan Haslam 1843*b885580bSAlexander Kolbasov /* 1844*b885580bSAlexander Kolbasov * Create one or more CPC context for given CPU with specified counter event 1845*b885580bSAlexander Kolbasov * requests 1846*b885580bSAlexander Kolbasov * 1847*b885580bSAlexander Kolbasov * If number of requested counter events is less than or equal number of 1848*b885580bSAlexander Kolbasov * hardware counters on a CPU and can all be assigned to the counters on a CPU 1849*b885580bSAlexander Kolbasov * at the same time, then make one CPC context. 1850*b885580bSAlexander Kolbasov * 1851*b885580bSAlexander Kolbasov * Otherwise, multiple CPC contexts are created to allow multiplexing more 1852*b885580bSAlexander Kolbasov * counter events than existing counters onto the counters by iterating through 1853*b885580bSAlexander Kolbasov * all of the CPC contexts, programming the counters with each CPC context one 1854*b885580bSAlexander Kolbasov * at a time and measuring the resulting counter values. Each of the resulting 1855*b885580bSAlexander Kolbasov * CPC contexts contains some number of requested counter events less than or 1856*b885580bSAlexander Kolbasov * equal the number of counters on a CPU depending on whether all the counter 1857*b885580bSAlexander Kolbasov * events can be programmed on all the counters at the same time or not. 1858*b885580bSAlexander Kolbasov * 1859*b885580bSAlexander Kolbasov * Flags to kmem_{,z}alloc() are passed in as an argument to allow specifying 1860*b885580bSAlexander Kolbasov * whether memory allocation should be non-blocking or not. The code will try 1861*b885580bSAlexander Kolbasov * to allocate *whole* CPC contexts if possible. If there is any memory 1862*b885580bSAlexander Kolbasov * allocation failure during the allocations needed for a given CPC context, it 1863*b885580bSAlexander Kolbasov * will skip allocating that CPC context because it cannot allocate the whole 1864*b885580bSAlexander Kolbasov * thing. 
Thus, the only time that it will end up allocating none (ie. no CPC 1865*b885580bSAlexander Kolbasov * contexts whatsoever) is when it cannot even allocate *one* whole CPC context 1866*b885580bSAlexander Kolbasov * without a memory allocation failure occurring. 1867*b885580bSAlexander Kolbasov */ 1868*b885580bSAlexander Kolbasov int 1869*b885580bSAlexander Kolbasov kcpc_cpu_ctx_create(cpu_t *cp, kcpc_request_list_t *req_list, int kmem_flags, 1870*b885580bSAlexander Kolbasov kcpc_ctx_t ***ctx_ptr_array, size_t *ctx_ptr_array_sz) 1871*b885580bSAlexander Kolbasov { 1872*b885580bSAlexander Kolbasov kcpc_ctx_t **ctx_ptrs; 1873*b885580bSAlexander Kolbasov int nctx; 1874*b885580bSAlexander Kolbasov int nctx_ptrs; 1875*b885580bSAlexander Kolbasov int nreqs; 1876*b885580bSAlexander Kolbasov kcpc_request_t *reqs; 1877*b885580bSAlexander Kolbasov 1878*b885580bSAlexander Kolbasov if (cp == NULL || ctx_ptr_array == NULL || ctx_ptr_array_sz == NULL || 1879*b885580bSAlexander Kolbasov req_list == NULL || req_list->krl_cnt < 1) 1880*b885580bSAlexander Kolbasov return (-1); 1881*b885580bSAlexander Kolbasov 1882*b885580bSAlexander Kolbasov /* 1883*b885580bSAlexander Kolbasov * Allocate number of sets assuming that each set contains one and only 1884*b885580bSAlexander Kolbasov * one counter event request for each counter on a CPU 1885*b885580bSAlexander Kolbasov */ 1886*b885580bSAlexander Kolbasov nreqs = req_list->krl_cnt; 1887*b885580bSAlexander Kolbasov nctx_ptrs = (nreqs + cpc_ncounters - 1) / cpc_ncounters; 1888*b885580bSAlexander Kolbasov ctx_ptrs = kmem_zalloc(nctx_ptrs * sizeof (kcpc_ctx_t *), kmem_flags); 1889*b885580bSAlexander Kolbasov if (ctx_ptrs == NULL) 1890*b885580bSAlexander Kolbasov return (-2); 1891*b885580bSAlexander Kolbasov 1892*b885580bSAlexander Kolbasov /* 1893*b885580bSAlexander Kolbasov * Fill in sets of requests 1894*b885580bSAlexander Kolbasov */ 1895*b885580bSAlexander Kolbasov nctx = 0; 1896*b885580bSAlexander Kolbasov reqs = req_list->krl_list; 1897*b885580bSAlexander Kolbasov while (nreqs > 0) { 1898*b885580bSAlexander Kolbasov kcpc_ctx_t *ctx; 1899*b885580bSAlexander Kolbasov kcpc_set_t *set; 1900*b885580bSAlexander Kolbasov int subcode; 1901*b885580bSAlexander Kolbasov 1902*b885580bSAlexander Kolbasov /* 1903*b885580bSAlexander Kolbasov * Allocate CPC context and set for requested counter events 1904*b885580bSAlexander Kolbasov */ 1905*b885580bSAlexander Kolbasov ctx = kcpc_ctx_alloc(kmem_flags); 1906*b885580bSAlexander Kolbasov set = kcpc_set_create(reqs, nreqs, 0, kmem_flags); 1907*b885580bSAlexander Kolbasov if (set == NULL) { 1908*b885580bSAlexander Kolbasov kcpc_ctx_free(ctx); 1909*b885580bSAlexander Kolbasov break; 1910*b885580bSAlexander Kolbasov } 1911*b885580bSAlexander Kolbasov 1912*b885580bSAlexander Kolbasov /* 1913*b885580bSAlexander Kolbasov * Determine assignment of requested counter events to specific 1914*b885580bSAlexander Kolbasov * counters 1915*b885580bSAlexander Kolbasov */ 1916*b885580bSAlexander Kolbasov if (kcpc_assign_reqs(set, ctx) != 0) { 1917*b885580bSAlexander Kolbasov /* 1918*b885580bSAlexander Kolbasov * May not be able to assign requested counter events 1919*b885580bSAlexander Kolbasov * to all counters since all counters may not be able 1920*b885580bSAlexander Kolbasov * to do all events, so only do one counter event in 1921*b885580bSAlexander Kolbasov * set of counter requests when this happens since at 1922*b885580bSAlexander Kolbasov * least one of the counters must be able to do the 1923*b885580bSAlexander Kolbasov * event. 
1924*b885580bSAlexander Kolbasov */ 1925*b885580bSAlexander Kolbasov kcpc_free_set(set); 1926*b885580bSAlexander Kolbasov set = kcpc_set_create(reqs, 1, 0, kmem_flags); 1927*b885580bSAlexander Kolbasov if (set == NULL) { 1928*b885580bSAlexander Kolbasov kcpc_ctx_free(ctx); 1929*b885580bSAlexander Kolbasov break; 1930*b885580bSAlexander Kolbasov } 1931*b885580bSAlexander Kolbasov if (kcpc_assign_reqs(set, ctx) != 0) { 1932*b885580bSAlexander Kolbasov #ifdef DEBUG 1933*b885580bSAlexander Kolbasov cmn_err(CE_NOTE, "!kcpc_cpu_ctx_create: can't " 1934*b885580bSAlexander Kolbasov "assign counter event %s!\n", 1935*b885580bSAlexander Kolbasov set->ks_req->kr_event); 1936*b885580bSAlexander Kolbasov #endif 1937*b885580bSAlexander Kolbasov kcpc_free_set(set); 1938*b885580bSAlexander Kolbasov kcpc_ctx_free(ctx); 1939*b885580bSAlexander Kolbasov reqs++; 1940*b885580bSAlexander Kolbasov nreqs--; 1941*b885580bSAlexander Kolbasov continue; 1942*b885580bSAlexander Kolbasov } 1943*b885580bSAlexander Kolbasov } 1944*b885580bSAlexander Kolbasov 1945*b885580bSAlexander Kolbasov /* 1946*b885580bSAlexander Kolbasov * Allocate memory needed to hold requested counter event data 1947*b885580bSAlexander Kolbasov */ 1948*b885580bSAlexander Kolbasov set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), 1949*b885580bSAlexander Kolbasov kmem_flags); 1950*b885580bSAlexander Kolbasov if (set->ks_data == NULL) { 1951*b885580bSAlexander Kolbasov kcpc_free_set(set); 1952*b885580bSAlexander Kolbasov kcpc_ctx_free(ctx); 1953*b885580bSAlexander Kolbasov break; 1954*b885580bSAlexander Kolbasov } 1955*b885580bSAlexander Kolbasov 1956*b885580bSAlexander Kolbasov /* 1957*b885580bSAlexander Kolbasov * Configure requested counter events 1958*b885580bSAlexander Kolbasov */ 1959*b885580bSAlexander Kolbasov if (kcpc_configure_reqs(ctx, set, &subcode) != 0) { 1960*b885580bSAlexander Kolbasov #ifdef DEBUG 1961*b885580bSAlexander Kolbasov cmn_err(CE_NOTE, 1962*b885580bSAlexander Kolbasov "!kcpc_cpu_ctx_create: can't configure " 1963*b885580bSAlexander Kolbasov "set of counter event requests!\n"); 1964*b885580bSAlexander Kolbasov #endif 1965*b885580bSAlexander Kolbasov reqs += set->ks_nreqs; 1966*b885580bSAlexander Kolbasov nreqs -= set->ks_nreqs; 1967*b885580bSAlexander Kolbasov kmem_free(set->ks_data, 1968*b885580bSAlexander Kolbasov set->ks_nreqs * sizeof (uint64_t)); 1969*b885580bSAlexander Kolbasov kcpc_free_set(set); 1970*b885580bSAlexander Kolbasov kcpc_ctx_free(ctx); 1971*b885580bSAlexander Kolbasov continue; 1972*b885580bSAlexander Kolbasov } 1973*b885580bSAlexander Kolbasov 1974*b885580bSAlexander Kolbasov /* 1975*b885580bSAlexander Kolbasov * Point set of counter event requests at this context and fill 1976*b885580bSAlexander Kolbasov * in CPC context 1977*b885580bSAlexander Kolbasov */ 1978*b885580bSAlexander Kolbasov set->ks_ctx = ctx; 1979*b885580bSAlexander Kolbasov ctx->kc_set = set; 1980*b885580bSAlexander Kolbasov ctx->kc_cpuid = cp->cpu_id; 1981*b885580bSAlexander Kolbasov ctx->kc_thread = curthread; 1982*b885580bSAlexander Kolbasov 1983*b885580bSAlexander Kolbasov ctx_ptrs[nctx] = ctx; 1984*b885580bSAlexander Kolbasov 1985*b885580bSAlexander Kolbasov /* 1986*b885580bSAlexander Kolbasov * Update requests and how many are left to be assigned to sets 1987*b885580bSAlexander Kolbasov */ 1988*b885580bSAlexander Kolbasov reqs += set->ks_nreqs; 1989*b885580bSAlexander Kolbasov nreqs -= set->ks_nreqs; 1990*b885580bSAlexander Kolbasov 1991*b885580bSAlexander Kolbasov /* 1992*b885580bSAlexander Kolbasov * Increment 
number of CPC contexts and allocate bigger array 1993*b885580bSAlexander Kolbasov * for context pointers as needed 1994*b885580bSAlexander Kolbasov */ 1995*b885580bSAlexander Kolbasov nctx++; 1996*b885580bSAlexander Kolbasov if (nctx >= nctx_ptrs) { 1997*b885580bSAlexander Kolbasov kcpc_ctx_t **new; 1998*b885580bSAlexander Kolbasov int new_cnt; 1999*b885580bSAlexander Kolbasov 2000*b885580bSAlexander Kolbasov /* 2001*b885580bSAlexander Kolbasov * Allocate more CPC contexts based on how many 2002*b885580bSAlexander Kolbasov * contexts allocated so far and how many counter 2003*b885580bSAlexander Kolbasov * requests left to assign 2004*b885580bSAlexander Kolbasov */ 2005*b885580bSAlexander Kolbasov new_cnt = nctx_ptrs + 2006*b885580bSAlexander Kolbasov ((nreqs + cpc_ncounters - 1) / cpc_ncounters); 2007*b885580bSAlexander Kolbasov new = kmem_zalloc(new_cnt * sizeof (kcpc_ctx_t *), 2008*b885580bSAlexander Kolbasov kmem_flags); 2009*b885580bSAlexander Kolbasov if (new == NULL) 2010*b885580bSAlexander Kolbasov break; 2011*b885580bSAlexander Kolbasov 2012*b885580bSAlexander Kolbasov /* 2013*b885580bSAlexander Kolbasov * Copy contents of old sets into new ones 2014*b885580bSAlexander Kolbasov */ 2015*b885580bSAlexander Kolbasov bcopy(ctx_ptrs, new, 2016*b885580bSAlexander Kolbasov nctx_ptrs * sizeof (kcpc_ctx_t *)); 2017*b885580bSAlexander Kolbasov 2018*b885580bSAlexander Kolbasov /* 2019*b885580bSAlexander Kolbasov * Free old array of context pointers and use newly 2020*b885580bSAlexander Kolbasov * allocated one instead now 2021*b885580bSAlexander Kolbasov */ 2022*b885580bSAlexander Kolbasov kmem_free(ctx_ptrs, nctx_ptrs * sizeof (kcpc_ctx_t *)); 2023*b885580bSAlexander Kolbasov ctx_ptrs = new; 2024*b885580bSAlexander Kolbasov nctx_ptrs = new_cnt; 2025*b885580bSAlexander Kolbasov } 2026*b885580bSAlexander Kolbasov } 2027*b885580bSAlexander Kolbasov 2028*b885580bSAlexander Kolbasov /* 2029*b885580bSAlexander Kolbasov * Return NULL if no CPC contexts filled in 2030*b885580bSAlexander Kolbasov */ 2031*b885580bSAlexander Kolbasov if (nctx == 0) { 2032*b885580bSAlexander Kolbasov kmem_free(ctx_ptrs, nctx_ptrs * sizeof (kcpc_ctx_t *)); 2033*b885580bSAlexander Kolbasov *ctx_ptr_array = NULL; 2034*b885580bSAlexander Kolbasov *ctx_ptr_array_sz = 0; 2035*b885580bSAlexander Kolbasov return (-2); 2036*b885580bSAlexander Kolbasov } 2037*b885580bSAlexander Kolbasov 2038*b885580bSAlexander Kolbasov *ctx_ptr_array = ctx_ptrs; 2039*b885580bSAlexander Kolbasov *ctx_ptr_array_sz = nctx_ptrs * sizeof (kcpc_ctx_t *); 2040*b885580bSAlexander Kolbasov return (nctx); 2041*b885580bSAlexander Kolbasov } 2042*b885580bSAlexander Kolbasov 2043*b885580bSAlexander Kolbasov /* 2044*b885580bSAlexander Kolbasov * Return whether PCBE supports given counter event 2045*b885580bSAlexander Kolbasov */ 2046*b885580bSAlexander Kolbasov boolean_t 2047*b885580bSAlexander Kolbasov kcpc_event_supported(char *event) 2048*b885580bSAlexander Kolbasov { 2049*b885580bSAlexander Kolbasov if (pcbe_ops == NULL || pcbe_ops->pcbe_event_coverage(event) == 0) 2050*b885580bSAlexander Kolbasov return (B_FALSE); 2051*b885580bSAlexander Kolbasov 2052*b885580bSAlexander Kolbasov return (B_TRUE); 2053*b885580bSAlexander Kolbasov } 2054*b885580bSAlexander Kolbasov 2055*b885580bSAlexander Kolbasov /* 2056*b885580bSAlexander Kolbasov * Program counters on current CPU with given CPC context 2057*b885580bSAlexander Kolbasov * 2058*b885580bSAlexander Kolbasov * If kernel is interposing on counters to measure hardware capacity and 2059*b885580bSAlexander Kolbasov 
* utilization, then unprogram counters for kernel *before* programming them 2060*b885580bSAlexander Kolbasov * with specified CPC context. 2061*b885580bSAlexander Kolbasov * 2062*b885580bSAlexander Kolbasov * kcpc_{program,unprogram}() may be called either directly by a thread running 2063*b885580bSAlexander Kolbasov * on the target CPU or from a cross-call from another CPU. To protect 2064*b885580bSAlexander Kolbasov * programming and unprogramming from being interrupted by cross-calls, callers 2065*b885580bSAlexander Kolbasov * who execute kcpc_{program,unprogram} should raise PIL to the level used by 2066*b885580bSAlexander Kolbasov * cross-calls. 2067*b885580bSAlexander Kolbasov */ 2068*b885580bSAlexander Kolbasov void 2069*b885580bSAlexander Kolbasov kcpc_program(kcpc_ctx_t *ctx, boolean_t for_thread, boolean_t cu_interpose) 2070*b885580bSAlexander Kolbasov { 2071*b885580bSAlexander Kolbasov int error; 2072*b885580bSAlexander Kolbasov 2073*b885580bSAlexander Kolbasov ASSERT(IS_HIPIL()); 2074*b885580bSAlexander Kolbasov 2075*b885580bSAlexander Kolbasov /* 2076*b885580bSAlexander Kolbasov * CPC context shouldn't be NULL, its CPU field should specify current 2077*b885580bSAlexander Kolbasov * CPU or be -1 to specify any CPU when the context is bound to a 2078*b885580bSAlexander Kolbasov * thread, and preemption should be disabled 2079*b885580bSAlexander Kolbasov */ 2080*b885580bSAlexander Kolbasov ASSERT(ctx != NULL && (ctx->kc_cpuid == CPU->cpu_id || 2081*b885580bSAlexander Kolbasov ctx->kc_cpuid == -1) && curthread->t_preempt > 0); 2082*b885580bSAlexander Kolbasov if (ctx == NULL || (ctx->kc_cpuid != CPU->cpu_id && 2083*b885580bSAlexander Kolbasov ctx->kc_cpuid != -1) || curthread->t_preempt < 1) 2084*b885580bSAlexander Kolbasov return; 2085*b885580bSAlexander Kolbasov 2086*b885580bSAlexander Kolbasov /* 2087*b885580bSAlexander Kolbasov * Unprogram counters for kernel measuring hardware capacity and 2088*b885580bSAlexander Kolbasov * utilization 2089*b885580bSAlexander Kolbasov */ 2090*b885580bSAlexander Kolbasov if (cu_interpose == B_TRUE) { 2091*b885580bSAlexander Kolbasov cu_cpc_unprogram(CPU, &error); 2092*b885580bSAlexander Kolbasov } else { 2093*b885580bSAlexander Kolbasov kcpc_set_t *set = ctx->kc_set; 2094*b885580bSAlexander Kolbasov int i; 2095*b885580bSAlexander Kolbasov 2096*b885580bSAlexander Kolbasov ASSERT(set != NULL); 2097*b885580bSAlexander Kolbasov 2098*b885580bSAlexander Kolbasov /* 2099*b885580bSAlexander Kolbasov * Since cu_interpose is false, we are programming CU context. 2100*b885580bSAlexander Kolbasov * In general, PCBE can continue from the state saved in the 2101*b885580bSAlexander Kolbasov * set, but it is not very reliable, so we start again from the 2102*b885580bSAlexander Kolbasov * preset value. 2103*b885580bSAlexander Kolbasov */ 2104*b885580bSAlexander Kolbasov for (i = 0; i < set->ks_nreqs; i++) { 2105*b885580bSAlexander Kolbasov /* 2106*b885580bSAlexander Kolbasov * Reset the virtual counter value to the preset value. 2107*b885580bSAlexander Kolbasov */ 2108*b885580bSAlexander Kolbasov *(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset; 2109*b885580bSAlexander Kolbasov 2110*b885580bSAlexander Kolbasov /* 2111*b885580bSAlexander Kolbasov * Reset PCBE to the preset value. 
2112*b885580bSAlexander Kolbasov */ 2113*b885580bSAlexander Kolbasov pcbe_ops->pcbe_configure(0, NULL, 2114*b885580bSAlexander Kolbasov set->ks_req[i].kr_preset, 2115*b885580bSAlexander Kolbasov 0, 0, NULL, &set->ks_req[i].kr_config, NULL); 2116*b885580bSAlexander Kolbasov } 2117*b885580bSAlexander Kolbasov } 2118*b885580bSAlexander Kolbasov 2119*b885580bSAlexander Kolbasov /* 2120*b885580bSAlexander Kolbasov * Program counters with specified CPC context 2121*b885580bSAlexander Kolbasov */ 2122*b885580bSAlexander Kolbasov ctx->kc_rawtick = KCPC_GET_TICK(); 2123*b885580bSAlexander Kolbasov pcbe_ops->pcbe_program(ctx); 2124*b885580bSAlexander Kolbasov 2125*b885580bSAlexander Kolbasov /* 2126*b885580bSAlexander Kolbasov * Record that the counters are now programmed; this is tracked 2127*b885580bSAlexander Kolbasov * differently for thread and CPU CPC contexts 2128*b885580bSAlexander Kolbasov */ 2129*b885580bSAlexander Kolbasov if (for_thread == B_TRUE) 2130*b885580bSAlexander Kolbasov KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE); 2131*b885580bSAlexander Kolbasov else 2132*b885580bSAlexander Kolbasov CPU->cpu_cpc_ctx = ctx; 2133*b885580bSAlexander Kolbasov } 2134*b885580bSAlexander Kolbasov 2135*b885580bSAlexander Kolbasov /* 2136*b885580bSAlexander Kolbasov * Unprogram counters with given CPC context on current CPU 2137*b885580bSAlexander Kolbasov * 2138*b885580bSAlexander Kolbasov * If kernel is interposing on counters to measure hardware capacity and 2139*b885580bSAlexander Kolbasov * utilization, then program counters for the kernel capacity and utilization 2140*b885580bSAlexander Kolbasov * *after* unprogramming them for given CPC context. 2141*b885580bSAlexander Kolbasov * 2142*b885580bSAlexander Kolbasov * See the comment for kcpc_program regarding the synchronization with 2143*b885580bSAlexander Kolbasov * cross-calls.
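 */

/*
 * Illustrative sketch (added for exposition, not part of the original
 * source) of the calling convention described above, mirroring what
 * kcpc_restore() and kcpc_save() in this file already do: block kernel
 * preemption and raise PIL to the cross-call level around the call.  The
 * wrapper name is invented for illustration only.
 */
#if 0	/* example only; never compiled as part of this file */
static void
example_program_ctx_on_this_cpu(kcpc_ctx_t *ctx)
{
	int save_spl;

	kpreempt_disable();
	save_spl = spl_xcall();
	kcpc_program(ctx, B_TRUE, B_TRUE);	/* thread context, CU interpose */
	splx(save_spl);
	kpreempt_enable();
}
#endif

/* end of illustrative sketch; the original code resumes below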

/*
 * Unprogram the counters for the given CPC context on the current CPU.
 *
 * If the kernel is interposing on the counters to measure hardware capacity
 * and utilization, then program the counters for kernel capacity and
 * utilization *after* unprogramming them for the given CPC context.
 *
 * See the comment above kcpc_program() regarding synchronization with
 * cross-calls.
 */
void
kcpc_unprogram(kcpc_ctx_t *ctx, boolean_t cu_interpose)
{
        int error;

        ASSERT(IS_HIPIL());

        /*
         * The CPC context must not be NULL, its CPU field must specify the
         * current CPU (or -1 for any CPU when the context is bound to a
         * thread), and preemption must be disabled.
         */
        ASSERT(ctx != NULL && (ctx->kc_cpuid == CPU->cpu_id ||
            ctx->kc_cpuid == -1) && curthread->t_preempt > 0);

        if (ctx == NULL || (ctx->kc_cpuid != CPU->cpu_id &&
            ctx->kc_cpuid != -1) || curthread->t_preempt < 1 ||
            (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) != 0) {
                return;
        }

        /*
         * The CPC context being unprogrammed must be bound to the current CPU
         * or to the current thread.
         */
        ASSERT(CPU->cpu_cpc_ctx == ctx || curthread->t_cpc_ctx == ctx);

        /*
         * Stop the counters.
         */
        pcbe_ops->pcbe_allstop();
        KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID_STOPPED);

        /*
         * If cu_interpose is true, allow the kernel to interpose on the
         * counters and program them for its own use to measure hardware
         * capacity and utilization.
         */
        if (cu_interpose == B_TRUE)
                cu_cpc_program(CPU, &error);
}
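
/*
 * Illustrative sketch (not part of this file): releasing whatever context is
 * currently active on this CPU and letting the capacity/utilization code take
 * the counters back.  The caller is assumed to run at high PIL with
 * preemption disabled, as required above.
 *
 *      kcpc_ctx_t *ctx = CPU->cpu_cpc_ctx;
 *
 *      if (ctx != NULL)
 *              kcpc_unprogram(ctx, B_TRUE);
 */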

/*
 * Read the CPU performance counters (CPC) on the current CPU and call the
 * specified update routine with the data for each counter event currently
 * programmed on this CPU.
 */
int
kcpc_read(kcpc_update_func_t update_func)
{
        kcpc_ctx_t *ctx;
        int i;
        kcpc_request_t *req;
        int retval;
        kcpc_set_t *set;

        ASSERT(IS_HIPIL());

        /*
         * Can't grab locks or block because this may be called from within
         * the dispatcher.
         */
        kpreempt_disable();

        ctx = CPU->cpu_cpc_ctx;
        if (ctx == NULL) {
                kpreempt_enable();
                return (0);
        }

        /*
         * Read the counter data from the current CPU.
         */
        pcbe_ops->pcbe_sample(ctx);

        set = ctx->kc_set;
        if (set == NULL || set->ks_req == NULL) {
                kpreempt_enable();
                return (0);
        }

        /*
         * Call the update function with the caller-supplied pointer and the
         * data for each CPC event request currently programmed on this CPU.
         */
        req = set->ks_req;
        retval = 0;
        for (i = 0; i < set->ks_nreqs; i++) {
                int ret;

                if (req[i].kr_data == NULL)
                        break;

                ret = update_func(req[i].kr_ptr, *req[i].kr_data);
                if (ret < 0)
                        retval = ret;
        }

        kpreempt_enable();

        return (retval);
}
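
/*
 * Illustrative sketch (not part of this file): a minimal update callback and
 * its use with kcpc_read().  The callback receives the kr_ptr that was handed
 * to kcpc_reqs_add() for the event along with the current 64-bit virtualized
 * count; example_counter_t and example_update() are hypothetical names.
 *
 *      static int
 *      example_update(void *ptr, uint64_t count)
 *      {
 *              ((example_counter_t *)ptr)->ec_value = count;
 *              return (0);
 *      }
 *
 *      Then, at high PIL with the CPU's counters programmed:
 *
 *              (void) kcpc_read(example_update);
 */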

/*
 * Initialize a list of counter event requests.
 */
kcpc_request_list_t *
kcpc_reqs_init(int nreqs, int kmem_flags)
{
        kcpc_request_list_t *req_list;
        kcpc_request_t *reqs;

        if (nreqs < 1)
                return (NULL);

        req_list = kmem_zalloc(sizeof (kcpc_request_list_t), kmem_flags);
        if (req_list == NULL)
                return (NULL);

        reqs = kmem_zalloc(nreqs * sizeof (kcpc_request_t), kmem_flags);
        if (reqs == NULL) {
                kmem_free(req_list, sizeof (kcpc_request_list_t));
                return (NULL);
        }

        req_list->krl_list = reqs;
        req_list->krl_cnt = 0;
        req_list->krl_max = nreqs;
        return (req_list);
}


/*
 * Add a counter event request to the given list of counter event requests.
 */
int
kcpc_reqs_add(kcpc_request_list_t *req_list, char *event, uint64_t preset,
    uint_t flags, uint_t nattrs, kcpc_attr_t *attr, void *ptr, int kmem_flags)
{
        kcpc_request_t *req;

        /*
         * Validate the list before dereferencing it.
         */
        if (req_list == NULL || req_list->krl_list == NULL)
                return (-1);

        ASSERT(req_list->krl_max != 0);

        /*
         * Allocate more space if the list is already full.  The existing
         * entries and their count are preserved across the resize.
         */
        if (req_list->krl_cnt >= req_list->krl_max) {
                kcpc_request_t *new;
                kcpc_request_t *old;

                old = req_list->krl_list;
                new = kmem_zalloc((req_list->krl_max +
                    cpc_ncounters) * sizeof (kcpc_request_t), kmem_flags);
                if (new == NULL)
                        return (-2);

                req_list->krl_list = new;
                bcopy(old, req_list->krl_list,
                    req_list->krl_cnt * sizeof (kcpc_request_t));
                kmem_free(old, req_list->krl_max * sizeof (kcpc_request_t));
                req_list->krl_max += cpc_ncounters;
        }

        /*
         * Fill in the request as much as possible now; some fields will be
         * set when the request is assigned to a set.
         */
        req = &req_list->krl_list[req_list->krl_cnt];
        req->kr_config = NULL;
        req->kr_picnum = -1;    /* have CPC pick this */
        req->kr_index = -1;     /* set when assigning request to set */
        req->kr_data = NULL;    /* set when configuring request */
        (void) strcpy(req->kr_event, event);
        req->kr_preset = preset;
        req->kr_flags = flags;
        req->kr_nattrs = nattrs;
        req->kr_attr = attr;
        /*
         * Keep the pointer given by the caller so it can be handed to the
         * update function when this counter event is sampled/read.
         */
        req->kr_ptr = ptr;

        req_list->krl_cnt++;

        return (0);
}

/*
 * Reset list of CPC event requests so its space can be used for another set
 * of requests
 */
int
kcpc_reqs_reset(kcpc_request_list_t *req_list)
{
        /*
         * Return when pointer to request list structure or request is NULL or
         * when max requests is less than or equal to 0
         */
        if (req_list == NULL || req_list->krl_list == NULL ||
            req_list->krl_max <= 0)
                return (-1);

        /*
         * Zero out requests and number of requests used
         */
        bzero(req_list->krl_list, req_list->krl_max * sizeof (kcpc_request_t));
        req_list->krl_cnt = 0;
        return (0);
}

/*
 * Free given list of counter event requests
 */
int
kcpc_reqs_fini(kcpc_request_list_t *req_list)
{
        kmem_free(req_list->krl_list,
            req_list->krl_max * sizeof (kcpc_request_t));
        kmem_free(req_list, sizeof (kcpc_request_list_t));
        return (0);
}
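
/*
 * Illustrative sketch (not part of this file): building and tearing down a
 * request list with the routines above.  The event name, the flag and the
 * example_data object are placeholders chosen for illustration; error
 * handling is elided.
 *
 *      kcpc_request_list_t *rl;
 *
 *      rl = kcpc_reqs_init(cpc_ncounters, KM_SLEEP);
 *      (void) kcpc_reqs_add(rl, "PAPI_tot_ins", 0, CPC_COUNT_SYSTEM,
 *          0, NULL, &example_data, KM_SLEEP);
 *      ...
 *      (void) kcpc_reqs_reset(rl);     (reuse the space for another set)
 *      (void) kcpc_reqs_fini(rl);
 */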

/*
 * Create set of given counter event requests
 */
static kcpc_set_t *
kcpc_set_create(kcpc_request_t *reqs, int nreqs, int set_flags, int kmem_flags)
{
        int i;
        kcpc_set_t *set;

        /*
         * Allocate set and assign number of requests in set and flags
         */
        set = kmem_zalloc(sizeof (kcpc_set_t), kmem_flags);
        if (set == NULL)
                return (NULL);

        if (nreqs < cpc_ncounters)
                set->ks_nreqs = nreqs;
        else
                set->ks_nreqs = cpc_ncounters;

        set->ks_flags = set_flags;

        /*
         * Allocate requests needed, copy requests into set, and set index into
         * data for each request (which may change when we assign requested
         * counter events to counters)
         */
        set->ks_req = (kcpc_request_t *)kmem_zalloc(sizeof (kcpc_request_t) *
            set->ks_nreqs, kmem_flags);
        if (set->ks_req == NULL) {
                kmem_free(set, sizeof (kcpc_set_t));
                return (NULL);
        }

        bcopy(reqs, set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs);

        for (i = 0; i < set->ks_nreqs; i++)
                set->ks_req[i].kr_index = i;

        return (set);
}
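
/*
 * Illustrative sketch (not part of this file): how a caller inside this file
 * might turn a request list into a set.  The 'rl' variable refers to the
 * hypothetical request list from the earlier sketch.
 *
 *      kcpc_set_t *set;
 *
 *      set = kcpc_set_create(rl->krl_list, rl->krl_cnt, 0, KM_SLEEP);
 */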

/*
 * Stop the counters on the current CPU.
 *
 * If preserve_context is true, the caller is interested in the CPU's CPC
 * context and wants it to be preserved.
 *
 * If preserve_context is false, the caller does not need the CPU's CPC context
 * to be preserved, so it is set to NULL.
 */
static void
kcpc_cpustop_func(boolean_t preserve_context)
{
        kpreempt_disable();

        /*
         * Someone already stopped this context before us, so there is nothing
         * to do.
         */
        if (CPU->cpu_cpc_ctx == NULL) {
                kpreempt_enable();
                return;
        }

        kcpc_unprogram(CPU->cpu_cpc_ctx, B_TRUE);
        /*
         * If CU does not use the counters, then clear the CPU's CPC context.
         * If the caller requested that the context be preserved, it should
         * have disabled CU first, so there should be no CU context now.
         */
        ASSERT(!preserve_context || !CU_CPC_ON(CPU));
        if (!preserve_context && CPU->cpu_cpc_ctx != NULL && !CU_CPC_ON(CPU))
                CPU->cpu_cpc_ctx = NULL;

        kpreempt_enable();
}

/*
 * Stop the counters on the given CPU and set its CPC context to NULL unless
 * preserve_context is true.
 */
void
kcpc_cpu_stop(cpu_t *cp, boolean_t preserve_context)
{
        cpu_call(cp, (cpu_call_func_t)kcpc_cpustop_func,
            preserve_context, 0);
}

/*
 * Program the context on the current CPU.
 */
static void
kcpc_remoteprogram_func(kcpc_ctx_t *ctx, uintptr_t arg)
{
        boolean_t for_thread = (boolean_t)arg;

        ASSERT(ctx != NULL);

        kpreempt_disable();
        kcpc_program(ctx, for_thread, B_TRUE);
        kpreempt_enable();
}

/*
 * Program the counters on the given CPU.
 */
void
kcpc_cpu_program(cpu_t *cp, kcpc_ctx_t *ctx)
{
        cpu_call(cp, (cpu_call_func_t)kcpc_remoteprogram_func, (uintptr_t)ctx,
            (uintptr_t)B_FALSE);
}
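
/*
 * Illustrative sketch (not part of this file): stopping the counters on
 * another CPU and then handing it a new CPU-bound context.  'cp' and 'ctx'
 * are assumed to be kept valid by the (hypothetical) caller.
 *
 *      kcpc_cpu_stop(cp, B_FALSE);
 *      ...
 *      kcpc_cpu_program(cp, ctx);
 */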

char *
kcpc_list_attrs(void)
{
        ASSERT(pcbe_ops != NULL);

        return (pcbe_ops->pcbe_list_attrs());
}

char *
kcpc_list_events(uint_t pic)
{
        ASSERT(pcbe_ops != NULL);

        return (pcbe_ops->pcbe_list_events(pic));
}

uint_t
kcpc_pcbe_capabilities(void)
{
        ASSERT(pcbe_ops != NULL);

        return (pcbe_ops->pcbe_caps);
}

int
kcpc_pcbe_loaded(void)
{
        return (pcbe_ops == NULL ? -1 : 0);
}
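
/*
 * Illustrative sketch (not part of this file): guarding PCBE queries on
 * whether a backend module is loaded.  The local variables are hypothetical.
 *
 *      if (kcpc_pcbe_loaded() == 0) {
 *              char *attrs = kcpc_list_attrs();
 *              char *events = kcpc_list_events(0);
 *              uint_t caps = kcpc_pcbe_capabilities();
 *              ...
 *      }
 */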