/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/inttypes.h>
#include <sys/cmn_err.h>
#include <sys/time.h>
#include <sys/ksynch.h>
#include <sys/systm.h>
#include <sys/kcpc.h>
#include <sys/cpc_impl.h>
#include <sys/cpc_pcbe.h>
#include <sys/atomic.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/sdt.h>
#include <sys/archsystm.h>
#include <sys/promif.h>
#include <sys/x_call.h>
#include <sys/cap_util.h>
#if defined(__x86)
#include <asm/clock.h>
#include <sys/xc_levels.h>
#endif

static kmutex_t kcpc_ctx_llock[CPC_HASH_BUCKETS];	/* protects ctx_list */
static kcpc_ctx_t *kcpc_ctx_list[CPC_HASH_BUCKETS];	/* head of list */


krwlock_t	kcpc_cpuctx_lock;	/* lock for 'kcpc_cpuctx' below */
int		kcpc_cpuctx;		/* number of cpu-specific contexts */

int		kcpc_counts_include_idle = 1; /* Project Private /etc/system variable */

/*
 * These are set when a PCBE module is loaded.
 */
uint_t		cpc_ncounters = 0;
pcbe_ops_t	*pcbe_ops = NULL;

/*
 * Statistics on (mis)behavior
 */
static uint32_t kcpc_intrctx_count;	/* # overflows in an interrupt handler */
static uint32_t kcpc_nullctx_count;	/* # overflows in a thread with no ctx */

/*
 * By setting 'kcpc_nullctx_panic' to 1, any overflow interrupts in a thread
 * with no valid context will result in a panic.
 */
static int kcpc_nullctx_panic = 0;

static void kcpc_lwp_create(kthread_t *t, kthread_t *ct);
static void kcpc_restore(kcpc_ctx_t *ctx);
static void kcpc_save(kcpc_ctx_t *ctx);
static void kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx);
static int kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch);
static kcpc_set_t *kcpc_dup_set(kcpc_set_t *set);
static kcpc_set_t *kcpc_set_create(kcpc_request_t *reqs, int nreqs,
    int set_flags, int kmem_flags);

/*
 * Macros to manipulate context flags. All flag updates should use one of these
 * two macros.
 *
 * Flags should always be updated atomically since some of the updates are
 * not protected by locks.
 */
#define	KCPC_CTX_FLAG_SET(ctx, flag) atomic_or_uint(&(ctx)->kc_flags, (flag))
#define	KCPC_CTX_FLAG_CLR(ctx, flag) atomic_and_uint(&(ctx)->kc_flags, ~(flag))

/*
 * The IS_HIPIL() macro verifies that the code is executed either from a
 * cross-call or from a high-PIL interrupt.
 */
#ifdef DEBUG
#define	IS_HIPIL() (getpil() >= XCALL_PIL)
#else
#define	IS_HIPIL()
#endif	/* DEBUG */


extern int kcpc_hw_load_pcbe(void);

/*
 * Return value from kcpc_hw_load_pcbe()
 */
static int kcpc_pcbe_error = 0;

/*
 * Perform one-time initialization of kcpc framework.
 * This function performs the initialization only the first time it is called.
 * It is safe to call it multiple times.
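 * Returns 0 on success and -1 if the platform PCBE module could not be
 * loaded; once loading has failed, every later call returns -1 as well.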
 */
int
kcpc_init(void)
{
	long hash;
	static uint32_t kcpc_initialized = 0;

	/*
	 * We already tried loading platform pcbe module and failed
	 */
	if (kcpc_pcbe_error != 0)
		return (-1);

	/*
	 * The kcpc framework should be initialized at most once
	 */
	if (atomic_cas_32(&kcpc_initialized, 0, 1) != 0)
		return (0);

	rw_init(&kcpc_cpuctx_lock, NULL, RW_DEFAULT, NULL);
	for (hash = 0; hash < CPC_HASH_BUCKETS; hash++)
		mutex_init(&kcpc_ctx_llock[hash],
		    NULL, MUTEX_DRIVER, (void *)(uintptr_t)15);

	/*
	 * Load platform-specific pcbe module
	 */
	kcpc_pcbe_error = kcpc_hw_load_pcbe();

	return (kcpc_pcbe_error == 0 ? 0 : -1);
}

void
kcpc_register_pcbe(pcbe_ops_t *ops)
{
	pcbe_ops = ops;
	cpc_ncounters = pcbe_ops->pcbe_ncounters();
}

void
kcpc_register_dcpc(void (*func)(uint64_t))
{
	dtrace_cpc_fire = func;
}

void
kcpc_unregister_dcpc(void)
{
	dtrace_cpc_fire = NULL;
}

int
kcpc_bind_cpu(kcpc_set_t *set, processorid_t cpuid, int *subcode)
{
	cpu_t		*cp;
	kcpc_ctx_t	*ctx;
	int		error;
	int		save_spl;

	ctx = kcpc_ctx_alloc(KM_SLEEP);

	if (kcpc_assign_reqs(set, ctx) != 0) {
		kcpc_ctx_free(ctx);
		*subcode = CPC_RESOURCE_UNAVAIL;
		return (EINVAL);
	}

	ctx->kc_cpuid = cpuid;
	ctx->kc_thread = curthread;

	set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);

	if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
		kmem_free(set->ks_data,
		    set->ks_nreqs * sizeof (uint64_t));
		kcpc_ctx_free(ctx);
		return (error);
	}

	set->ks_ctx = ctx;
	ctx->kc_set = set;

	/*
	 * We must hold cpu_lock to prevent DR, offlining, or unbinding while
	 * we are manipulating the cpu_t and programming the hardware, else
	 * the cpu_t could go away while we're looking at it.
	 */
	mutex_enter(&cpu_lock);
	cp = cpu_get(cpuid);

	if (cp == NULL)
		/*
		 * The CPU could have been DR'd out while we were getting set up.
		 */
		goto unbound;

	mutex_enter(&cp->cpu_cpc_ctxlock);
	kpreempt_disable();
	save_spl = spl_xcall();

	/*
	 * Check to see whether the counters for this CPU are already being
	 * used by someone other than the kernel for capacity and utilization
	 * (since the kernel will let go of the counters for the user in
	 * kcpc_program() below).
	 */
	if (cp->cpu_cpc_ctx != NULL && !CU_CPC_ON(cp)) {
		/*
		 * If this CPU already has a bound set, return an error.
		 */
		splx(save_spl);
		kpreempt_enable();
		mutex_exit(&cp->cpu_cpc_ctxlock);
		goto unbound;
	}

	if (curthread->t_bind_cpu != cpuid) {
		splx(save_spl);
		kpreempt_enable();
		mutex_exit(&cp->cpu_cpc_ctxlock);
		goto unbound;
	}

	kcpc_program(ctx, B_FALSE, B_TRUE);

	splx(save_spl);
	kpreempt_enable();

	mutex_exit(&cp->cpu_cpc_ctxlock);
	mutex_exit(&cpu_lock);

	mutex_enter(&set->ks_lock);
	set->ks_state |= KCPC_SET_BOUND;
	cv_signal(&set->ks_condv);
	mutex_exit(&set->ks_lock);

	return (0);

unbound:
	mutex_exit(&cpu_lock);
	set->ks_ctx = NULL;
	kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
	kcpc_ctx_free(ctx);
	return (EAGAIN);
}

int
kcpc_bind_thread(kcpc_set_t *set, kthread_t *t, int *subcode)
{
	kcpc_ctx_t	*ctx;
	int		error;

	/*
	 * Only one set is allowed per context, so ensure there is no
	 * existing context.
	 */

	if (t->t_cpc_ctx != NULL)
		return (EEXIST);

	ctx = kcpc_ctx_alloc(KM_SLEEP);

	/*
	 * The context must begin life frozen until it has been properly
	 * programmed onto the hardware. This prevents the context ops from
	 * worrying about it until we're ready.
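	 * The flag is cleared again below once it is safe for the context
	 * ops to run.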
	 */
	KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
	ctx->kc_hrtime = gethrtime();

	if (kcpc_assign_reqs(set, ctx) != 0) {
		kcpc_ctx_free(ctx);
		*subcode = CPC_RESOURCE_UNAVAIL;
		return (EINVAL);
	}

	ctx->kc_cpuid = -1;
	if (set->ks_flags & CPC_BIND_LWP_INHERIT)
		KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_LWPINHERIT);
	ctx->kc_thread = t;
	t->t_cpc_ctx = ctx;
	/*
	 * Permit threads to look at their own hardware counters from userland.
	 */
	KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_NONPRIV);

	/*
	 * Create the data store for this set.
	 */
	set->ks_data = kmem_alloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);

	if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
		kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
		kcpc_ctx_free(ctx);
		t->t_cpc_ctx = NULL;
		return (error);
	}

	set->ks_ctx = ctx;
	ctx->kc_set = set;

	/*
	 * Add a device context to the subject thread.
	 */
	installctx(t, ctx, kcpc_save, kcpc_restore, NULL,
	    kcpc_lwp_create, NULL, kcpc_free);

	/*
	 * Ask the backend to program the hardware.
	 */
	if (t == curthread) {
		int save_spl;

		kpreempt_disable();
		save_spl = spl_xcall();
		kcpc_program(ctx, B_TRUE, B_TRUE);
		splx(save_spl);
		kpreempt_enable();
	} else {
		/*
		 * Since we are the agent LWP, we know the victim LWP is stopped
		 * until we're done here; no need to worry about preemption or
		 * migration here. We still use an atomic op to clear the flag
		 * to ensure the flags are always self-consistent; they can
		 * still be accessed from, for instance, another CPU doing a
		 * kcpc_invalidate_all().
		 */
		KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
	}

	mutex_enter(&set->ks_lock);
	set->ks_state |= KCPC_SET_BOUND;
	cv_signal(&set->ks_condv);
	mutex_exit(&set->ks_lock);

	return (0);
}

/*
 * Walk through each request in the set and ask the PCBE to configure a
 * corresponding counter.
 */
int
kcpc_configure_reqs(kcpc_ctx_t *ctx, kcpc_set_t *set, int *subcode)
{
	int i;
	int ret;
	kcpc_request_t *rp;

	for (i = 0; i < set->ks_nreqs; i++) {
		int n;
		rp = &set->ks_req[i];

		n = rp->kr_picnum;

		ASSERT(n >= 0 && n < cpc_ncounters);

		ASSERT(ctx->kc_pics[n].kp_req == NULL);

		if (rp->kr_flags & CPC_OVF_NOTIFY_EMT) {
			if ((pcbe_ops->pcbe_caps & CPC_CAP_OVERFLOW_INTERRUPT)
			    == 0) {
				*subcode = -1;
				return (ENOTSUP);
			}
			/*
			 * If any of the counters have requested overflow
			 * notification, we flag the context as being one that
			 * cares about overflow.
			 */
			KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_SIGOVF);
		}

		rp->kr_config = NULL;
		if ((ret = pcbe_ops->pcbe_configure(n, rp->kr_event,
		    rp->kr_preset, rp->kr_flags, rp->kr_nattrs, rp->kr_attr,
		    &(rp->kr_config), (void *)ctx)) != 0) {
			kcpc_free_configs(set);
			*subcode = ret;
			switch (ret) {
			case CPC_ATTR_REQUIRES_PRIVILEGE:
			case CPC_HV_NO_ACCESS:
				return (EACCES);
			default:
				return (EINVAL);
			}
		}

		ctx->kc_pics[n].kp_req = rp;
		rp->kr_picp = &ctx->kc_pics[n];
		rp->kr_data = set->ks_data + rp->kr_index;
		*rp->kr_data = rp->kr_preset;
	}

	return (0);
}

void
kcpc_free_configs(kcpc_set_t *set)
{
	int i;

	for (i = 0; i < set->ks_nreqs; i++)
		if (set->ks_req[i].kr_config != NULL)
			pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
}

/*
 * buf points to a user address and the data should be copied out to that
 * address in the current process.
 */
int
kcpc_sample(kcpc_set_t *set, uint64_t *buf, hrtime_t *hrtime, uint64_t *tick)
{
	kcpc_ctx_t	*ctx = set->ks_ctx;
	int		save_spl;

	mutex_enter(&set->ks_lock);
	if ((set->ks_state & KCPC_SET_BOUND) == 0) {
		mutex_exit(&set->ks_lock);
		return (EINVAL);
	}
	mutex_exit(&set->ks_lock);

	/*
	 * Kernel preemption must be disabled while reading the hardware regs,
	 * and if this is a CPU-bound context, while checking the CPU binding
	 * of the current thread.
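	 * The PIL is also raised to cross-call level so that a cross-call
	 * cannot tear the context down while we are looking at it.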
	 */
	kpreempt_disable();
	save_spl = spl_xcall();

	if (ctx->kc_flags & KCPC_CTX_INVALID) {
		splx(save_spl);
		kpreempt_enable();
		return (EAGAIN);
	}

	if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0) {
		if (ctx->kc_cpuid != -1) {
			if (curthread->t_bind_cpu != ctx->kc_cpuid) {
				splx(save_spl);
				kpreempt_enable();
				return (EAGAIN);
			}
		}

		if (ctx->kc_thread == curthread) {
			uint64_t curtick = KCPC_GET_TICK();

			ctx->kc_hrtime = gethrtime_waitfree();
			pcbe_ops->pcbe_sample(ctx);
			ctx->kc_vtick += curtick - ctx->kc_rawtick;
			ctx->kc_rawtick = curtick;
		}

		/*
		 * The config may have been invalidated by
		 * the pcbe_sample op.
		 */
		if (ctx->kc_flags & KCPC_CTX_INVALID) {
			splx(save_spl);
			kpreempt_enable();
			return (EAGAIN);
		}
	}

	splx(save_spl);
	kpreempt_enable();

	if (copyout(set->ks_data, buf,
	    set->ks_nreqs * sizeof (uint64_t)) == -1)
		return (EFAULT);
	if (copyout(&ctx->kc_hrtime, hrtime, sizeof (uint64_t)) == -1)
		return (EFAULT);
	if (copyout(&ctx->kc_vtick, tick, sizeof (uint64_t)) == -1)
		return (EFAULT);

	return (0);
}

/*
 * Stop the counters on the CPU this context is bound to.
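 * The caller (see kcpc_unbind()) holds cpu_lock and the CPU's
 * cpu_cpc_ctxlock, so the CPU cannot be DR'd out from under us.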
 */
static void
kcpc_stop_hw(kcpc_ctx_t *ctx)
{
	cpu_t *cp;

	kpreempt_disable();

	if (ctx->kc_cpuid == CPU->cpu_id) {
		cp = CPU;
	} else {
		cp = cpu_get(ctx->kc_cpuid);
	}

	ASSERT(cp != NULL && cp->cpu_cpc_ctx == ctx);
	kcpc_cpu_stop(cp, B_FALSE);

	kpreempt_enable();
}

int
kcpc_unbind(kcpc_set_t *set)
{
	kcpc_ctx_t	*ctx;
	kthread_t	*t;

	/*
	 * We could be racing with the process's agent thread as it
	 * binds the set; we must wait for the set to finish binding
	 * before attempting to tear it down.
	 */
	mutex_enter(&set->ks_lock);
	while ((set->ks_state & KCPC_SET_BOUND) == 0)
		cv_wait(&set->ks_condv, &set->ks_lock);
	mutex_exit(&set->ks_lock);

	ctx = set->ks_ctx;

	/*
	 * Use kc_lock to synchronize with kcpc_restore().
	 */
	mutex_enter(&ctx->kc_lock);
	KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
	mutex_exit(&ctx->kc_lock);

	if (ctx->kc_cpuid == -1) {
		t = ctx->kc_thread;
		/*
		 * The context is thread-bound and therefore has a device
		 * context. It will be freed via removectx() calling
		 * freectx() calling kcpc_free().
		 */
		if (t == curthread) {
			int save_spl;

			kpreempt_disable();
			save_spl = spl_xcall();
			if (!(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED))
				kcpc_unprogram(ctx, B_TRUE);
			splx(save_spl);
			kpreempt_enable();
		}
#ifdef DEBUG
		if (removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
		    kcpc_lwp_create, NULL, kcpc_free) == 0)
			panic("kcpc_unbind: context %p not present on thread %p",
			    (void *)ctx, (void *)t);
#else
		(void) removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
		    kcpc_lwp_create, NULL, kcpc_free);
#endif /* DEBUG */
		t->t_cpc_set = NULL;
		t->t_cpc_ctx = NULL;
	} else {
		/*
		 * If we are unbinding a CPU-bound set from a remote CPU, the
		 * native CPU's idle thread could be in the midst of programming
		 * this context onto the CPU. We grab the context's lock here to
		 * ensure that the idle thread is done with it. When we release
		 * the lock, the CPU no longer has a context and the idle thread
		 * will move on.
		 *
		 * cpu_lock must be held to prevent the CPU from being DR'd out
		 * while we disassociate the context from the cpu_t.
		 */
		cpu_t *cp;
		mutex_enter(&cpu_lock);
		cp = cpu_get(ctx->kc_cpuid);
		if (cp != NULL) {
			/*
			 * The CPU may have been DR'd out of the system.
			 */
			mutex_enter(&cp->cpu_cpc_ctxlock);
			if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0)
				kcpc_stop_hw(ctx);
			ASSERT(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED);
			mutex_exit(&cp->cpu_cpc_ctxlock);
		}
		mutex_exit(&cpu_lock);
		if (ctx->kc_thread == curthread) {
			kcpc_free(ctx, 0);
			curthread->t_cpc_set = NULL;
		}
	}

	return (0);
}

int
kcpc_preset(kcpc_set_t *set, int index, uint64_t preset)
{
	int i;

	ASSERT(set != NULL);
	ASSERT(set->ks_state & KCPC_SET_BOUND);
	ASSERT(set->ks_ctx->kc_thread == curthread);
	ASSERT(set->ks_ctx->kc_cpuid == -1);

	if (index < 0 || index >= set->ks_nreqs)
		return (EINVAL);

	for (i = 0; i < set->ks_nreqs; i++)
		if (set->ks_req[i].kr_index == index)
			break;
	ASSERT(i != set->ks_nreqs);

	set->ks_req[i].kr_preset = preset;
	return (0);
}

int
kcpc_restart(kcpc_set_t *set)
{
	kcpc_ctx_t	*ctx = set->ks_ctx;
	int		i;
	int		save_spl;

	ASSERT(set->ks_state & KCPC_SET_BOUND);
	ASSERT(ctx->kc_thread == curthread);
	ASSERT(ctx->kc_cpuid == -1);

	for (i = 0; i < set->ks_nreqs; i++) {
		*(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset;
		pcbe_ops->pcbe_configure(0, NULL, set->ks_req[i].kr_preset,
		    0, 0, NULL, &set->ks_req[i].kr_config, NULL);
	}

	kpreempt_disable();
	save_spl = spl_xcall();

	/*
	 * If the user is doing this on a running set, make sure the counters
	 * are stopped first.
	 */
	if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
		pcbe_ops->pcbe_allstop();

	/*
	 * Ask the backend to program the hardware.
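	 * Resetting kc_rawtick here restarts the virtualized tick accounting
	 * from the point of reprogramming.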
	 */
	ctx->kc_rawtick = KCPC_GET_TICK();
	KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
	pcbe_ops->pcbe_program(ctx);
	splx(save_spl);
	kpreempt_enable();

	return (0);
}

/*
 * Caller must hold kcpc_cpuctx_lock.
 */
int
kcpc_enable(kthread_t *t, int cmd, int enable)
{
	kcpc_ctx_t	*ctx = t->t_cpc_ctx;
	kcpc_set_t	*set = t->t_cpc_set;
	kcpc_set_t	*newset;
	int		i;
	int		flag;
	int		err;

	ASSERT(RW_READ_HELD(&kcpc_cpuctx_lock));

	if (ctx == NULL) {
		/*
		 * This thread has a set but no context; it must be a
		 * CPU-bound set.
		 */
		ASSERT(t->t_cpc_set != NULL);
		ASSERT(t->t_cpc_set->ks_ctx->kc_cpuid != -1);
		return (EINVAL);
	} else if (ctx->kc_flags & KCPC_CTX_INVALID)
		return (EAGAIN);

	if (cmd == CPC_ENABLE) {
		if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
			return (EINVAL);
		kpreempt_disable();
		KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
		kcpc_restore(ctx);
		kpreempt_enable();
	} else if (cmd == CPC_DISABLE) {
		if (ctx->kc_flags & KCPC_CTX_FREEZE)
			return (EINVAL);
		kpreempt_disable();
		kcpc_save(ctx);
		KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
		kpreempt_enable();
	} else if (cmd == CPC_USR_EVENTS || cmd == CPC_SYS_EVENTS) {
		/*
		 * Strategy for usr/sys: stop counters and update set's presets
		 * with current counter values, unbind, update requests with
		 * new config, then re-bind.
		 */
		flag = (cmd == CPC_USR_EVENTS) ?
		    CPC_COUNT_USER : CPC_COUNT_SYSTEM;

		kpreempt_disable();
		KCPC_CTX_FLAG_SET(ctx,
		    KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
		pcbe_ops->pcbe_allstop();
		kpreempt_enable();

		for (i = 0; i < set->ks_nreqs; i++) {
			set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data);
			if (enable)
				set->ks_req[i].kr_flags |= flag;
			else
				set->ks_req[i].kr_flags &= ~flag;
		}
		newset = kcpc_dup_set(set);
		if (kcpc_unbind(set) != 0)
			return (EINVAL);
		t->t_cpc_set = newset;
		if (kcpc_bind_thread(newset, t, &err) != 0) {
			t->t_cpc_set = NULL;
			kcpc_free_set(newset);
			return (EINVAL);
		}
	} else
		return (EINVAL);

	return (0);
}

/*
 * Provide PCBEs with a way of obtaining the configs of every counter which will
 * be programmed together.
 *
 * If current is NULL, provide the first config.
 *
 * If data != NULL, caller wants to know where the data store associated with
 * the config we return is located.
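 *
 * As an illustrative sketch only (pcbe_program_one() is a hypothetical
 * backend helper, not part of this interface), a PCBE might walk every
 * config like this:
 *
 *	void *cfg = NULL;
 *	uint64_t *data;
 *
 *	while ((cfg = kcpc_next_config(token, cfg, &data)) != NULL)
 *		pcbe_program_one(cfg, data);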
 */
void *
kcpc_next_config(void *token, void *current, uint64_t **data)
{
	int		i;
	kcpc_pic_t	*pic;
	kcpc_ctx_t	*ctx = (kcpc_ctx_t *)token;

	if (current == NULL) {
		/*
		 * Client would like the first config, which may not be in
		 * counter 0; we need to search through the counters for the
		 * first config.
		 */
		for (i = 0; i < cpc_ncounters; i++)
			if (ctx->kc_pics[i].kp_req != NULL)
				break;
		/*
		 * There are no counters configured for the given context.
		 */
		if (i == cpc_ncounters)
			return (NULL);
	} else {
		/*
		 * There surely is a faster way to do this.
		 */
		for (i = 0; i < cpc_ncounters; i++) {
			pic = &ctx->kc_pics[i];

			if (pic->kp_req != NULL &&
			    current == pic->kp_req->kr_config)
				break;
		}

		/*
		 * We found the current config at picnum i. Now search for the
		 * next configured PIC.
		 */
		for (i++; i < cpc_ncounters; i++) {
			pic = &ctx->kc_pics[i];
			if (pic->kp_req != NULL)
				break;
		}

		if (i == cpc_ncounters)
			return (NULL);
	}

	if (data != NULL) {
		*data = ctx->kc_pics[i].kp_req->kr_data;
	}

	return (ctx->kc_pics[i].kp_req->kr_config);
}


kcpc_ctx_t *
kcpc_ctx_alloc(int kmem_flags)
{
	kcpc_ctx_t	*ctx;
	long		hash;

	ctx = (kcpc_ctx_t *)kmem_zalloc(sizeof (kcpc_ctx_t), kmem_flags);
	if (ctx == NULL)
		return (NULL);

	hash = CPC_HASH_CTX(ctx);
	mutex_enter(&kcpc_ctx_llock[hash]);
	ctx->kc_next = kcpc_ctx_list[hash];
	kcpc_ctx_list[hash] = ctx;
	mutex_exit(&kcpc_ctx_llock[hash]);

	ctx->kc_pics = (kcpc_pic_t *)kmem_zalloc(sizeof (kcpc_pic_t) *
	    cpc_ncounters, KM_SLEEP);

	ctx->kc_cpuid = -1;

	return (ctx);
}

/*
 * Copy set from ctx to the child context, cctx, if it has CPC_BIND_LWP_INHERIT
 * in the flags.
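 *
 * The child's set starts out unbound; it is only marked KCPC_SET_BOUND
 * (and any waiters signalled) after its requests have been configured.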
 */
static void
kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx)
{
	kcpc_set_t	*ks = ctx->kc_set, *cks;
	int		i, j;
	int		code;

	ASSERT(ks != NULL);

	if ((ks->ks_flags & CPC_BIND_LWP_INHERIT) == 0)
		return;

	cks = kmem_zalloc(sizeof (*cks), KM_SLEEP);
	cks->ks_state &= ~KCPC_SET_BOUND;
	cctx->kc_set = cks;
	cks->ks_flags = ks->ks_flags;
	cks->ks_nreqs = ks->ks_nreqs;
	cks->ks_req = kmem_alloc(cks->ks_nreqs *
	    sizeof (kcpc_request_t), KM_SLEEP);
	cks->ks_data = kmem_alloc(cks->ks_nreqs * sizeof (uint64_t),
	    KM_SLEEP);
	cks->ks_ctx = cctx;

	for (i = 0; i < cks->ks_nreqs; i++) {
		cks->ks_req[i].kr_index = ks->ks_req[i].kr_index;
		cks->ks_req[i].kr_picnum = ks->ks_req[i].kr_picnum;
		(void) strncpy(cks->ks_req[i].kr_event,
		    ks->ks_req[i].kr_event, CPC_MAX_EVENT_LEN);
		cks->ks_req[i].kr_preset = ks->ks_req[i].kr_preset;
		cks->ks_req[i].kr_flags = ks->ks_req[i].kr_flags;
		cks->ks_req[i].kr_nattrs = ks->ks_req[i].kr_nattrs;
		if (ks->ks_req[i].kr_nattrs > 0) {
			cks->ks_req[i].kr_attr =
			    kmem_alloc(ks->ks_req[i].kr_nattrs *
			    sizeof (kcpc_attr_t), KM_SLEEP);
		}
		for (j = 0; j < ks->ks_req[i].kr_nattrs; j++) {
			(void) strncpy(cks->ks_req[i].kr_attr[j].ka_name,
			    ks->ks_req[i].kr_attr[j].ka_name,
			    CPC_MAX_ATTR_LEN);
			cks->ks_req[i].kr_attr[j].ka_val =
			    ks->ks_req[i].kr_attr[j].ka_val;
		}
	}
	if (kcpc_configure_reqs(cctx, cks, &code) != 0)
		kcpc_invalidate_config(cctx);

	mutex_enter(&cks->ks_lock);
	cks->ks_state |= KCPC_SET_BOUND;
	cv_signal(&cks->ks_condv);
	mutex_exit(&cks->ks_lock);
}


void
kcpc_ctx_free(kcpc_ctx_t *ctx)
{
	kcpc_ctx_t	**loc;
	long		hash = CPC_HASH_CTX(ctx);

	mutex_enter(&kcpc_ctx_llock[hash]);
	loc = &kcpc_ctx_list[hash];
	ASSERT(*loc != NULL);
	while (*loc != ctx)
		loc = &(*loc)->kc_next;
	*loc = ctx->kc_next;
	mutex_exit(&kcpc_ctx_llock[hash]);

	kmem_free(ctx->kc_pics, cpc_ncounters * sizeof (kcpc_pic_t));
	cv_destroy(&ctx->kc_condv);
	mutex_destroy(&ctx->kc_lock);
	kmem_free(ctx, sizeof (*ctx));
}

/*
 * Generic interrupt handler used on hardware that generates
 * overflow interrupts.
 *
 * Note: executed at high-level interrupt context!
 */
/*ARGSUSED*/
kcpc_ctx_t *
kcpc_overflow_intr(caddr_t arg, uint64_t bitmap)
{
	kcpc_ctx_t	*ctx;
	kthread_t	*t = curthread;
	int		i;

	/*
	 * On both x86 and UltraSPARC, we may deliver the high-level
	 * interrupt in kernel mode, just after we've started to run an
	 * interrupt thread. (That's because the hardware helpfully
	 * delivers the overflow interrupt some random number of cycles
	 * after the instruction that caused the overflow by which time
	 * we're in some part of the kernel, not necessarily running on
	 * the right thread).
	 *
	 * Check for this case here -- find the pinned thread
	 * that was running when the interrupt went off.
	 */
	if (t->t_flag & T_INTR_THREAD) {
		klwp_t *lwp;

		atomic_inc_32(&kcpc_intrctx_count);

		/*
		 * Note that t_lwp is always set to point at the underlying
		 * thread, thus this will work in the presence of nested
		 * interrupts.
		 */
		ctx = NULL;
		if ((lwp = t->t_lwp) != NULL) {
			t = lwptot(lwp);
			ctx = t->t_cpc_ctx;
		}
	} else
		ctx = t->t_cpc_ctx;

	if (ctx == NULL) {
		/*
		 * This can easily happen if we're using the counters in
		 * "shared" mode, for example, and an overflow interrupt
		 * occurs while we are running cpustat. In that case, the
		 * bound thread that has the context that belongs to this
		 * CPU is almost certainly sleeping (if it was running on
		 * the CPU we'd have found it above), and the actual
		 * interrupted thread has no knowledge of performance counters!
		 */
		ctx = curthread->t_cpu->cpu_cpc_ctx;
		if (ctx != NULL) {
			/*
			 * Return the bound context for this CPU to
			 * the interrupt handler so that it can synchronously
			 * sample the hardware counters and restart them.
			 */
			return (ctx);
		}

		/*
		 * As long as the overflow interrupt really is delivered early
		 * enough after trapping into the kernel to avoid switching
		 * threads, we must always be able to find the cpc context,
		 * or something went terribly wrong i.e. we ended up
		 * running a passivated interrupt thread, a kernel
		 * thread or we interrupted idle, all of which are Very Bad.
		 *
		 * We also could end up here owing to an incredibly unlikely
		 * race condition that exists on x86 based architectures when
		 * the cpc provider is in use; overflow interrupts are directed
		 * to the cpc provider if the 'dtrace_cpc_in_use' variable is
		 * set when we enter the handler. This variable is unset after
		 * overflow interrupts have been disabled on all CPUs and all
		 * contexts have been torn down. To stop interrupts, the cpc
		 * provider issues an xcall to the remote CPU before it tears
		 * down that CPU's context. As high priority xcalls, on an x86
		 * architecture, execute at a higher PIL than this handler, it
		 * is possible (though extremely unlikely) that the xcall could
		 * interrupt the overflow handler before the handler has
		 * checked the 'dtrace_cpc_in_use' variable, stop the counters,
		 * return to the cpc provider which could then rip down
		 * contexts and unset 'dtrace_cpc_in_use' *before* the CPU's
		 * overflow handler has had a chance to check the variable. In
		 * that case, the handler would direct the overflow into this
		 * code and no valid context will be found. The default behavior
		 * when no valid context is found is now to shout a warning to
		 * the console and bump the 'kcpc_nullctx_count' variable.
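		 * (On non-DEBUG kernels the warning is compiled out and only
		 * the count is bumped.)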
		 */
		if (kcpc_nullctx_panic)
			panic("null cpc context, thread %p", (void *)t);
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "null cpc context found in overflow handler!\n");
#endif
		atomic_inc_32(&kcpc_nullctx_count);
	} else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
		/*
		 * Schedule an ast to sample the counters, which will
		 * propagate any overflow into the virtualized performance
		 * counter(s), and may deliver a signal.
		 */
		ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
		/*
		 * If a counter has overflowed which was counting on behalf of
		 * a request which specified CPC_OVF_NOTIFY_EMT, send the
		 * process a signal.
		 */
		for (i = 0; i < cpc_ncounters; i++) {
			if (ctx->kc_pics[i].kp_req != NULL &&
			    bitmap & (1 << i) &&
			    ctx->kc_pics[i].kp_req->kr_flags &
			    CPC_OVF_NOTIFY_EMT) {
				/*
				 * A signal has been requested for this PIC,
				 * so freeze the context. The interrupt handler
				 * has already stopped the counter hardware.
				 */
				KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
				atomic_or_uint(&ctx->kc_pics[i].kp_flags,
				    KCPC_PIC_OVERFLOWED);
			}
		}
		aston(t);
	} else if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) {
		/*
		 * Thread context is no longer valid, but there may be a valid
		 * CPU context.
		 */
		return (curthread->t_cpu->cpu_cpc_ctx);
	}

	return (NULL);
}

/*
 * The current thread context had an overflow interrupt; we're
 * executing here in high-level interrupt context.
 */
/*ARGSUSED*/
uint_t
kcpc_hw_overflow_intr(caddr_t arg1, caddr_t arg2)
{
	kcpc_ctx_t	*ctx;
	uint64_t	bitmap;
	uint8_t		*state;
	int		save_spl;

	if (pcbe_ops == NULL ||
	    (bitmap = pcbe_ops->pcbe_overflow_bitmap()) == 0)
		return (DDI_INTR_UNCLAIMED);

	/*
	 * Prevent any further interrupts.
	 */
	pcbe_ops->pcbe_allstop();

	if (dtrace_cpc_in_use) {
		state = &cpu_core[CPU->cpu_id].cpuc_dcpc_intr_state;

		/*
		 * Set the per-CPU state bit to indicate that we are currently
		 * processing an interrupt if it is currently free. Drop the
		 * interrupt if the state isn't free (i.e. a configuration
		 * event is taking place).
		 */
		if (atomic_cas_8(state, DCPC_INTR_FREE,
		    DCPC_INTR_PROCESSING) == DCPC_INTR_FREE) {
			int i;
			kcpc_request_t req;

			ASSERT(dtrace_cpc_fire != NULL);

			(*dtrace_cpc_fire)(bitmap);

			ctx = curthread->t_cpu->cpu_cpc_ctx;
			if (ctx == NULL) {
#ifdef DEBUG
				cmn_err(CE_NOTE, "null cpc context in "
				    "hardware overflow handler!\n");
#endif
				return (DDI_INTR_CLAIMED);
			}

			/* Reset any counters that have overflowed */
			for (i = 0; i < ctx->kc_set->ks_nreqs; i++) {
				req = ctx->kc_set->ks_req[i];

				if (bitmap & (1 << req.kr_picnum)) {
					pcbe_ops->pcbe_configure(req.kr_picnum,
					    req.kr_event, req.kr_preset,
					    req.kr_flags, req.kr_nattrs,
					    req.kr_attr, &(req.kr_config),
					    (void *)ctx);
				}
			}
			pcbe_ops->pcbe_program(ctx);

			/*
			 * We've finished processing the interrupt so set
			 * the state back to free.
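			 * The membar_producer() below ensures the store is
			 * visible to other CPUs before any later stores.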
1117b9e93c10SJonathan Haslam */ 1118b9e93c10SJonathan Haslam cpu_core[CPU->cpu_id].cpuc_dcpc_intr_state = 1119b9e93c10SJonathan Haslam DCPC_INTR_FREE; 1120b9e93c10SJonathan Haslam membar_producer(); 1121b9e93c10SJonathan Haslam } 1122b9e93c10SJonathan Haslam return (DDI_INTR_CLAIMED); 1123b9e93c10SJonathan Haslam } 1124b9e93c10SJonathan Haslam 1125b9e93c10SJonathan Haslam /* 1126b9e93c10SJonathan Haslam * DTrace isn't involved so pass on accordingly. 11277c478bd9Sstevel@tonic-gate * 11287c478bd9Sstevel@tonic-gate * If the interrupt has occurred in the context of an lwp owning 11297c478bd9Sstevel@tonic-gate * the counters, then the handler posts an AST to the lwp to 11307c478bd9Sstevel@tonic-gate * trigger the actual sampling, and optionally deliver a signal or 11317c478bd9Sstevel@tonic-gate * restart the counters, on the way out of the kernel using 11327c478bd9Sstevel@tonic-gate * kcpc_hw_overflow_ast() (see below). 11337c478bd9Sstevel@tonic-gate * 11347c478bd9Sstevel@tonic-gate * On the other hand, if the handler returns the context to us 11357c478bd9Sstevel@tonic-gate * directly, then it means that there are no other threads in 11367c478bd9Sstevel@tonic-gate * the middle of updating it, no AST has been posted, and so we 11377c478bd9Sstevel@tonic-gate * should sample the counters here, and restart them with no 11387c478bd9Sstevel@tonic-gate * further fuss. 1139b885580bSAlexander Kolbasov * 1140b885580bSAlexander Kolbasov * The CPU's CPC context may disappear as a result of cross-call which 1141b885580bSAlexander Kolbasov * has higher PIL on x86, so protect the context by raising PIL to the 1142b885580bSAlexander Kolbasov * cross-call level. 11437c478bd9Sstevel@tonic-gate */ 1144b885580bSAlexander Kolbasov save_spl = spl_xcall(); 11457c478bd9Sstevel@tonic-gate if ((ctx = kcpc_overflow_intr(arg1, bitmap)) != NULL) { 11467c478bd9Sstevel@tonic-gate uint64_t curtick = KCPC_GET_TICK(); 11477c478bd9Sstevel@tonic-gate 11487c478bd9Sstevel@tonic-gate ctx->kc_hrtime = gethrtime_waitfree(); 11497c478bd9Sstevel@tonic-gate ctx->kc_vtick += curtick - ctx->kc_rawtick; 11507c478bd9Sstevel@tonic-gate ctx->kc_rawtick = curtick; 11517c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_sample(ctx); 11527c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_program(ctx); 11537c478bd9Sstevel@tonic-gate } 1154b885580bSAlexander Kolbasov splx(save_spl); 11557c478bd9Sstevel@tonic-gate 11567c478bd9Sstevel@tonic-gate return (DDI_INTR_CLAIMED); 11577c478bd9Sstevel@tonic-gate } 11587c478bd9Sstevel@tonic-gate 11597c478bd9Sstevel@tonic-gate /* 11607c478bd9Sstevel@tonic-gate * Called from trap() when processing the ast posted by the high-level 11617c478bd9Sstevel@tonic-gate * interrupt handler. 11627c478bd9Sstevel@tonic-gate */ 11637c478bd9Sstevel@tonic-gate int 11647c478bd9Sstevel@tonic-gate kcpc_overflow_ast() 11657c478bd9Sstevel@tonic-gate { 11667c478bd9Sstevel@tonic-gate kcpc_ctx_t *ctx = curthread->t_cpc_ctx; 11677c478bd9Sstevel@tonic-gate int i; 11687c478bd9Sstevel@tonic-gate int found = 0; 11697c478bd9Sstevel@tonic-gate uint64_t curtick = KCPC_GET_TICK(); 11707c478bd9Sstevel@tonic-gate 11717c478bd9Sstevel@tonic-gate ASSERT(ctx != NULL); /* Beware of interrupt skid. */ 11727c478bd9Sstevel@tonic-gate 11737c478bd9Sstevel@tonic-gate /* 11747c478bd9Sstevel@tonic-gate * An overflow happened: sample the context to ensure that 11757c478bd9Sstevel@tonic-gate * the overflow is propagated into the upper bits of the 11767c478bd9Sstevel@tonic-gate * virtualized 64-bit counter(s). 
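 *
 * As a rough worked sketch of what "propagated into the upper bits" means
 * (counter widths are PCBE-specific; the 32-bit width below is only an
 * assumption for illustration): if a pic was preset to UINT32_MAX - 99 so
 * that it overflows after 100 events, and it reads 5 by the time we sample
 * here, the PCBE's pcbe_sample() folds the elapsed hardware delta
 * (100 + 5 = 105 events) into the 64-bit software value behind kr_data, so
 * the virtualized counter keeps growing monotonically even though the
 * hardware register wrapped.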
11777c478bd9Sstevel@tonic-gate */
11787c478bd9Sstevel@tonic-gate kpreempt_disable();
11797c478bd9Sstevel@tonic-gate ctx->kc_hrtime = gethrtime_waitfree();
11807c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_sample(ctx);
11817c478bd9Sstevel@tonic-gate kpreempt_enable();
11827c478bd9Sstevel@tonic-gate
11837c478bd9Sstevel@tonic-gate ctx->kc_vtick += curtick - ctx->kc_rawtick;
11847c478bd9Sstevel@tonic-gate
11857c478bd9Sstevel@tonic-gate /*
11867c478bd9Sstevel@tonic-gate * The interrupt handler has marked any pics with KCPC_PIC_OVERFLOWED
11877c478bd9Sstevel@tonic-gate * if that pic generated an overflow and if the request it was counting
11887c478bd9Sstevel@tonic-gate * on behalf of had CPC_OVF_NOTIFY_EMT specified. We go through all
11897c478bd9Sstevel@tonic-gate * pics in the context and clear the KCPC_PIC_OVERFLOWED flags. If we
11907c478bd9Sstevel@tonic-gate * found any overflowed pics, keep the context frozen and return true
11917c478bd9Sstevel@tonic-gate * (thus causing a signal to be sent).
11927c478bd9Sstevel@tonic-gate */
11937c478bd9Sstevel@tonic-gate for (i = 0; i < cpc_ncounters; i++) {
11947c478bd9Sstevel@tonic-gate if (ctx->kc_pics[i].kp_flags & KCPC_PIC_OVERFLOWED) {
11957c478bd9Sstevel@tonic-gate atomic_and_uint(&ctx->kc_pics[i].kp_flags,
11967c478bd9Sstevel@tonic-gate ~KCPC_PIC_OVERFLOWED);
11977c478bd9Sstevel@tonic-gate found = 1;
11987c478bd9Sstevel@tonic-gate }
11997c478bd9Sstevel@tonic-gate }
12007c478bd9Sstevel@tonic-gate if (found)
12017c478bd9Sstevel@tonic-gate return (1);
12027c478bd9Sstevel@tonic-gate
12037c478bd9Sstevel@tonic-gate /*
12047c478bd9Sstevel@tonic-gate * Otherwise, re-enable the counters and continue life as before.
12057c478bd9Sstevel@tonic-gate */
12067c478bd9Sstevel@tonic-gate kpreempt_disable();
1207b885580bSAlexander Kolbasov KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
12087c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_program(ctx);
12097c478bd9Sstevel@tonic-gate kpreempt_enable();
12107c478bd9Sstevel@tonic-gate return (0);
12117c478bd9Sstevel@tonic-gate }
12127c478bd9Sstevel@tonic-gate
12137c478bd9Sstevel@tonic-gate /*
12147c478bd9Sstevel@tonic-gate * Called when switching away from current thread.
12157c478bd9Sstevel@tonic-gate */
12167c478bd9Sstevel@tonic-gate static void
12177c478bd9Sstevel@tonic-gate kcpc_save(kcpc_ctx_t *ctx)
12187c478bd9Sstevel@tonic-gate {
1219b885580bSAlexander Kolbasov int err;
1220b885580bSAlexander Kolbasov int save_spl;
1221b885580bSAlexander Kolbasov
1222b885580bSAlexander Kolbasov kpreempt_disable();
1223b885580bSAlexander Kolbasov save_spl = spl_xcall();
1224b885580bSAlexander Kolbasov
12257c478bd9Sstevel@tonic-gate if (ctx->kc_flags & KCPC_CTX_INVALID) {
1226b885580bSAlexander Kolbasov if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) {
1227b885580bSAlexander Kolbasov splx(save_spl);
1228b885580bSAlexander Kolbasov kpreempt_enable();
12297c478bd9Sstevel@tonic-gate return;
1230b885580bSAlexander Kolbasov }
12317c478bd9Sstevel@tonic-gate /*
12327c478bd9Sstevel@tonic-gate * This context has been invalidated but the counters have not
12337c478bd9Sstevel@tonic-gate * been stopped. Stop them here and mark the context stopped.
12347c478bd9Sstevel@tonic-gate */ 1235b885580bSAlexander Kolbasov kcpc_unprogram(ctx, B_TRUE); 1236b885580bSAlexander Kolbasov splx(save_spl); 1237b885580bSAlexander Kolbasov kpreempt_enable(); 12387c478bd9Sstevel@tonic-gate return; 12397c478bd9Sstevel@tonic-gate } 12407c478bd9Sstevel@tonic-gate 12417c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_allstop(); 1242b885580bSAlexander Kolbasov if (ctx->kc_flags & KCPC_CTX_FREEZE) { 1243b885580bSAlexander Kolbasov splx(save_spl); 1244b885580bSAlexander Kolbasov kpreempt_enable(); 12457c478bd9Sstevel@tonic-gate return; 1246b885580bSAlexander Kolbasov } 12477c478bd9Sstevel@tonic-gate 12487c478bd9Sstevel@tonic-gate /* 12497c478bd9Sstevel@tonic-gate * Need to sample for all reqs into each req's current mpic. 12507c478bd9Sstevel@tonic-gate */ 1251b885580bSAlexander Kolbasov ctx->kc_hrtime = gethrtime_waitfree(); 12527c478bd9Sstevel@tonic-gate ctx->kc_vtick += KCPC_GET_TICK() - ctx->kc_rawtick; 12537c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_sample(ctx); 1254b885580bSAlexander Kolbasov 1255b885580bSAlexander Kolbasov /* 1256b885580bSAlexander Kolbasov * Program counter for measuring capacity and utilization since user 1257b885580bSAlexander Kolbasov * thread isn't using counter anymore 1258b885580bSAlexander Kolbasov */ 1259b885580bSAlexander Kolbasov ASSERT(ctx->kc_cpuid == -1); 1260b885580bSAlexander Kolbasov cu_cpc_program(CPU, &err); 1261b885580bSAlexander Kolbasov splx(save_spl); 1262b885580bSAlexander Kolbasov kpreempt_enable(); 12637c478bd9Sstevel@tonic-gate } 12647c478bd9Sstevel@tonic-gate 12657c478bd9Sstevel@tonic-gate static void 12667c478bd9Sstevel@tonic-gate kcpc_restore(kcpc_ctx_t *ctx) 12677c478bd9Sstevel@tonic-gate { 1268b885580bSAlexander Kolbasov int save_spl; 1269b885580bSAlexander Kolbasov 12704568bee7Strevtom mutex_enter(&ctx->kc_lock); 1271b885580bSAlexander Kolbasov 12727c478bd9Sstevel@tonic-gate if ((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED)) == 1273b885580bSAlexander Kolbasov KCPC_CTX_INVALID) { 12747c478bd9Sstevel@tonic-gate /* 12757c478bd9Sstevel@tonic-gate * The context is invalidated but has not been marked stopped. 12767c478bd9Sstevel@tonic-gate * We mark it as such here because we will not start the 12777c478bd9Sstevel@tonic-gate * counters during this context switch. 12787c478bd9Sstevel@tonic-gate */ 1279b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID_STOPPED); 1280b885580bSAlexander Kolbasov } 12817c478bd9Sstevel@tonic-gate 12824568bee7Strevtom if (ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_FREEZE)) { 12834568bee7Strevtom mutex_exit(&ctx->kc_lock); 12847c478bd9Sstevel@tonic-gate return; 12854568bee7Strevtom } 12864568bee7Strevtom 12874568bee7Strevtom /* 12884568bee7Strevtom * Set kc_flags to show that a kcpc_restore() is in progress to avoid 12894568bee7Strevtom * ctx & set related memory objects being freed without us knowing. 12904568bee7Strevtom * This can happen if an agent thread is executing a kcpc_unbind(), 12914568bee7Strevtom * with this thread as the target, whilst we're concurrently doing a 12924568bee7Strevtom * restorectx() during, for example, a proc_exit(). Effectively, by 12934568bee7Strevtom * doing this, we're asking kcpc_free() to cv_wait() until 12944568bee7Strevtom * kcpc_restore() has completed. 
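 *
 * The handshake, as a condensed sketch of the code below and of
 * kcpc_free():
 *
 *    restore side:  mutex_enter(kc_lock); set KCPC_CTX_RESTORE;
 *                   mutex_exit(kc_lock); program the hardware;
 *                   mutex_enter(kc_lock); clear KCPC_CTX_RESTORE;
 *                   cv_signal(kc_condv); mutex_exit(kc_lock);
 *
 *    free side:     mutex_enter(kc_lock);
 *                   while (kc_flags & KCPC_CTX_RESTORE)
 *                           cv_wait(kc_condv, kc_lock);
 *                   set KCPC_CTX_INVALID; mutex_exit(kc_lock);
 *                   tear the context down.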
12954568bee7Strevtom */
1296b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_RESTORE);
12974568bee7Strevtom mutex_exit(&ctx->kc_lock);
12987c478bd9Sstevel@tonic-gate
12997c478bd9Sstevel@tonic-gate /*
13007c478bd9Sstevel@tonic-gate * While programming the hardware, the counters should be stopped. We
13017c478bd9Sstevel@tonic-gate * don't do an explicit pcbe_allstop() here because they should have
13027c478bd9Sstevel@tonic-gate * been stopped already by the last consumer.
13037c478bd9Sstevel@tonic-gate */
1304b885580bSAlexander Kolbasov kpreempt_disable();
1305b885580bSAlexander Kolbasov save_spl = spl_xcall();
1306b885580bSAlexander Kolbasov kcpc_program(ctx, B_TRUE, B_TRUE);
1307b885580bSAlexander Kolbasov splx(save_spl);
1308b885580bSAlexander Kolbasov kpreempt_enable();
13094568bee7Strevtom
13104568bee7Strevtom /*
13114568bee7Strevtom * Wake the agent thread if it's waiting in kcpc_free().
13124568bee7Strevtom */
13134568bee7Strevtom mutex_enter(&ctx->kc_lock);
1314b885580bSAlexander Kolbasov KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_RESTORE);
13154568bee7Strevtom cv_signal(&ctx->kc_condv);
13164568bee7Strevtom mutex_exit(&ctx->kc_lock);
13177c478bd9Sstevel@tonic-gate }
13187c478bd9Sstevel@tonic-gate
13197c478bd9Sstevel@tonic-gate /*
13207c478bd9Sstevel@tonic-gate * If kcpc_counts_include_idle is set to 0 by the sys admin, we add the
13217c478bd9Sstevel@tonic-gate * following context operators to the idle thread on each CPU. They stop the
13227c478bd9Sstevel@tonic-gate * counters when the idle thread is switched on, and they start them again when
13237c478bd9Sstevel@tonic-gate * it is switched off.
13247c478bd9Sstevel@tonic-gate */
13257c478bd9Sstevel@tonic-gate /*ARGSUSED*/
13267c478bd9Sstevel@tonic-gate void
13277c478bd9Sstevel@tonic-gate kcpc_idle_save(struct cpu *cp)
13287c478bd9Sstevel@tonic-gate {
13297c478bd9Sstevel@tonic-gate /*
13307c478bd9Sstevel@tonic-gate * The idle thread shouldn't be run anywhere else.
13317c478bd9Sstevel@tonic-gate */
13327c478bd9Sstevel@tonic-gate ASSERT(CPU == cp);
13337c478bd9Sstevel@tonic-gate
13347c478bd9Sstevel@tonic-gate /*
13357c478bd9Sstevel@tonic-gate * We must hold the CPU's context lock to ensure the context isn't freed
13367c478bd9Sstevel@tonic-gate * while we're looking at it.
13377c478bd9Sstevel@tonic-gate */
13387c478bd9Sstevel@tonic-gate mutex_enter(&cp->cpu_cpc_ctxlock);
13397c478bd9Sstevel@tonic-gate
13407c478bd9Sstevel@tonic-gate if ((cp->cpu_cpc_ctx == NULL) ||
13417c478bd9Sstevel@tonic-gate (cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) {
13427c478bd9Sstevel@tonic-gate mutex_exit(&cp->cpu_cpc_ctxlock);
13437c478bd9Sstevel@tonic-gate return;
13447c478bd9Sstevel@tonic-gate }
13457c478bd9Sstevel@tonic-gate
13467c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_program(cp->cpu_cpc_ctx);
13477c478bd9Sstevel@tonic-gate mutex_exit(&cp->cpu_cpc_ctxlock);
13487c478bd9Sstevel@tonic-gate }
13497c478bd9Sstevel@tonic-gate
13507c478bd9Sstevel@tonic-gate void
13517c478bd9Sstevel@tonic-gate kcpc_idle_restore(struct cpu *cp)
13527c478bd9Sstevel@tonic-gate {
13537c478bd9Sstevel@tonic-gate /*
13547c478bd9Sstevel@tonic-gate * The idle thread shouldn't be run anywhere else.
13557c478bd9Sstevel@tonic-gate */
13567c478bd9Sstevel@tonic-gate ASSERT(CPU == cp);
13577c478bd9Sstevel@tonic-gate
13587c478bd9Sstevel@tonic-gate /*
13597c478bd9Sstevel@tonic-gate * We must hold the CPU's context lock to ensure the context isn't freed
13607c478bd9Sstevel@tonic-gate * while we're looking at it.
13617c478bd9Sstevel@tonic-gate */ 13627c478bd9Sstevel@tonic-gate mutex_enter(&cp->cpu_cpc_ctxlock); 13637c478bd9Sstevel@tonic-gate 13647c478bd9Sstevel@tonic-gate if ((cp->cpu_cpc_ctx == NULL) || 13657c478bd9Sstevel@tonic-gate (cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) { 13667c478bd9Sstevel@tonic-gate mutex_exit(&cp->cpu_cpc_ctxlock); 13677c478bd9Sstevel@tonic-gate return; 13687c478bd9Sstevel@tonic-gate } 13697c478bd9Sstevel@tonic-gate 13707c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_allstop(); 13717c478bd9Sstevel@tonic-gate mutex_exit(&cp->cpu_cpc_ctxlock); 13727c478bd9Sstevel@tonic-gate } 13737c478bd9Sstevel@tonic-gate 13747c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 13757c478bd9Sstevel@tonic-gate static void 13767c478bd9Sstevel@tonic-gate kcpc_lwp_create(kthread_t *t, kthread_t *ct) 13777c478bd9Sstevel@tonic-gate { 13787c478bd9Sstevel@tonic-gate kcpc_ctx_t *ctx = t->t_cpc_ctx, *cctx; 13797c478bd9Sstevel@tonic-gate int i; 13807c478bd9Sstevel@tonic-gate 13817c478bd9Sstevel@tonic-gate if (ctx == NULL || (ctx->kc_flags & KCPC_CTX_LWPINHERIT) == 0) 13827c478bd9Sstevel@tonic-gate return; 13837c478bd9Sstevel@tonic-gate 13847c478bd9Sstevel@tonic-gate rw_enter(&kcpc_cpuctx_lock, RW_READER); 13857c478bd9Sstevel@tonic-gate if (ctx->kc_flags & KCPC_CTX_INVALID) { 13867c478bd9Sstevel@tonic-gate rw_exit(&kcpc_cpuctx_lock); 13877c478bd9Sstevel@tonic-gate return; 13887c478bd9Sstevel@tonic-gate } 1389b885580bSAlexander Kolbasov cctx = kcpc_ctx_alloc(KM_SLEEP); 13907c478bd9Sstevel@tonic-gate kcpc_ctx_clone(ctx, cctx); 13917c478bd9Sstevel@tonic-gate rw_exit(&kcpc_cpuctx_lock); 13927c478bd9Sstevel@tonic-gate 13938d4e547dSae112802 /* 13948d4e547dSae112802 * Copy the parent context's kc_flags field, but don't overwrite 13958d4e547dSae112802 * the child's in case it was modified during kcpc_ctx_clone. 13968d4e547dSae112802 */ 1397b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(cctx, ctx->kc_flags); 13987c478bd9Sstevel@tonic-gate cctx->kc_thread = ct; 13997c478bd9Sstevel@tonic-gate cctx->kc_cpuid = -1; 14007c478bd9Sstevel@tonic-gate ct->t_cpc_set = cctx->kc_set; 14017c478bd9Sstevel@tonic-gate ct->t_cpc_ctx = cctx; 14027c478bd9Sstevel@tonic-gate 14037c478bd9Sstevel@tonic-gate if (cctx->kc_flags & KCPC_CTX_SIGOVF) { 14047c478bd9Sstevel@tonic-gate kcpc_set_t *ks = cctx->kc_set; 14057c478bd9Sstevel@tonic-gate /* 14067c478bd9Sstevel@tonic-gate * Our contract with the user requires us to immediately send an 14077c478bd9Sstevel@tonic-gate * overflow signal to all children if we have the LWPINHERIT 14087c478bd9Sstevel@tonic-gate * and SIGOVF flags set. In addition, all counters should be 14097c478bd9Sstevel@tonic-gate * set to UINT64_MAX, and their pic's overflow flag turned on 14107c478bd9Sstevel@tonic-gate * so that our trap() processing knows to send a signal. 
14117c478bd9Sstevel@tonic-gate */ 1412b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE); 14137c478bd9Sstevel@tonic-gate for (i = 0; i < ks->ks_nreqs; i++) { 14147c478bd9Sstevel@tonic-gate kcpc_request_t *kr = &ks->ks_req[i]; 14157c478bd9Sstevel@tonic-gate 14167c478bd9Sstevel@tonic-gate if (kr->kr_flags & CPC_OVF_NOTIFY_EMT) { 14177c478bd9Sstevel@tonic-gate *(kr->kr_data) = UINT64_MAX; 1418b885580bSAlexander Kolbasov atomic_or_uint(&kr->kr_picp->kp_flags, 1419b885580bSAlexander Kolbasov KCPC_PIC_OVERFLOWED); 14207c478bd9Sstevel@tonic-gate } 14217c478bd9Sstevel@tonic-gate } 14227c478bd9Sstevel@tonic-gate ttolwp(ct)->lwp_pcb.pcb_flags |= CPC_OVERFLOW; 14237c478bd9Sstevel@tonic-gate aston(ct); 14247c478bd9Sstevel@tonic-gate } 14257c478bd9Sstevel@tonic-gate 14267c478bd9Sstevel@tonic-gate installctx(ct, cctx, kcpc_save, kcpc_restore, 14277c478bd9Sstevel@tonic-gate NULL, kcpc_lwp_create, NULL, kcpc_free); 14287c478bd9Sstevel@tonic-gate } 14297c478bd9Sstevel@tonic-gate 14307c478bd9Sstevel@tonic-gate /* 14317c478bd9Sstevel@tonic-gate * Counter Stoppage Theory 14327c478bd9Sstevel@tonic-gate * 14337c478bd9Sstevel@tonic-gate * The counters may need to be stopped properly at the following occasions: 14347c478bd9Sstevel@tonic-gate * 14357c478bd9Sstevel@tonic-gate * 1) An LWP exits. 14367c478bd9Sstevel@tonic-gate * 2) A thread exits. 14377c478bd9Sstevel@tonic-gate * 3) An LWP performs an exec(). 14387c478bd9Sstevel@tonic-gate * 4) A bound set is unbound. 14397c478bd9Sstevel@tonic-gate * 14407c478bd9Sstevel@tonic-gate * In addition to stopping the counters, the CPC context (a kcpc_ctx_t) may need 14417c478bd9Sstevel@tonic-gate * to be freed as well. 14427c478bd9Sstevel@tonic-gate * 14437c478bd9Sstevel@tonic-gate * Case 1: kcpc_passivate(), called via lwp_exit(), stops the counters. Later on 14447c478bd9Sstevel@tonic-gate * when the thread is freed, kcpc_free(), called by freectx(), frees the 14457c478bd9Sstevel@tonic-gate * context. 14467c478bd9Sstevel@tonic-gate * 14477c478bd9Sstevel@tonic-gate * Case 2: same as case 1 except kcpc_passivate is called from thread_exit(). 14487c478bd9Sstevel@tonic-gate * 14497c478bd9Sstevel@tonic-gate * Case 3: kcpc_free(), called via freectx() via exec(), recognizes that it has 14507c478bd9Sstevel@tonic-gate * been called from exec. It stops the counters _and_ frees the context. 14517c478bd9Sstevel@tonic-gate * 14527c478bd9Sstevel@tonic-gate * Case 4: kcpc_unbind() stops the hardware _and_ frees the context. 14537c478bd9Sstevel@tonic-gate * 14547c478bd9Sstevel@tonic-gate * CPU-bound counters are always stopped via kcpc_unbind(). 14557c478bd9Sstevel@tonic-gate */ 14567c478bd9Sstevel@tonic-gate 14577c478bd9Sstevel@tonic-gate /* 14587c478bd9Sstevel@tonic-gate * We're being called to delete the context; we ensure that all associated data 14597c478bd9Sstevel@tonic-gate * structures are freed, and that the hardware is passivated if this is an exec. 14607c478bd9Sstevel@tonic-gate */ 14617c478bd9Sstevel@tonic-gate 14627c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 1463b885580bSAlexander Kolbasov void 14647c478bd9Sstevel@tonic-gate kcpc_free(kcpc_ctx_t *ctx, int isexec) 14657c478bd9Sstevel@tonic-gate { 14667c478bd9Sstevel@tonic-gate int i; 14677c478bd9Sstevel@tonic-gate kcpc_set_t *set = ctx->kc_set; 14687c478bd9Sstevel@tonic-gate 14697c478bd9Sstevel@tonic-gate ASSERT(set != NULL); 14707c478bd9Sstevel@tonic-gate 14714568bee7Strevtom /* 14724568bee7Strevtom * Wait for kcpc_restore() to finish before we tear things down. 
14734568bee7Strevtom */ 14744568bee7Strevtom mutex_enter(&ctx->kc_lock); 14754568bee7Strevtom while (ctx->kc_flags & KCPC_CTX_RESTORE) 14764568bee7Strevtom cv_wait(&ctx->kc_condv, &ctx->kc_lock); 1477b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID); 14784568bee7Strevtom mutex_exit(&ctx->kc_lock); 14797c478bd9Sstevel@tonic-gate 14807c478bd9Sstevel@tonic-gate if (isexec) { 14817c478bd9Sstevel@tonic-gate /* 14827c478bd9Sstevel@tonic-gate * This thread is execing, and after the exec it should not have 14837c478bd9Sstevel@tonic-gate * any performance counter context. Stop the counters properly 14847c478bd9Sstevel@tonic-gate * here so the system isn't surprised by an overflow interrupt 14857c478bd9Sstevel@tonic-gate * later. 14867c478bd9Sstevel@tonic-gate */ 14877c478bd9Sstevel@tonic-gate if (ctx->kc_cpuid != -1) { 14887c478bd9Sstevel@tonic-gate cpu_t *cp; 14897c478bd9Sstevel@tonic-gate /* 14907c478bd9Sstevel@tonic-gate * CPU-bound context; stop the appropriate CPU's ctrs. 14917c478bd9Sstevel@tonic-gate * Hold cpu_lock while examining the CPU to ensure it 14927c478bd9Sstevel@tonic-gate * doesn't go away. 14937c478bd9Sstevel@tonic-gate */ 14947c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock); 14957c478bd9Sstevel@tonic-gate cp = cpu_get(ctx->kc_cpuid); 14967c478bd9Sstevel@tonic-gate /* 14977c478bd9Sstevel@tonic-gate * The CPU could have been DR'd out, so only stop the 14987c478bd9Sstevel@tonic-gate * CPU and clear its context pointer if the CPU still 14997c478bd9Sstevel@tonic-gate * exists. 15007c478bd9Sstevel@tonic-gate */ 15017c478bd9Sstevel@tonic-gate if (cp != NULL) { 15027c478bd9Sstevel@tonic-gate mutex_enter(&cp->cpu_cpc_ctxlock); 15037c478bd9Sstevel@tonic-gate kcpc_stop_hw(ctx); 15047c478bd9Sstevel@tonic-gate mutex_exit(&cp->cpu_cpc_ctxlock); 15057c478bd9Sstevel@tonic-gate } 15067c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock); 15077c478bd9Sstevel@tonic-gate ASSERT(curthread->t_cpc_ctx == NULL); 15087c478bd9Sstevel@tonic-gate } else { 1509b885580bSAlexander Kolbasov int save_spl; 1510b885580bSAlexander Kolbasov 15117c478bd9Sstevel@tonic-gate /* 15127c478bd9Sstevel@tonic-gate * Thread-bound context; stop _this_ CPU's counters. 15137c478bd9Sstevel@tonic-gate */ 15147c478bd9Sstevel@tonic-gate kpreempt_disable(); 1515b885580bSAlexander Kolbasov save_spl = spl_xcall(); 1516b885580bSAlexander Kolbasov kcpc_unprogram(ctx, B_TRUE); 15177c478bd9Sstevel@tonic-gate curthread->t_cpc_ctx = NULL; 1518b885580bSAlexander Kolbasov splx(save_spl); 1519b885580bSAlexander Kolbasov kpreempt_enable(); 15207c478bd9Sstevel@tonic-gate } 15217c478bd9Sstevel@tonic-gate 15227c478bd9Sstevel@tonic-gate /* 15237c478bd9Sstevel@tonic-gate * Since we are being called from an exec and we know that 15247c478bd9Sstevel@tonic-gate * exec is not permitted via the agent thread, we should clean 15257c478bd9Sstevel@tonic-gate * up this thread's CPC state completely, and not leave dangling 15267c478bd9Sstevel@tonic-gate * CPC pointers behind. 15277c478bd9Sstevel@tonic-gate */ 15287c478bd9Sstevel@tonic-gate ASSERT(ctx->kc_thread == curthread); 15297c478bd9Sstevel@tonic-gate curthread->t_cpc_set = NULL; 15307c478bd9Sstevel@tonic-gate } 15317c478bd9Sstevel@tonic-gate 15327c478bd9Sstevel@tonic-gate /* 15337c478bd9Sstevel@tonic-gate * Walk through each request in this context's set and free the PCBE's 15347c478bd9Sstevel@tonic-gate * configuration if it exists. 
15357c478bd9Sstevel@tonic-gate */ 15367c478bd9Sstevel@tonic-gate for (i = 0; i < set->ks_nreqs; i++) { 15377c478bd9Sstevel@tonic-gate if (set->ks_req[i].kr_config != NULL) 15387c478bd9Sstevel@tonic-gate pcbe_ops->pcbe_free(set->ks_req[i].kr_config); 15397c478bd9Sstevel@tonic-gate } 15407c478bd9Sstevel@tonic-gate 15417c478bd9Sstevel@tonic-gate kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t)); 15427c478bd9Sstevel@tonic-gate kcpc_ctx_free(ctx); 15437c478bd9Sstevel@tonic-gate kcpc_free_set(set); 15447c478bd9Sstevel@tonic-gate } 15457c478bd9Sstevel@tonic-gate 15467c478bd9Sstevel@tonic-gate /* 15477c478bd9Sstevel@tonic-gate * Free the memory associated with a request set. 15487c478bd9Sstevel@tonic-gate */ 15497c478bd9Sstevel@tonic-gate void 15507c478bd9Sstevel@tonic-gate kcpc_free_set(kcpc_set_t *set) 15517c478bd9Sstevel@tonic-gate { 15527c478bd9Sstevel@tonic-gate int i; 15537c478bd9Sstevel@tonic-gate kcpc_request_t *req; 15547c478bd9Sstevel@tonic-gate 15557c478bd9Sstevel@tonic-gate ASSERT(set->ks_req != NULL); 15567c478bd9Sstevel@tonic-gate 15577c478bd9Sstevel@tonic-gate for (i = 0; i < set->ks_nreqs; i++) { 15587c478bd9Sstevel@tonic-gate req = &set->ks_req[i]; 15597c478bd9Sstevel@tonic-gate 15607c478bd9Sstevel@tonic-gate if (req->kr_nattrs != 0) { 15617c478bd9Sstevel@tonic-gate kmem_free(req->kr_attr, 15627c478bd9Sstevel@tonic-gate req->kr_nattrs * sizeof (kcpc_attr_t)); 15637c478bd9Sstevel@tonic-gate } 15647c478bd9Sstevel@tonic-gate } 15657c478bd9Sstevel@tonic-gate 15667c478bd9Sstevel@tonic-gate kmem_free(set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs); 15674568bee7Strevtom cv_destroy(&set->ks_condv); 15684568bee7Strevtom mutex_destroy(&set->ks_lock); 15697c478bd9Sstevel@tonic-gate kmem_free(set, sizeof (kcpc_set_t)); 15707c478bd9Sstevel@tonic-gate } 15717c478bd9Sstevel@tonic-gate 15727c478bd9Sstevel@tonic-gate /* 15737c478bd9Sstevel@tonic-gate * Grab every existing context and mark it as invalid. 15747c478bd9Sstevel@tonic-gate */ 15757c478bd9Sstevel@tonic-gate void 15767c478bd9Sstevel@tonic-gate kcpc_invalidate_all(void) 15777c478bd9Sstevel@tonic-gate { 15787c478bd9Sstevel@tonic-gate kcpc_ctx_t *ctx; 15797c478bd9Sstevel@tonic-gate long hash; 15807c478bd9Sstevel@tonic-gate 15817c478bd9Sstevel@tonic-gate for (hash = 0; hash < CPC_HASH_BUCKETS; hash++) { 15827c478bd9Sstevel@tonic-gate mutex_enter(&kcpc_ctx_llock[hash]); 15837c478bd9Sstevel@tonic-gate for (ctx = kcpc_ctx_list[hash]; ctx; ctx = ctx->kc_next) 1584b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID); 15857c478bd9Sstevel@tonic-gate mutex_exit(&kcpc_ctx_llock[hash]); 15867c478bd9Sstevel@tonic-gate } 15877c478bd9Sstevel@tonic-gate } 15887c478bd9Sstevel@tonic-gate 15897c478bd9Sstevel@tonic-gate /* 15908d4e547dSae112802 * Interface for PCBEs to signal that an existing configuration has suddenly 15918d4e547dSae112802 * become invalid. 
15928d4e547dSae112802 */ 15938d4e547dSae112802 void 15948d4e547dSae112802 kcpc_invalidate_config(void *token) 15958d4e547dSae112802 { 15968d4e547dSae112802 kcpc_ctx_t *ctx = token; 15978d4e547dSae112802 15988d4e547dSae112802 ASSERT(ctx != NULL); 15998d4e547dSae112802 1600b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID); 16018d4e547dSae112802 } 16028d4e547dSae112802 16038d4e547dSae112802 /* 16047c478bd9Sstevel@tonic-gate * Called from lwp_exit() and thread_exit() 16057c478bd9Sstevel@tonic-gate */ 16067c478bd9Sstevel@tonic-gate void 16077c478bd9Sstevel@tonic-gate kcpc_passivate(void) 16087c478bd9Sstevel@tonic-gate { 16097c478bd9Sstevel@tonic-gate kcpc_ctx_t *ctx = curthread->t_cpc_ctx; 16107c478bd9Sstevel@tonic-gate kcpc_set_t *set = curthread->t_cpc_set; 1611b885580bSAlexander Kolbasov int save_spl; 16127c478bd9Sstevel@tonic-gate 16137c478bd9Sstevel@tonic-gate if (set == NULL) 16147c478bd9Sstevel@tonic-gate return; 16157c478bd9Sstevel@tonic-gate 16167c478bd9Sstevel@tonic-gate if (ctx == NULL) { 16177c478bd9Sstevel@tonic-gate /* 16187c478bd9Sstevel@tonic-gate * This thread has a set but no context; it must be a CPU-bound 16197c478bd9Sstevel@tonic-gate * set. The hardware will be stopped via kcpc_unbind() when the 16207c478bd9Sstevel@tonic-gate * process exits and closes its file descriptors with 16217c478bd9Sstevel@tonic-gate * kcpc_close(). Our only job here is to clean up this thread's 16227c478bd9Sstevel@tonic-gate * state; the set will be freed with the unbind(). 16237c478bd9Sstevel@tonic-gate */ 16247c478bd9Sstevel@tonic-gate (void) kcpc_unbind(set); 16257c478bd9Sstevel@tonic-gate /* 16267c478bd9Sstevel@tonic-gate * Unbinding a set belonging to the current thread should clear 16277c478bd9Sstevel@tonic-gate * its set pointer. 16287c478bd9Sstevel@tonic-gate */ 16297c478bd9Sstevel@tonic-gate ASSERT(curthread->t_cpc_set == NULL); 16307c478bd9Sstevel@tonic-gate return; 16317c478bd9Sstevel@tonic-gate } 16327c478bd9Sstevel@tonic-gate 1633b885580bSAlexander Kolbasov kpreempt_disable(); 1634b885580bSAlexander Kolbasov save_spl = spl_xcall(); 16357c478bd9Sstevel@tonic-gate curthread->t_cpc_set = NULL; 16367c478bd9Sstevel@tonic-gate 16377c478bd9Sstevel@tonic-gate /* 16387c478bd9Sstevel@tonic-gate * This thread/LWP is exiting but context switches will continue to 16397c478bd9Sstevel@tonic-gate * happen for a bit as the exit proceeds. Kernel preemption must be 16407c478bd9Sstevel@tonic-gate * disabled here to prevent a race between checking or setting the 16417c478bd9Sstevel@tonic-gate * INVALID_STOPPED flag here and kcpc_restore() setting the flag during 16427c478bd9Sstevel@tonic-gate * a context switch. 16437c478bd9Sstevel@tonic-gate */ 16447c478bd9Sstevel@tonic-gate if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) { 1645b885580bSAlexander Kolbasov kcpc_unprogram(ctx, B_TRUE); 1646b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, 16477c478bd9Sstevel@tonic-gate KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED); 16487c478bd9Sstevel@tonic-gate } 1649b885580bSAlexander Kolbasov 1650b885580bSAlexander Kolbasov /* 1651b885580bSAlexander Kolbasov * We're cleaning up after this thread; ensure there are no dangling 1652b885580bSAlexander Kolbasov * CPC pointers left behind. The context and set will be freed by 1653b885580bSAlexander Kolbasov * freectx(). 
1654b885580bSAlexander Kolbasov */ 1655b885580bSAlexander Kolbasov curthread->t_cpc_ctx = NULL; 1656b885580bSAlexander Kolbasov 1657b885580bSAlexander Kolbasov splx(save_spl); 16587c478bd9Sstevel@tonic-gate kpreempt_enable(); 16597c478bd9Sstevel@tonic-gate } 16607c478bd9Sstevel@tonic-gate 16617c478bd9Sstevel@tonic-gate /* 16627c478bd9Sstevel@tonic-gate * Assign the requests in the given set to the PICs in the context. 16637c478bd9Sstevel@tonic-gate * Returns 0 if successful, -1 on failure. 16647c478bd9Sstevel@tonic-gate */ 16657c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 1666b9e93c10SJonathan Haslam int 16677c478bd9Sstevel@tonic-gate kcpc_assign_reqs(kcpc_set_t *set, kcpc_ctx_t *ctx) 16687c478bd9Sstevel@tonic-gate { 16697c478bd9Sstevel@tonic-gate int i; 16707c478bd9Sstevel@tonic-gate int *picnum_save; 16717c478bd9Sstevel@tonic-gate 16727c478bd9Sstevel@tonic-gate ASSERT(set->ks_nreqs <= cpc_ncounters); 16737c478bd9Sstevel@tonic-gate 16747c478bd9Sstevel@tonic-gate /* 16757c478bd9Sstevel@tonic-gate * Provide kcpc_tryassign() with scratch space to avoid doing an 16767c478bd9Sstevel@tonic-gate * alloc/free with every invocation. 16777c478bd9Sstevel@tonic-gate */ 16787c478bd9Sstevel@tonic-gate picnum_save = kmem_alloc(set->ks_nreqs * sizeof (int), KM_SLEEP); 16797c478bd9Sstevel@tonic-gate /* 16807c478bd9Sstevel@tonic-gate * kcpc_tryassign() blindly walks through each request in the set, 16817c478bd9Sstevel@tonic-gate * seeing if a counter can count its event. If yes, it assigns that 16827c478bd9Sstevel@tonic-gate * counter. However, that counter may have been the only capable counter 16837c478bd9Sstevel@tonic-gate * for _another_ request's event. The solution is to try every possible 16847c478bd9Sstevel@tonic-gate * request first. Note that this does not cover all solutions, as 16857c478bd9Sstevel@tonic-gate * that would require all unique orderings of requests, an n^n operation 16867c478bd9Sstevel@tonic-gate * which would be unacceptable for architectures with many counters. 16877c478bd9Sstevel@tonic-gate */ 16887c478bd9Sstevel@tonic-gate for (i = 0; i < set->ks_nreqs; i++) 16897c478bd9Sstevel@tonic-gate if (kcpc_tryassign(set, i, picnum_save) == 0) 16907c478bd9Sstevel@tonic-gate break; 16917c478bd9Sstevel@tonic-gate 16927c478bd9Sstevel@tonic-gate kmem_free(picnum_save, set->ks_nreqs * sizeof (int)); 16937c478bd9Sstevel@tonic-gate if (i == set->ks_nreqs) 16947c478bd9Sstevel@tonic-gate return (-1); 16957c478bd9Sstevel@tonic-gate return (0); 16967c478bd9Sstevel@tonic-gate } 16977c478bd9Sstevel@tonic-gate 16987c478bd9Sstevel@tonic-gate static int 16997c478bd9Sstevel@tonic-gate kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch) 17007c478bd9Sstevel@tonic-gate { 17017c478bd9Sstevel@tonic-gate int i; 17027c478bd9Sstevel@tonic-gate int j; 17037c478bd9Sstevel@tonic-gate uint64_t bitmap = 0, resmap = 0; 17047c478bd9Sstevel@tonic-gate uint64_t ctrmap; 17057c478bd9Sstevel@tonic-gate 17067c478bd9Sstevel@tonic-gate /* 17077c478bd9Sstevel@tonic-gate * We are attempting to assign the reqs to pics, but we may fail. If we 17087c478bd9Sstevel@tonic-gate * fail, we need to restore the state of the requests to what it was 17097c478bd9Sstevel@tonic-gate * when we found it, as some reqs may have been explicitly assigned to 17107c478bd9Sstevel@tonic-gate * a specific PIC beforehand. We do this by snapshotting the assignments 17117c478bd9Sstevel@tonic-gate * now and restoring from it later if we fail. 
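 *
 * As a small worked example of why one starting request can fail where
 * another succeeds (the two-counter layout here is hypothetical, for
 * illustration only): suppose pic0 can count events A and B, pic1 can count
 * only A, and the set holds requests (A, B) in that order. Starting with A
 * greedily claims pic0, leaving B with no capable counter and forcing the
 * restore-and-fail path below; starting with B instead places B on pic0 and
 * A on pic1, which succeeds. This is why kcpc_assign_reqs() above retries
 * us with every possible starting request.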
17127c478bd9Sstevel@tonic-gate * 17137c478bd9Sstevel@tonic-gate * Also we note here which counters have already been claimed by 17147c478bd9Sstevel@tonic-gate * requests with explicit counter assignments. 17157c478bd9Sstevel@tonic-gate */ 17167c478bd9Sstevel@tonic-gate for (i = 0; i < set->ks_nreqs; i++) { 17177c478bd9Sstevel@tonic-gate scratch[i] = set->ks_req[i].kr_picnum; 17187c478bd9Sstevel@tonic-gate if (set->ks_req[i].kr_picnum != -1) 17197c478bd9Sstevel@tonic-gate resmap |= (1 << set->ks_req[i].kr_picnum); 17207c478bd9Sstevel@tonic-gate } 17217c478bd9Sstevel@tonic-gate 17227c478bd9Sstevel@tonic-gate /* 17237c478bd9Sstevel@tonic-gate * Walk through requests assigning them to the first PIC that is 17247c478bd9Sstevel@tonic-gate * capable. 17257c478bd9Sstevel@tonic-gate */ 17267c478bd9Sstevel@tonic-gate i = starting_req; 17277c478bd9Sstevel@tonic-gate do { 17287c478bd9Sstevel@tonic-gate if (set->ks_req[i].kr_picnum != -1) { 17297c478bd9Sstevel@tonic-gate ASSERT((bitmap & (1 << set->ks_req[i].kr_picnum)) == 0); 17307c478bd9Sstevel@tonic-gate bitmap |= (1 << set->ks_req[i].kr_picnum); 17317c478bd9Sstevel@tonic-gate if (++i == set->ks_nreqs) 17327c478bd9Sstevel@tonic-gate i = 0; 17337c478bd9Sstevel@tonic-gate continue; 17347c478bd9Sstevel@tonic-gate } 17357c478bd9Sstevel@tonic-gate 17367c478bd9Sstevel@tonic-gate ctrmap = pcbe_ops->pcbe_event_coverage(set->ks_req[i].kr_event); 17377c478bd9Sstevel@tonic-gate for (j = 0; j < cpc_ncounters; j++) { 17387c478bd9Sstevel@tonic-gate if (ctrmap & (1 << j) && (bitmap & (1 << j)) == 0 && 17397c478bd9Sstevel@tonic-gate (resmap & (1 << j)) == 0) { 17407c478bd9Sstevel@tonic-gate /* 17417c478bd9Sstevel@tonic-gate * We can assign this counter because: 17427c478bd9Sstevel@tonic-gate * 17437c478bd9Sstevel@tonic-gate * 1. It can count the event (ctrmap) 17447c478bd9Sstevel@tonic-gate * 2. It hasn't been assigned yet (bitmap) 17457c478bd9Sstevel@tonic-gate * 3. 
It wasn't reserved by a request (resmap) 17467c478bd9Sstevel@tonic-gate */ 17477c478bd9Sstevel@tonic-gate bitmap |= (1 << j); 17487c478bd9Sstevel@tonic-gate break; 17497c478bd9Sstevel@tonic-gate } 17507c478bd9Sstevel@tonic-gate } 17517c478bd9Sstevel@tonic-gate if (j == cpc_ncounters) { 17527c478bd9Sstevel@tonic-gate for (i = 0; i < set->ks_nreqs; i++) 17537c478bd9Sstevel@tonic-gate set->ks_req[i].kr_picnum = scratch[i]; 17547c478bd9Sstevel@tonic-gate return (-1); 17557c478bd9Sstevel@tonic-gate } 17567c478bd9Sstevel@tonic-gate set->ks_req[i].kr_picnum = j; 17577c478bd9Sstevel@tonic-gate 17587c478bd9Sstevel@tonic-gate if (++i == set->ks_nreqs) 17597c478bd9Sstevel@tonic-gate i = 0; 17607c478bd9Sstevel@tonic-gate } while (i != starting_req); 17617c478bd9Sstevel@tonic-gate 17627c478bd9Sstevel@tonic-gate return (0); 17637c478bd9Sstevel@tonic-gate } 17647c478bd9Sstevel@tonic-gate 17657c478bd9Sstevel@tonic-gate kcpc_set_t * 17667c478bd9Sstevel@tonic-gate kcpc_dup_set(kcpc_set_t *set) 17677c478bd9Sstevel@tonic-gate { 17687c478bd9Sstevel@tonic-gate kcpc_set_t *new; 17697c478bd9Sstevel@tonic-gate int i; 17707c478bd9Sstevel@tonic-gate int j; 17717c478bd9Sstevel@tonic-gate 17724568bee7Strevtom new = kmem_zalloc(sizeof (*new), KM_SLEEP); 17734568bee7Strevtom new->ks_state &= ~KCPC_SET_BOUND; 17747c478bd9Sstevel@tonic-gate new->ks_flags = set->ks_flags; 17757c478bd9Sstevel@tonic-gate new->ks_nreqs = set->ks_nreqs; 17767c478bd9Sstevel@tonic-gate new->ks_req = kmem_alloc(set->ks_nreqs * sizeof (kcpc_request_t), 17777c478bd9Sstevel@tonic-gate KM_SLEEP); 17787c478bd9Sstevel@tonic-gate new->ks_data = NULL; 17797c478bd9Sstevel@tonic-gate new->ks_ctx = NULL; 17807c478bd9Sstevel@tonic-gate 17817c478bd9Sstevel@tonic-gate for (i = 0; i < new->ks_nreqs; i++) { 17827c478bd9Sstevel@tonic-gate new->ks_req[i].kr_config = NULL; 17837c478bd9Sstevel@tonic-gate new->ks_req[i].kr_index = set->ks_req[i].kr_index; 17847c478bd9Sstevel@tonic-gate new->ks_req[i].kr_picnum = set->ks_req[i].kr_picnum; 17857c478bd9Sstevel@tonic-gate new->ks_req[i].kr_picp = NULL; 17867c478bd9Sstevel@tonic-gate new->ks_req[i].kr_data = NULL; 17877c478bd9Sstevel@tonic-gate (void) strncpy(new->ks_req[i].kr_event, set->ks_req[i].kr_event, 17887c478bd9Sstevel@tonic-gate CPC_MAX_EVENT_LEN); 17897c478bd9Sstevel@tonic-gate new->ks_req[i].kr_preset = set->ks_req[i].kr_preset; 17907c478bd9Sstevel@tonic-gate new->ks_req[i].kr_flags = set->ks_req[i].kr_flags; 17917c478bd9Sstevel@tonic-gate new->ks_req[i].kr_nattrs = set->ks_req[i].kr_nattrs; 17927c478bd9Sstevel@tonic-gate new->ks_req[i].kr_attr = kmem_alloc(new->ks_req[i].kr_nattrs * 17937c478bd9Sstevel@tonic-gate sizeof (kcpc_attr_t), KM_SLEEP); 17947c478bd9Sstevel@tonic-gate for (j = 0; j < new->ks_req[i].kr_nattrs; j++) { 17957c478bd9Sstevel@tonic-gate new->ks_req[i].kr_attr[j].ka_val = 17967c478bd9Sstevel@tonic-gate set->ks_req[i].kr_attr[j].ka_val; 17977c478bd9Sstevel@tonic-gate (void) strncpy(new->ks_req[i].kr_attr[j].ka_name, 17987c478bd9Sstevel@tonic-gate set->ks_req[i].kr_attr[j].ka_name, 17997c478bd9Sstevel@tonic-gate CPC_MAX_ATTR_LEN); 18007c478bd9Sstevel@tonic-gate } 18017c478bd9Sstevel@tonic-gate } 18027c478bd9Sstevel@tonic-gate 18037c478bd9Sstevel@tonic-gate return (new); 18047c478bd9Sstevel@tonic-gate } 18057c478bd9Sstevel@tonic-gate 18067c478bd9Sstevel@tonic-gate int 18077c478bd9Sstevel@tonic-gate kcpc_allow_nonpriv(void *token) 18087c478bd9Sstevel@tonic-gate { 18097c478bd9Sstevel@tonic-gate return (((kcpc_ctx_t *)token)->kc_flags & KCPC_CTX_NONPRIV); 18107c478bd9Sstevel@tonic-gate } 
18117c478bd9Sstevel@tonic-gate 18127c478bd9Sstevel@tonic-gate void 18137c478bd9Sstevel@tonic-gate kcpc_invalidate(kthread_t *t) 18147c478bd9Sstevel@tonic-gate { 18157c478bd9Sstevel@tonic-gate kcpc_ctx_t *ctx = t->t_cpc_ctx; 18167c478bd9Sstevel@tonic-gate 18177c478bd9Sstevel@tonic-gate if (ctx != NULL) 1818b885580bSAlexander Kolbasov KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID); 18197c478bd9Sstevel@tonic-gate } 18207c478bd9Sstevel@tonic-gate 18217c478bd9Sstevel@tonic-gate /* 18227c478bd9Sstevel@tonic-gate * Given a PCBE ID, attempt to load a matching PCBE module. The strings given 18237c478bd9Sstevel@tonic-gate * are used to construct PCBE names, starting with the most specific, 18247c478bd9Sstevel@tonic-gate * "pcbe.first.second.third.fourth" and ending with the least specific, 18257c478bd9Sstevel@tonic-gate * "pcbe.first". 18267c478bd9Sstevel@tonic-gate * 18277c478bd9Sstevel@tonic-gate * Returns 0 if a PCBE was successfully loaded and -1 upon error. 18287c478bd9Sstevel@tonic-gate */ 18297c478bd9Sstevel@tonic-gate int 18307c478bd9Sstevel@tonic-gate kcpc_pcbe_tryload(const char *prefix, uint_t first, uint_t second, uint_t third) 18317c478bd9Sstevel@tonic-gate { 18327aec1d6eScindi uint_t s[3]; 18337c478bd9Sstevel@tonic-gate 18347aec1d6eScindi s[0] = first; 18357aec1d6eScindi s[1] = second; 18367aec1d6eScindi s[2] = third; 18377c478bd9Sstevel@tonic-gate 18387aec1d6eScindi return (modload_qualified("pcbe", 183920c794b3Sgavinm "pcbe", prefix, ".", s, 3, NULL) < 0 ? -1 : 0); 18407c478bd9Sstevel@tonic-gate } 1841b9e93c10SJonathan Haslam 1842b885580bSAlexander Kolbasov /* 1843b885580bSAlexander Kolbasov * Create one or more CPC context for given CPU with specified counter event 1844b885580bSAlexander Kolbasov * requests 1845b885580bSAlexander Kolbasov * 1846b885580bSAlexander Kolbasov * If number of requested counter events is less than or equal number of 1847b885580bSAlexander Kolbasov * hardware counters on a CPU and can all be assigned to the counters on a CPU 1848b885580bSAlexander Kolbasov * at the same time, then make one CPC context. 1849b885580bSAlexander Kolbasov * 1850b885580bSAlexander Kolbasov * Otherwise, multiple CPC contexts are created to allow multiplexing more 1851b885580bSAlexander Kolbasov * counter events than existing counters onto the counters by iterating through 1852b885580bSAlexander Kolbasov * all of the CPC contexts, programming the counters with each CPC context one 1853b885580bSAlexander Kolbasov * at a time and measuring the resulting counter values. Each of the resulting 1854b885580bSAlexander Kolbasov * CPC contexts contains some number of requested counter events less than or 1855b885580bSAlexander Kolbasov * equal the number of counters on a CPU depending on whether all the counter 1856b885580bSAlexander Kolbasov * events can be programmed on all the counters at the same time or not. 1857b885580bSAlexander Kolbasov * 1858b885580bSAlexander Kolbasov * Flags to kmem_{,z}alloc() are passed in as an argument to allow specifying 1859b885580bSAlexander Kolbasov * whether memory allocation should be non-blocking or not. The code will try 1860b885580bSAlexander Kolbasov * to allocate *whole* CPC contexts if possible. If there is any memory 1861b885580bSAlexander Kolbasov * allocation failure during the allocations needed for a given CPC context, it 1862b885580bSAlexander Kolbasov * will skip allocating that CPC context because it cannot allocate the whole 1863b885580bSAlexander Kolbasov * thing. Thus, the only time that it will end up allocating none (ie. 
no CPC 1864b885580bSAlexander Kolbasov * contexts whatsoever) is when it cannot even allocate *one* whole CPC context 1865b885580bSAlexander Kolbasov * without a memory allocation failure occurring. 1866b885580bSAlexander Kolbasov */ 1867b885580bSAlexander Kolbasov int 1868b885580bSAlexander Kolbasov kcpc_cpu_ctx_create(cpu_t *cp, kcpc_request_list_t *req_list, int kmem_flags, 1869b885580bSAlexander Kolbasov kcpc_ctx_t ***ctx_ptr_array, size_t *ctx_ptr_array_sz) 1870b885580bSAlexander Kolbasov { 1871b885580bSAlexander Kolbasov kcpc_ctx_t **ctx_ptrs; 1872b885580bSAlexander Kolbasov int nctx; 1873b885580bSAlexander Kolbasov int nctx_ptrs; 1874b885580bSAlexander Kolbasov int nreqs; 1875b885580bSAlexander Kolbasov kcpc_request_t *reqs; 1876b885580bSAlexander Kolbasov 1877b885580bSAlexander Kolbasov if (cp == NULL || ctx_ptr_array == NULL || ctx_ptr_array_sz == NULL || 1878b885580bSAlexander Kolbasov req_list == NULL || req_list->krl_cnt < 1) 1879b885580bSAlexander Kolbasov return (-1); 1880b885580bSAlexander Kolbasov 1881b885580bSAlexander Kolbasov /* 1882b885580bSAlexander Kolbasov * Allocate number of sets assuming that each set contains one and only 1883b885580bSAlexander Kolbasov * one counter event request for each counter on a CPU 1884b885580bSAlexander Kolbasov */ 1885b885580bSAlexander Kolbasov nreqs = req_list->krl_cnt; 1886b885580bSAlexander Kolbasov nctx_ptrs = (nreqs + cpc_ncounters - 1) / cpc_ncounters; 1887b885580bSAlexander Kolbasov ctx_ptrs = kmem_zalloc(nctx_ptrs * sizeof (kcpc_ctx_t *), kmem_flags); 1888b885580bSAlexander Kolbasov if (ctx_ptrs == NULL) 1889b885580bSAlexander Kolbasov return (-2); 1890b885580bSAlexander Kolbasov 1891b885580bSAlexander Kolbasov /* 1892b885580bSAlexander Kolbasov * Fill in sets of requests 1893b885580bSAlexander Kolbasov */ 1894b885580bSAlexander Kolbasov nctx = 0; 1895b885580bSAlexander Kolbasov reqs = req_list->krl_list; 1896b885580bSAlexander Kolbasov while (nreqs > 0) { 1897b885580bSAlexander Kolbasov kcpc_ctx_t *ctx; 1898b885580bSAlexander Kolbasov kcpc_set_t *set; 1899b885580bSAlexander Kolbasov int subcode; 1900b885580bSAlexander Kolbasov 1901b885580bSAlexander Kolbasov /* 1902b885580bSAlexander Kolbasov * Allocate CPC context and set for requested counter events 1903b885580bSAlexander Kolbasov */ 1904b885580bSAlexander Kolbasov ctx = kcpc_ctx_alloc(kmem_flags); 1905b885580bSAlexander Kolbasov set = kcpc_set_create(reqs, nreqs, 0, kmem_flags); 1906b885580bSAlexander Kolbasov if (set == NULL) { 1907b885580bSAlexander Kolbasov kcpc_ctx_free(ctx); 1908b885580bSAlexander Kolbasov break; 1909b885580bSAlexander Kolbasov } 1910b885580bSAlexander Kolbasov 1911b885580bSAlexander Kolbasov /* 1912b885580bSAlexander Kolbasov * Determine assignment of requested counter events to specific 1913b885580bSAlexander Kolbasov * counters 1914b885580bSAlexander Kolbasov */ 1915b885580bSAlexander Kolbasov if (kcpc_assign_reqs(set, ctx) != 0) { 1916b885580bSAlexander Kolbasov /* 1917b885580bSAlexander Kolbasov * May not be able to assign requested counter events 1918b885580bSAlexander Kolbasov * to all counters since all counters may not be able 1919b885580bSAlexander Kolbasov * to do all events, so only do one counter event in 1920b885580bSAlexander Kolbasov * set of counter requests when this happens since at 1921b885580bSAlexander Kolbasov * least one of the counters must be able to do the 1922b885580bSAlexander Kolbasov * event. 
1923b885580bSAlexander Kolbasov */ 1924b885580bSAlexander Kolbasov kcpc_free_set(set); 1925b885580bSAlexander Kolbasov set = kcpc_set_create(reqs, 1, 0, kmem_flags); 1926b885580bSAlexander Kolbasov if (set == NULL) { 1927b885580bSAlexander Kolbasov kcpc_ctx_free(ctx); 1928b885580bSAlexander Kolbasov break; 1929b885580bSAlexander Kolbasov } 1930b885580bSAlexander Kolbasov if (kcpc_assign_reqs(set, ctx) != 0) { 1931b885580bSAlexander Kolbasov #ifdef DEBUG 1932b885580bSAlexander Kolbasov cmn_err(CE_NOTE, "!kcpc_cpu_ctx_create: can't " 1933b885580bSAlexander Kolbasov "assign counter event %s!\n", 1934b885580bSAlexander Kolbasov set->ks_req->kr_event); 1935b885580bSAlexander Kolbasov #endif 1936b885580bSAlexander Kolbasov kcpc_free_set(set); 1937b885580bSAlexander Kolbasov kcpc_ctx_free(ctx); 1938b885580bSAlexander Kolbasov reqs++; 1939b885580bSAlexander Kolbasov nreqs--; 1940b885580bSAlexander Kolbasov continue; 1941b885580bSAlexander Kolbasov } 1942b885580bSAlexander Kolbasov } 1943b885580bSAlexander Kolbasov 1944b885580bSAlexander Kolbasov /* 1945b885580bSAlexander Kolbasov * Allocate memory needed to hold requested counter event data 1946b885580bSAlexander Kolbasov */ 1947b885580bSAlexander Kolbasov set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), 1948b885580bSAlexander Kolbasov kmem_flags); 1949b885580bSAlexander Kolbasov if (set->ks_data == NULL) { 1950b885580bSAlexander Kolbasov kcpc_free_set(set); 1951b885580bSAlexander Kolbasov kcpc_ctx_free(ctx); 1952b885580bSAlexander Kolbasov break; 1953b885580bSAlexander Kolbasov } 1954b885580bSAlexander Kolbasov 1955b885580bSAlexander Kolbasov /* 1956b885580bSAlexander Kolbasov * Configure requested counter events 1957b885580bSAlexander Kolbasov */ 1958b885580bSAlexander Kolbasov if (kcpc_configure_reqs(ctx, set, &subcode) != 0) { 1959b885580bSAlexander Kolbasov #ifdef DEBUG 1960b885580bSAlexander Kolbasov cmn_err(CE_NOTE, 1961b885580bSAlexander Kolbasov "!kcpc_cpu_ctx_create: can't configure " 1962b885580bSAlexander Kolbasov "set of counter event requests!\n"); 1963b885580bSAlexander Kolbasov #endif 1964b885580bSAlexander Kolbasov reqs += set->ks_nreqs; 1965b885580bSAlexander Kolbasov nreqs -= set->ks_nreqs; 1966b885580bSAlexander Kolbasov kmem_free(set->ks_data, 1967b885580bSAlexander Kolbasov set->ks_nreqs * sizeof (uint64_t)); 1968b885580bSAlexander Kolbasov kcpc_free_set(set); 1969b885580bSAlexander Kolbasov kcpc_ctx_free(ctx); 1970b885580bSAlexander Kolbasov continue; 1971b885580bSAlexander Kolbasov } 1972b885580bSAlexander Kolbasov 1973b885580bSAlexander Kolbasov /* 1974b885580bSAlexander Kolbasov * Point set of counter event requests at this context and fill 1975b885580bSAlexander Kolbasov * in CPC context 1976b885580bSAlexander Kolbasov */ 1977b885580bSAlexander Kolbasov set->ks_ctx = ctx; 1978b885580bSAlexander Kolbasov ctx->kc_set = set; 1979b885580bSAlexander Kolbasov ctx->kc_cpuid = cp->cpu_id; 1980b885580bSAlexander Kolbasov ctx->kc_thread = curthread; 1981b885580bSAlexander Kolbasov 1982b885580bSAlexander Kolbasov ctx_ptrs[nctx] = ctx; 1983b885580bSAlexander Kolbasov 1984b885580bSAlexander Kolbasov /* 1985b885580bSAlexander Kolbasov * Update requests and how many are left to be assigned to sets 1986b885580bSAlexander Kolbasov */ 1987b885580bSAlexander Kolbasov reqs += set->ks_nreqs; 1988b885580bSAlexander Kolbasov nreqs -= set->ks_nreqs; 1989b885580bSAlexander Kolbasov 1990b885580bSAlexander Kolbasov /* 1991b885580bSAlexander Kolbasov * Increment number of CPC contexts and allocate bigger array 1992b885580bSAlexander 
Kolbasov * for context pointers as needed 1993b885580bSAlexander Kolbasov */ 1994b885580bSAlexander Kolbasov nctx++; 1995b885580bSAlexander Kolbasov if (nctx >= nctx_ptrs) { 1996b885580bSAlexander Kolbasov kcpc_ctx_t **new; 1997b885580bSAlexander Kolbasov int new_cnt; 1998b885580bSAlexander Kolbasov 1999b885580bSAlexander Kolbasov /* 2000b885580bSAlexander Kolbasov * Allocate more CPC contexts based on how many 2001b885580bSAlexander Kolbasov * contexts allocated so far and how many counter 2002b885580bSAlexander Kolbasov * requests left to assign 2003b885580bSAlexander Kolbasov */ 2004b885580bSAlexander Kolbasov new_cnt = nctx_ptrs + 2005b885580bSAlexander Kolbasov ((nreqs + cpc_ncounters - 1) / cpc_ncounters); 2006b885580bSAlexander Kolbasov new = kmem_zalloc(new_cnt * sizeof (kcpc_ctx_t *), 2007b885580bSAlexander Kolbasov kmem_flags); 2008b885580bSAlexander Kolbasov if (new == NULL) 2009b885580bSAlexander Kolbasov break; 2010b885580bSAlexander Kolbasov 2011b885580bSAlexander Kolbasov /* 2012b885580bSAlexander Kolbasov * Copy contents of old sets into new ones 2013b885580bSAlexander Kolbasov */ 2014b885580bSAlexander Kolbasov bcopy(ctx_ptrs, new, 2015b885580bSAlexander Kolbasov nctx_ptrs * sizeof (kcpc_ctx_t *)); 2016b885580bSAlexander Kolbasov 2017b885580bSAlexander Kolbasov /* 2018b885580bSAlexander Kolbasov * Free old array of context pointers and use newly 2019b885580bSAlexander Kolbasov * allocated one instead now 2020b885580bSAlexander Kolbasov */ 2021b885580bSAlexander Kolbasov kmem_free(ctx_ptrs, nctx_ptrs * sizeof (kcpc_ctx_t *)); 2022b885580bSAlexander Kolbasov ctx_ptrs = new; 2023b885580bSAlexander Kolbasov nctx_ptrs = new_cnt; 2024b885580bSAlexander Kolbasov } 2025b885580bSAlexander Kolbasov } 2026b885580bSAlexander Kolbasov 2027b885580bSAlexander Kolbasov /* 2028b885580bSAlexander Kolbasov * Return NULL if no CPC contexts filled in 2029b885580bSAlexander Kolbasov */ 2030b885580bSAlexander Kolbasov if (nctx == 0) { 2031b885580bSAlexander Kolbasov kmem_free(ctx_ptrs, nctx_ptrs * sizeof (kcpc_ctx_t *)); 2032b885580bSAlexander Kolbasov *ctx_ptr_array = NULL; 2033b885580bSAlexander Kolbasov *ctx_ptr_array_sz = 0; 2034b885580bSAlexander Kolbasov return (-2); 2035b885580bSAlexander Kolbasov } 2036b885580bSAlexander Kolbasov 2037b885580bSAlexander Kolbasov *ctx_ptr_array = ctx_ptrs; 2038b885580bSAlexander Kolbasov *ctx_ptr_array_sz = nctx_ptrs * sizeof (kcpc_ctx_t *); 2039b885580bSAlexander Kolbasov return (nctx); 2040b885580bSAlexander Kolbasov } 2041b885580bSAlexander Kolbasov 2042b885580bSAlexander Kolbasov /* 2043b885580bSAlexander Kolbasov * Return whether PCBE supports given counter event 2044b885580bSAlexander Kolbasov */ 2045b885580bSAlexander Kolbasov boolean_t 2046b885580bSAlexander Kolbasov kcpc_event_supported(char *event) 2047b885580bSAlexander Kolbasov { 2048b885580bSAlexander Kolbasov if (pcbe_ops == NULL || pcbe_ops->pcbe_event_coverage(event) == 0) 2049b885580bSAlexander Kolbasov return (B_FALSE); 2050b885580bSAlexander Kolbasov 2051b885580bSAlexander Kolbasov return (B_TRUE); 2052b885580bSAlexander Kolbasov } 2053b885580bSAlexander Kolbasov 2054b885580bSAlexander Kolbasov /* 2055b885580bSAlexander Kolbasov * Program counters on current CPU with given CPC context 2056b885580bSAlexander Kolbasov * 2057b885580bSAlexander Kolbasov * If kernel is interposing on counters to measure hardware capacity and 2058b885580bSAlexander Kolbasov * utilization, then unprogram counters for kernel *before* programming them 2059b885580bSAlexander Kolbasov * with specified CPC context. 
2060b885580bSAlexander Kolbasov * 2061b885580bSAlexander Kolbasov * kcpc_{program,unprogram}() may be called either directly by a thread running 2062b885580bSAlexander Kolbasov * on the target CPU or from a cross-call from another CPU. To protect 2063b885580bSAlexander Kolbasov * programming and unprogramming from being interrupted by cross-calls, callers 2064b885580bSAlexander Kolbasov * who execute kcpc_{program,unprogram} should raise PIL to the level used by 2065b885580bSAlexander Kolbasov * cross-calls. 2066b885580bSAlexander Kolbasov */ 2067b885580bSAlexander Kolbasov void 2068b885580bSAlexander Kolbasov kcpc_program(kcpc_ctx_t *ctx, boolean_t for_thread, boolean_t cu_interpose) 2069b885580bSAlexander Kolbasov { 2070b885580bSAlexander Kolbasov int error; 2071b885580bSAlexander Kolbasov 2072b885580bSAlexander Kolbasov ASSERT(IS_HIPIL()); 2073b885580bSAlexander Kolbasov 2074b885580bSAlexander Kolbasov /* 2075b885580bSAlexander Kolbasov * CPC context shouldn't be NULL, its CPU field should specify current 2076b885580bSAlexander Kolbasov * CPU or be -1 to specify any CPU when the context is bound to a 2077b885580bSAlexander Kolbasov * thread, and preemption should be disabled 2078b885580bSAlexander Kolbasov */ 2079b885580bSAlexander Kolbasov ASSERT(ctx != NULL && (ctx->kc_cpuid == CPU->cpu_id || 2080b885580bSAlexander Kolbasov ctx->kc_cpuid == -1) && curthread->t_preempt > 0); 2081b885580bSAlexander Kolbasov if (ctx == NULL || (ctx->kc_cpuid != CPU->cpu_id && 2082b885580bSAlexander Kolbasov ctx->kc_cpuid != -1) || curthread->t_preempt < 1) 2083b885580bSAlexander Kolbasov return; 2084b885580bSAlexander Kolbasov 2085b885580bSAlexander Kolbasov /* 2086b885580bSAlexander Kolbasov * Unprogram counters for kernel measuring hardware capacity and 2087b885580bSAlexander Kolbasov * utilization 2088b885580bSAlexander Kolbasov */ 2089b885580bSAlexander Kolbasov if (cu_interpose == B_TRUE) { 2090b885580bSAlexander Kolbasov cu_cpc_unprogram(CPU, &error); 2091b885580bSAlexander Kolbasov } else { 2092b885580bSAlexander Kolbasov kcpc_set_t *set = ctx->kc_set; 2093b885580bSAlexander Kolbasov int i; 2094b885580bSAlexander Kolbasov 2095b885580bSAlexander Kolbasov ASSERT(set != NULL); 2096b885580bSAlexander Kolbasov 2097b885580bSAlexander Kolbasov /* 2098b885580bSAlexander Kolbasov * Since cu_interpose is false, we are programming CU context. 2099b885580bSAlexander Kolbasov * In general, PCBE can continue from the state saved in the 2100b885580bSAlexander Kolbasov * set, but it is not very reliable, so we start again from the 2101b885580bSAlexander Kolbasov * preset value. 2102b885580bSAlexander Kolbasov */ 2103b885580bSAlexander Kolbasov for (i = 0; i < set->ks_nreqs; i++) { 2104b885580bSAlexander Kolbasov /* 2105b885580bSAlexander Kolbasov * Reset the virtual counter value to the preset value. 2106b885580bSAlexander Kolbasov */ 2107b885580bSAlexander Kolbasov *(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset; 2108b885580bSAlexander Kolbasov 2109b885580bSAlexander Kolbasov /* 2110b885580bSAlexander Kolbasov * Reset PCBE to the preset value. 
2111b885580bSAlexander Kolbasov */ 2112b885580bSAlexander Kolbasov pcbe_ops->pcbe_configure(0, NULL, 2113b885580bSAlexander Kolbasov set->ks_req[i].kr_preset, 2114b885580bSAlexander Kolbasov 0, 0, NULL, &set->ks_req[i].kr_config, NULL); 2115b885580bSAlexander Kolbasov } 2116b885580bSAlexander Kolbasov } 2117b885580bSAlexander Kolbasov 2118b885580bSAlexander Kolbasov /* 2119b885580bSAlexander Kolbasov * Program counters with specified CPC context 2120b885580bSAlexander Kolbasov */ 2121b885580bSAlexander Kolbasov ctx->kc_rawtick = KCPC_GET_TICK(); 2122b885580bSAlexander Kolbasov pcbe_ops->pcbe_program(ctx); 2123b885580bSAlexander Kolbasov 2124b885580bSAlexander Kolbasov /* 2125b885580bSAlexander Kolbasov * Denote that counters programmed for thread or CPU CPC context 2126b885580bSAlexander Kolbasov * differently 2127b885580bSAlexander Kolbasov */ 2128b885580bSAlexander Kolbasov if (for_thread == B_TRUE) 2129b885580bSAlexander Kolbasov KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE); 2130b885580bSAlexander Kolbasov else 2131b885580bSAlexander Kolbasov CPU->cpu_cpc_ctx = ctx; 2132b885580bSAlexander Kolbasov } 2133b885580bSAlexander Kolbasov 2134b885580bSAlexander Kolbasov /* 2135b885580bSAlexander Kolbasov * Unprogram counters with given CPC context on current CPU 2136b885580bSAlexander Kolbasov * 2137b885580bSAlexander Kolbasov * If kernel is interposing on counters to measure hardware capacity and 2138b885580bSAlexander Kolbasov * utilization, then program counters for the kernel capacity and utilization 2139b885580bSAlexander Kolbasov * *after* unprogramming them for given CPC context. 2140b885580bSAlexander Kolbasov * 2141b885580bSAlexander Kolbasov * See the comment for kcpc_program regarding the synchronization with 2142b885580bSAlexander Kolbasov * cross-calls. 
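 *
 * An illustrative caller sketch (this mirrors what kcpc_restore(),
 * kcpc_passivate() and kcpc_free() in this file already do; it is not a
 * separate interface):
 *
 *    int save_spl;
 *
 *    kpreempt_disable();
 *    save_spl = spl_xcall();
 *    kcpc_unprogram(ctx, B_TRUE);    (or kcpc_program(ctx, ...))
 *    splx(save_spl);
 *    kpreempt_enable();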
/*
 * Unprogram counters with the given CPC context on the current CPU.
 *
 * If the kernel is interposing on counters to measure hardware capacity and
 * utilization, then program counters for the kernel capacity and utilization
 * *after* unprogramming them for the given CPC context.
 *
 * See the comment for kcpc_program regarding the synchronization with
 * cross-calls.
 */
void
kcpc_unprogram(kcpc_ctx_t *ctx, boolean_t cu_interpose)
{
	int		error;

	ASSERT(IS_HIPIL());

	/*
	 * CPC context shouldn't be NULL, its CPU field should specify the
	 * current CPU or be -1 to specify any CPU when the context is bound
	 * to a thread, and preemption should be disabled.
	 */
	ASSERT(ctx != NULL && (ctx->kc_cpuid == CPU->cpu_id ||
	    ctx->kc_cpuid == -1) && curthread->t_preempt > 0);

	if (ctx == NULL || (ctx->kc_cpuid != CPU->cpu_id &&
	    ctx->kc_cpuid != -1) || curthread->t_preempt < 1 ||
	    (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) != 0) {
		return;
	}

	/*
	 * The CPC context to be unprogrammed should be bound to the current
	 * CPU or thread.
	 */
	ASSERT(CPU->cpu_cpc_ctx == ctx || curthread->t_cpc_ctx == ctx);

	/*
	 * Stop counters.
	 */
	pcbe_ops->pcbe_allstop();
	KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID_STOPPED);

	/*
	 * Allow the kernel to interpose on the counters and program them for
	 * its own use to measure hardware capacity and utilization if the
	 * cu_interpose argument is true.
	 */
	if (cu_interpose == B_TRUE)
		cu_cpc_program(CPU, &error);
}
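
/*
 * Example (illustrative sketch, not part of the original source): like
 * programming, unprogramming normally runs on the target CPU at cross-call
 * PIL.  A hypothetical stop routine executed via cpu_call() might look like
 * the following; kcpc_cpustop_func() later in this file follows the same
 * pattern.
 *
 *	static void
 *	example_stop_func(void)
 *	{
 *		kpreempt_disable();
 *		if (CPU->cpu_cpc_ctx != NULL)
 *			kcpc_unprogram(CPU->cpu_cpc_ctx, B_TRUE);
 *		kpreempt_enable();
 *	}
 */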
/*
 * Read the CPU Performance Counters (CPC) on the current CPU and call the
 * specified update routine with data for each counter event currently
 * programmed on that CPU.
 */
int
kcpc_read(kcpc_update_func_t update_func)
{
	kcpc_ctx_t	*ctx;
	int		i;
	kcpc_request_t	*req;
	int		retval;
	kcpc_set_t	*set;

	ASSERT(IS_HIPIL());

	/*
	 * Can't grab locks or block because this may be called inside the
	 * dispatcher.
	 */
	kpreempt_disable();

	ctx = CPU->cpu_cpc_ctx;
	if (ctx == NULL) {
		kpreempt_enable();
		return (0);
	}

	/*
	 * Read counter data from the current CPU.
	 */
	pcbe_ops->pcbe_sample(ctx);

	set = ctx->kc_set;
	if (set == NULL || set->ks_req == NULL) {
		kpreempt_enable();
		return (0);
	}

	/*
	 * Call the update function with the caller-supplied pointer (kr_ptr)
	 * and the data for each CPC event request currently programmed on
	 * the current CPU.
	 */
	req = set->ks_req;
	retval = 0;
	for (i = 0; i < set->ks_nreqs; i++) {
		int ret;

		if (req[i].kr_data == NULL)
			break;

		ret = update_func(req[i].kr_ptr, *req[i].kr_data);
		if (ret < 0)
			retval = ret;
	}

	kpreempt_enable();

	return (retval);
}
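
/*
 * Example (illustrative sketch, not part of the original source): an update
 * routine passed to kcpc_read() receives the kr_ptr registered with each
 * request and the current virtualized counter value, and returns a negative
 * value to report an error, per the loop above.  kcpc_read() itself must be
 * called from cross-call or other high-PIL context.  The names below are
 * hypothetical.
 *
 *	typedef struct example_counter_state {
 *		uint64_t	ecs_last;
 *		uint64_t	ecs_total;
 *	} example_counter_state_t;
 *
 *	static int
 *	example_update(void *ptr, uint64_t value)
 *	{
 *		example_counter_state_t *ecs = ptr;
 *
 *		ecs->ecs_total += value - ecs->ecs_last;
 *		ecs->ecs_last = value;
 *		return (0);
 *	}
 *
 *	(void) kcpc_read(example_update);
 */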
/*
 * Initialize list of counter event requests
 */
kcpc_request_list_t *
kcpc_reqs_init(int nreqs, int kmem_flags)
{
	kcpc_request_list_t	*req_list;
	kcpc_request_t		*reqs;

	if (nreqs < 1)
		return (NULL);

	req_list = kmem_zalloc(sizeof (kcpc_request_list_t), kmem_flags);
	if (req_list == NULL)
		return (NULL);

	reqs = kmem_zalloc(nreqs * sizeof (kcpc_request_t), kmem_flags);
	if (reqs == NULL) {
		kmem_free(req_list, sizeof (kcpc_request_list_t));
		return (NULL);
	}

	req_list->krl_list = reqs;
	req_list->krl_cnt = 0;
	req_list->krl_max = nreqs;
	return (req_list);
}


/*
 * Add counter event request to given list of counter event requests
 */
int
kcpc_reqs_add(kcpc_request_list_t *req_list, char *event, uint64_t preset,
    uint_t flags, uint_t nattrs, kcpc_attr_t *attr, void *ptr, int kmem_flags)
{
	kcpc_request_t	*req;

	if (req_list == NULL || req_list->krl_list == NULL)
		return (-1);

	ASSERT(req_list->krl_max != 0);

	/*
	 * Allocate more space (if needed)
	 */
	if (req_list->krl_cnt > req_list->krl_max) {
		kcpc_request_t	*new;
		kcpc_request_t	*old;

		old = req_list->krl_list;
		new = kmem_zalloc((req_list->krl_max +
		    cpc_ncounters) * sizeof (kcpc_request_t), kmem_flags);
		if (new == NULL)
			return (-2);

		req_list->krl_list = new;
		bcopy(old, req_list->krl_list,
		    req_list->krl_cnt * sizeof (kcpc_request_t));
		kmem_free(old, req_list->krl_max * sizeof (kcpc_request_t));
		req_list->krl_cnt = 0;
		req_list->krl_max += cpc_ncounters;
	}

	/*
	 * Fill in request as much as possible now, but some fields will need
	 * to be set when request is assigned to a set.
	 */
	req = &req_list->krl_list[req_list->krl_cnt];
	req->kr_config = NULL;
	req->kr_picnum = -1;	/* have CPC pick this */
	req->kr_index = -1;	/* set when assigning request to set */
	req->kr_data = NULL;	/* set when configuring request */
	(void) strcpy(req->kr_event, event);
	req->kr_preset = preset;
	req->kr_flags = flags;
	req->kr_nattrs = nattrs;
	req->kr_attr = attr;
	/*
	 * Keep pointer given by caller to give to update function when this
	 * counter event is sampled/read
	 */
	req->kr_ptr = ptr;

	req_list->krl_cnt++;

	return (0);
}
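
/*
 * Example (illustrative sketch, not part of the original source): building a
 * request list with the two routines above.  The event name, state pointer,
 * return values, and use of KM_NOSLEEP are hypothetical placeholders.
 *
 *	kcpc_request_list_t *rl;
 *
 *	rl = kcpc_reqs_init(cpc_ncounters, KM_NOSLEEP);
 *	if (rl == NULL)
 *		return (ENOMEM);
 *
 *	if (kcpc_reqs_add(rl, "example_event", 0, 0, 0, NULL,
 *	    example_state_ptr, KM_NOSLEEP) != 0) {
 *		(void) kcpc_reqs_fini(rl);
 *		return (EINVAL);
 *	}
 */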
/*
 * Reset list of CPC event requests so its space can be used for another set
 * of requests
 */
int
kcpc_reqs_reset(kcpc_request_list_t *req_list)
{
	/*
	 * Return when pointer to request list structure or request is NULL or
	 * when max requests is less than or equal to 0
	 */
	if (req_list == NULL || req_list->krl_list == NULL ||
	    req_list->krl_max <= 0)
		return (-1);

	/*
	 * Zero out requests and number of requests used
	 */
	bzero(req_list->krl_list, req_list->krl_max * sizeof (kcpc_request_t));
	req_list->krl_cnt = 0;
	return (0);
}

/*
 * Free given list of counter event requests
 */
int
kcpc_reqs_fini(kcpc_request_list_t *req_list)
{
	kmem_free(req_list->krl_list,
	    req_list->krl_max * sizeof (kcpc_request_t));
	kmem_free(req_list, sizeof (kcpc_request_list_t));
	return (0);
}
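
/*
 * Example (illustrative sketch, not part of the original source): a single
 * request list can be reused across several measurement passes by resetting
 * it between passes and freeing it once at the end.  The loop bound and the
 * elided steps are hypothetical.
 *
 *	int pass;
 *
 *	for (pass = 0; pass < npasses; pass++) {
 *		(void) kcpc_reqs_reset(rl);
 *		... add this pass's requests with kcpc_reqs_add() ...
 *		... build and program a set from rl->krl_list ...
 *	}
 *	(void) kcpc_reqs_fini(rl);
 */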
/*
 * Create set of given counter event requests
 */
static kcpc_set_t *
kcpc_set_create(kcpc_request_t *reqs, int nreqs, int set_flags, int kmem_flags)
{
	int		i;
	kcpc_set_t	*set;

	/*
	 * Allocate set and assign number of requests in set and flags
	 */
	set = kmem_zalloc(sizeof (kcpc_set_t), kmem_flags);
	if (set == NULL)
		return (NULL);

	if (nreqs < cpc_ncounters)
		set->ks_nreqs = nreqs;
	else
		set->ks_nreqs = cpc_ncounters;

	set->ks_flags = set_flags;

	/*
	 * Allocate requests needed, copy requests into set, and set index into
	 * data for each request (which may change when we assign requested
	 * counter events to counters)
	 */
	set->ks_req = (kcpc_request_t *)kmem_zalloc(sizeof (kcpc_request_t) *
	    set->ks_nreqs, kmem_flags);
	if (set->ks_req == NULL) {
		kmem_free(set, sizeof (kcpc_set_t));
		return (NULL);
	}

	bcopy(reqs, set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs);

	for (i = 0; i < set->ks_nreqs; i++)
		set->ks_req[i].kr_index = i;

	return (set);
}
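
/*
 * Example (illustrative sketch, not part of the original source): a request
 * list built with the routines above might be turned into a set as follows.
 * Note that kcpc_set_create() silently clamps the number of requests to
 * cpc_ncounters and renumbers kr_index for each copied request.  The return
 * value and use of KM_NOSLEEP are hypothetical.
 *
 *	kcpc_set_t *set;
 *
 *	set = kcpc_set_create(rl->krl_list, rl->krl_cnt, 0, KM_NOSLEEP);
 *	if (set == NULL)
 *		return (ENOMEM);
 */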
/*
 * Stop counters on current CPU.
 *
 * If preserve_context is true, the caller is interested in the CPU's CPC
 * context and wants it to be preserved.
 *
 * If preserve_context is false, the caller does not need the CPU's CPC context
 * to be preserved, so it is set to NULL.
 */
static void
kcpc_cpustop_func(boolean_t preserve_context)
{
	kpreempt_disable();

	/*
	 * Someone already stopped this context before us, so there is nothing
	 * to do.
	 */
	if (CPU->cpu_cpc_ctx == NULL) {
		kpreempt_enable();
		return;
	}

	kcpc_unprogram(CPU->cpu_cpc_ctx, B_TRUE);
	/*
	 * If CU does not use counters, then clear the CPU's CPC context.
	 * If the caller requested to preserve context it should disable CU
	 * first, so there should be no CU context now.
	 */
	ASSERT(!preserve_context || !CU_CPC_ON(CPU));
	if (!preserve_context && CPU->cpu_cpc_ctx != NULL && !CU_CPC_ON(CPU))
		CPU->cpu_cpc_ctx = NULL;

	kpreempt_enable();
}

/*
 * Stop counters on given CPU and set its CPC context to NULL unless
 * preserve_context is true.
 */
void
kcpc_cpu_stop(cpu_t *cp, boolean_t preserve_context)
{
	cpu_call(cp, (cpu_call_func_t)kcpc_cpustop_func,
	    preserve_context, 0);
}

/*
 * Program the context on the current CPU
 */
static void
kcpc_remoteprogram_func(kcpc_ctx_t *ctx, uintptr_t arg)
{
	boolean_t for_thread = (boolean_t)arg;

	ASSERT(ctx != NULL);

	kpreempt_disable();
	kcpc_program(ctx, for_thread, B_TRUE);
	kpreempt_enable();
}

/*
 * Program counters on given CPU
 */
void
kcpc_cpu_program(cpu_t *cp, kcpc_ctx_t *ctx)
{
	cpu_call(cp, (cpu_call_func_t)kcpc_remoteprogram_func, (uintptr_t)ctx,
	    (uintptr_t)B_FALSE);
}

char *
kcpc_list_attrs(void)
{
	ASSERT(pcbe_ops != NULL);

	return (pcbe_ops->pcbe_list_attrs());
}

char *
kcpc_list_events(uint_t pic)
{
	ASSERT(pcbe_ops != NULL);

	return (pcbe_ops->pcbe_list_events(pic));
}

uint_t
kcpc_pcbe_capabilities(void)
{
	ASSERT(pcbe_ops != NULL);

	return (pcbe_ops->pcbe_caps);
}

int
kcpc_pcbe_loaded(void)
{
	return (pcbe_ops == NULL ? -1 : 0);
}
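
/*
 * Example (illustrative sketch, not part of the original source): the PCBE
 * query wrappers above dereference pcbe_ops, so they are only safe to call
 * once kcpc_pcbe_loaded() reports that a PCBE module is present.  The picnum
 * and the parsing step are hypothetical.
 *
 *	if (kcpc_pcbe_loaded() == 0) {
 *		char *attrs = kcpc_list_attrs();
 *		char *events = kcpc_list_events(0);
 *
 *		... parse the PCBE-provided name lists ...
 *	}
 */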