/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 1998-2003 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/debug.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/ftrace.h>

/*
 * Tunable parameters:
 *
 * ftrace_atboot	- whether to start fast tracing at boot.
 * ftrace_nent		- size of the per-CPU event ring buffer.
 */
int ftrace_atboot = 0;
int ftrace_nent = FTRACE_NENT;

/*
 * The current overall state of the ftrace subsystem.
 * If FTRACE_READY is set, then tracing can be enabled.
 * If FTRACE_ENABLED is set, tracing is enabled on the set of CPUs
 * which are currently FTRACE_READY.
 */
static int ftrace_state = 0;

/*
 * Protects assignments to:
 *	ftrace_state
 *	cpu[N]->cpu_ftrace.ftd_state
 *	cpu[N]->cpu_ftrace.ftd_cur
 *	cpu[N]->cpu_ftrace.ftd_first
 *	cpu[N]->cpu_ftrace.ftd_last
 * Does _not_ protect readers of cpu[N]->cpu_ftrace.ftd_state.
 * Does not protect reading the FTRACE_READY bit in ftrace_state,
 * since non-READY to READY is a stable transition.  This is used
 * to ensure ftrace_init() has been called.
 */
static kmutex_t ftrace_lock;

/*
 * Check whether a CPU is installed.
 */
#define	IS_CPU(i) (cpu[i] != NULL)

static void
ftrace_cpu_init(int cpuid)
{
	ftrace_data_t *ftd;

	/*
	 * This can be called with "cpu[cpuid]->cpu_flags & CPU_EXISTS"
	 * being false - e.g. when a CPU is DR'ed in.
	 */
	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (ftd->ftd_state & FTRACE_READY)
		return;

	/*
	 * We don't allocate the buffers until the first time
	 * ftrace_cpu_start() is called, so that they're not
	 * allocated if ftrace is never enabled.
	 */
	ftd->ftd_state |= FTRACE_READY;
	ASSERT(!(ftd->ftd_state & FTRACE_ENABLED));
}

/*
 * Only called from cpu_unconfigure() (and cpu_configure() on error).
 * At this point, cpu[cpuid] is about to be freed and NULLed out,
 * so we'd better clean up after ourselves.
 */
static void
ftrace_cpu_fini(int cpuid)
{
	ftrace_data_t *ftd;

	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	ASSERT((cpu[cpuid]->cpu_flags & CPU_POWEROFF) != 0);

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (!(ftd->ftd_state & FTRACE_READY))
		return;

	/*
	 * Do not free the mutex and the trace buffer once they are
	 * allocated.  A thread preempted from the now powered-off CPU
	 * may be holding the mutex and be in the middle of adding a
	 * trace record.
	 */
}

/*
 * Enable tracing on a READY CPU, allocating its record mutex and ring
 * buffer the first time the CPU is started.
 */
static void
ftrace_cpu_start(int cpuid)
{
	ftrace_data_t *ftd;

	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	ASSERT(ftrace_state & FTRACE_ENABLED);

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (ftd->ftd_state & FTRACE_READY) {
		if (ftd->ftd_first == NULL) {
			ftrace_record_t *ptrs;

			mutex_init(&ftd->ftd_mutex, NULL, MUTEX_DEFAULT, NULL);
			mutex_exit(&ftrace_lock);
			ptrs = kmem_zalloc(ftrace_nent *
			    sizeof (ftrace_record_t), KM_SLEEP);
			mutex_enter(&ftrace_lock);

			ftd->ftd_first = ptrs;
			ftd->ftd_last = ptrs + (ftrace_nent - 1);
			ftd->ftd_cur = ptrs;
			membar_producer();
		}
		ftd->ftd_state |= FTRACE_ENABLED;
	}
}

static void
ftrace_cpu_stop(int cpuid)
{
	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	cpu[cpuid]->cpu_ftrace.ftd_state &= ~(FTRACE_ENABLED);
}

/*
 * Hook for DR.
 */
/*ARGSUSED*/
int
ftrace_cpu_setup(cpu_setup_t what, int id, void *arg)
{
	if (!(ftrace_state & FTRACE_READY))
		return (0);

	switch (what) {
	case CPU_CONFIG:
		mutex_enter(&ftrace_lock);
		ftrace_cpu_init(id);
		if (ftrace_state & FTRACE_ENABLED)
			ftrace_cpu_start(id);
		mutex_exit(&ftrace_lock);
		break;

	case CPU_UNCONFIG:
		mutex_enter(&ftrace_lock);
		ftrace_cpu_fini(id);
		mutex_exit(&ftrace_lock);
		break;

	default:
		break;
	}
	return (0);
}

void
ftrace_init(void)
{
	int i;

	ASSERT(!(ftrace_state & FTRACE_READY));
	mutex_init(&ftrace_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_enter(&ftrace_lock);
	for (i = 0; i < NCPU; i++) {
		if (IS_CPU(i)) {
			/* should have been kmem_zalloc()'ed */
			ASSERT(cpu[i]->cpu_ftrace.ftd_state == 0);
			ASSERT(cpu[i]->cpu_ftrace.ftd_first == NULL);
			ASSERT(cpu[i]->cpu_ftrace.ftd_last == NULL);
			ASSERT(cpu[i]->cpu_ftrace.ftd_cur == NULL);
		}
	}

	if (ftrace_nent < 1) {
		mutex_exit(&ftrace_lock);
		return;
	}

	for (i = 0; i < NCPU; i++)
		if (IS_CPU(i))
			ftrace_cpu_init(i);

	ftrace_state |= FTRACE_READY;
	mutex_enter(&cpu_lock);
	register_cpu_setup_func(ftrace_cpu_setup, NULL);
	mutex_exit(&cpu_lock);
	mutex_exit(&ftrace_lock);

	if (ftrace_atboot)
		(void) ftrace_start();
}

/*
 * Enable tracing on all READY CPUs.  Returns the previous
 * FTRACE_ENABLED state.
 */
int
ftrace_start(void)
{
	int i, was_enabled = 0;

	if (ftrace_state & FTRACE_READY) {
		mutex_enter(&ftrace_lock);
		was_enabled = ((ftrace_state & FTRACE_ENABLED) != 0);
		ftrace_state |= FTRACE_ENABLED;
		for (i = 0; i < NCPU; i++)
			if (IS_CPU(i))
				ftrace_cpu_start(i);
		mutex_exit(&ftrace_lock);
	}

	return (was_enabled);
}

/*
 * Disable tracing on all installed CPUs.  Returns the previous
 * FTRACE_ENABLED state.
 */
int
ftrace_stop(void)
{
	int i, was_enabled = 0;

	if (ftrace_state & FTRACE_READY) {
		mutex_enter(&ftrace_lock);
		if (ftrace_state & FTRACE_ENABLED) {
			was_enabled = 1;
			for (i = 0; i < NCPU; i++)
				if (IS_CPU(i))
					ftrace_cpu_stop(i);
			ftrace_state &= ~(FTRACE_ENABLED);
		}
		mutex_exit(&ftrace_lock);
	}
	return (was_enabled);
}

/*
 * ftrace_0() through ftrace_3() append one record (event string, current
 * thread, unscaled timestamp, caller, and zero to three data words) to the
 * current CPU's ring buffer; ftrace_3_notick() is identical to ftrace_3()
 * except that it stores a zero timestamp.  The buffer is circular: once the
 * last slot has been written, the cursor wraps back to the first.  If the
 * per-CPU mutex is already held and we are running on the interrupt stack,
 * the record is silently dropped rather than risk a deadlock.
 */
void
ftrace_0(char *str)
{
	ftrace_record_t *r;
	struct cpu *cp = CPU;
	ftrace_data_t *ftd = &cp->cpu_ftrace;

	if (mutex_tryenter(&ftd->ftd_mutex) == 0) {
		if (CPU_ON_INTR(cp))
			return;
		else
			mutex_enter(&ftd->ftd_mutex);
	}
	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = gethrtime_unscaled();
	r->ftr_caller = caller();

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;
	mutex_exit(&ftd->ftd_mutex);
}

void
ftrace_1(char *str, ulong_t arg1)
{
	ftrace_record_t *r;
	struct cpu *cp = CPU;
	ftrace_data_t *ftd = &cp->cpu_ftrace;

	if (mutex_tryenter(&ftd->ftd_mutex) == 0) {
		if (CPU_ON_INTR(cp))
			return;
		else
			mutex_enter(&ftd->ftd_mutex);
	}
	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = gethrtime_unscaled();
	r->ftr_caller = caller();
	r->ftr_data1 = arg1;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;
	mutex_exit(&ftd->ftd_mutex);
}

void
ftrace_2(char *str, ulong_t arg1, ulong_t arg2)
{
	ftrace_record_t *r;
	struct cpu *cp = CPU;
	ftrace_data_t *ftd = &cp->cpu_ftrace;

	if (mutex_tryenter(&ftd->ftd_mutex) == 0) {
		if (CPU_ON_INTR(cp))
			return;
		else
			mutex_enter(&ftd->ftd_mutex);
	}
	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = gethrtime_unscaled();
	r->ftr_caller = caller();
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;
	mutex_exit(&ftd->ftd_mutex);
}

void
ftrace_3(char *str, ulong_t arg1, ulong_t arg2, ulong_t arg3)
{
	ftrace_record_t *r;
	struct cpu *cp = CPU;
	ftrace_data_t *ftd = &cp->cpu_ftrace;

	if (mutex_tryenter(&ftd->ftd_mutex) == 0) {
		if (CPU_ON_INTR(cp))
			return;
		else
			mutex_enter(&ftd->ftd_mutex);
	}
	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = gethrtime_unscaled();
	r->ftr_caller = caller();
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;
	r->ftr_data3 = arg3;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;
	mutex_exit(&ftd->ftd_mutex);
}

void
ftrace_3_notick(char *str, ulong_t arg1, ulong_t arg2, ulong_t arg3)
{
	ftrace_record_t *r;
	struct cpu *cp = CPU;
	ftrace_data_t *ftd = &cp->cpu_ftrace;

	if (mutex_tryenter(&ftd->ftd_mutex) == 0) {
		if (CPU_ON_INTR(cp))
			return;
		else
			mutex_enter(&ftd->ftd_mutex);
	}
	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = 0;
	r->ftr_caller = caller();
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;
	r->ftr_data3 = arg3;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;
	mutex_exit(&ftd->ftd_mutex);
}
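
/*
 * Usage sketch (illustrative only; not part of the original file): callers
 * are normally expected to reach the ftrace_N() routines through the
 * FTRACE_* wrapper macros in <sys/ftrace.h>, which check that tracing is
 * enabled before calling in here -- the exact macro form is an assumption
 * here.  A direct call recording one word of data would look like the
 * following, where the event string and "level" are made-up examples:
 *
 *	ftrace_1("intr: level 0x%lx", (ulong_t)level);
 *
 * Only the string pointer is stored in the trace record, so the event
 * string must be a literal (or must otherwise outlive the trace buffer).
 */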