/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/debug.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/ftrace.h>

/*
 * Tunable parameters:
 *
 * ftrace_atboot	- whether to start fast tracing at boot.
 * ftrace_nent		- size of the per-CPU event ring buffer.
 */
int ftrace_atboot = 0;
int ftrace_nent = FTRACE_NENT;

/*
 * Global Tracing State:
 *
 *       NOTREADY(=0)
 *            |
 *       ftrace_init()
 *            |
 *            |
 *            v
 *     +-------->READY-------+
 *     |                     |
 * ftrace_stop()        ftrace_start()
 *     |                     |
 *     +---(ENABLED|READY)<--+
 *
 * During boot, ftrace_init() is called and the state becomes
 * READY.  If ftrace_atboot is set, ftrace_start() is called at
 * this time.
 *
 * If FTRACE_READY is set, then tracing can be enabled.
 * If FTRACE_ENABLED is set, tracing is enabled on the set of CPUs
 * which are currently FTRACE_READY.
 */
static int ftrace_state = 0;

/*
 * Per-CPU Tracing State:
 *
 *     +-----------------READY<--------------+
 *     |                  ^  |               |
 *     |                  |  ftrace_cpu_fini()
 *     |                  |  |               |
 *     |    ftrace_cpu_init()|               |
 *     |                  |  v       ftrace_cpu_stop()
 *     |             NOTREADY(=0)            |
 *     |                  ^                  |
 *  ftrace_cpu_start()    |                  |
 *     |         ftrace_cpu_fini()           |
 *     |                  |                  |
 *     +----------->(ENABLED|READY)----------+
 */

/*
 * Locking:
 *
 * Trace context code does not use any lock.  There is a per-cpu circular
 * trace buffer that has a head, a tail and a current pointer.  Each record
 * of this buffer is of equal length.  Before doing anything, trace context
 * code checks the per-cpu ENABLED bit.  The trace buffer is allocated in
 * non-trace context, which sets this bit only after the buffer has been
 * allocated and set up, so trace context code cannot access the buffer
 * until it is completely set up.  The buffer is likewise freed in non-trace
 * context, and only after the corresponding cpu has been powered off, so no
 * trace context code can be running on it at that point.  We only need to
 * make sure that trace context code is not preempted from the cpu in the
 * middle of accessing the trace buffer.  This can be achieved simply by
 * disabling interrupts temporarily.  This approach makes the fewest
 * assumptions about the state of the callers of the tracing functions.
 *
 * A single global lock, ftrace_lock, protects assignments to all global and
 * per-cpu trace variables.  It does not protect reads of those variables in
 * some cases.
 *
 * More specifically, it protects assignments to:
 *
 *   ftrace_state
 *   cpu[N]->cpu_ftrace.ftd_state
 *   cpu[N]->cpu_ftrace.ftd_first
 *   cpu[N]->cpu_ftrace.ftd_last
 *
 * Does _not_ protect reading of cpu[N]->cpu_ftrace.ftd_state
 * Does _not_ protect cpu[N]->cpu_ftrace.ftd_cur
 * Does _not_ protect reading of ftrace_state
 */
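
/*
 * Everything in this file is a producer; no consumer of the records is
 * defined here.  The following is an illustrative sketch only (the
 * function and the #ifdef guard are hypothetical, not part of this
 * interface) of how a non-trace-context consumer could walk one CPU's
 * buffer: the records are fixed-size, the buffer is kmem_zalloc()'ed,
 * and ftr_event remains NULL in any slot that has never been written.
 */
#ifdef FTRACE_EXAMPLE_READER
static void
ftrace_example_walk(int cpuid, void (*func)(ftrace_record_t *))
{
	ftrace_data_t *ftd = &cpu[cpuid]->cpu_ftrace;
	ftrace_record_t *r;

	if (ftd->ftd_first == NULL)
		return;			/* buffer was never allocated */

	/*
	 * Trace context may still be appending; a consumer must
	 * tolerate the record at ftd_cur being partially written.
	 */
	for (r = ftd->ftd_first; r <= ftd->ftd_last; r++) {
		if (r->ftr_event != NULL)
			func(r);
	}
}
#endif	/* FTRACE_EXAMPLE_READER */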

static kmutex_t ftrace_lock;

/*
 * Check whether a CPU is installed.
 */
#define	IS_CPU(i) (cpu[i] != NULL)

static void
ftrace_cpu_init(int cpuid)
{
	ftrace_data_t *ftd;

	/*
	 * This can be called with "cpu[cpuid]->cpu_flags & CPU_EXISTS"
	 * being false - e.g. when a CPU is DR'ed in.
	 */
	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (ftd->ftd_state & FTRACE_READY)
		return;

	/*
	 * We don't allocate the buffers until the first time
	 * ftrace_cpu_start() is called, so that they're not
	 * allocated if ftrace is never enabled.
	 */
	ftd->ftd_state |= FTRACE_READY;
	ASSERT(!(ftd->ftd_state & FTRACE_ENABLED));
}

/*
 * Only called from cpu_unconfigure() (and cpu_configure() on error).
 * At this point, cpu[cpuid] is about to be freed and NULLed out,
 * so we'd better clean up after ourselves.
 */
static void
ftrace_cpu_fini(int cpuid)
{
	ftrace_data_t *ftd;

	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	ASSERT((cpu[cpuid]->cpu_flags & CPU_POWEROFF) != 0);

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (!(ftd->ftd_state & FTRACE_READY))
		return;

	/*
	 * This cpu is powered off and no code can be executing on it.  So
	 * we can simply finish our cleanup.  There is no need for an xcall
	 * to make sure that this cpu is out of trace context.
	 *
	 * The cpu structure will be cleared soon.  But, for the sake of
	 * debugging, clear our pointers and state.
	 */
	if (ftd->ftd_first != NULL) {
		kmem_free(ftd->ftd_first,
		    ftrace_nent * sizeof (ftrace_record_t));
	}
	bzero(ftd, sizeof (ftrace_data_t));
}

static void
ftrace_cpu_start(int cpuid)
{
	ftrace_data_t *ftd;

	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	ASSERT(ftrace_state & FTRACE_ENABLED);

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (ftd->ftd_state & FTRACE_READY) {
		if (ftd->ftd_first == NULL) {
			ftrace_record_t *ptrs;

			mutex_exit(&ftrace_lock);
			ptrs = kmem_zalloc(ftrace_nent *
			    sizeof (ftrace_record_t), KM_SLEEP);
			mutex_enter(&ftrace_lock);
			if (ftd->ftd_first != NULL) {
				/*
				 * Someone else beat us to it.  The winner will
				 * set up the pointers and the state.
				 */
				kmem_free(ptrs,
				    ftrace_nent * sizeof (ftrace_record_t));
				return;
			}

			ftd->ftd_first = ptrs;
			ftd->ftd_last = ptrs + (ftrace_nent - 1);
			ftd->ftd_cur = ptrs;
			membar_producer();
		}
		ftd->ftd_state |= FTRACE_ENABLED;
	}
}

static void
ftrace_cpu_stop(int cpuid)
{
	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	cpu[cpuid]->cpu_ftrace.ftd_state &= ~(FTRACE_ENABLED);
}
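
/*
 * A note on the membar_producer() in ftrace_cpu_start() above: the buffer
 * pointers are stored before FTRACE_ENABLED is set, so trace context that
 * sees the flag set also sees valid buffer pointers.  A minimal standalone
 * sketch of that publication pattern follows; every name in it is
 * hypothetical and for illustration only.
 */
#ifdef FTRACE_EXAMPLE_PUBLISH
#define	EXAMPLE_READY	0x1		/* hypothetical state flag */

typedef struct example_obj {
	void	*eo_data;		/* set up before publication */
	uint_t	eo_state;		/* tested lock-free by readers */
} example_obj_t;

static void
example_publish(example_obj_t *eo, void *data)
{
	eo->eo_data = data;		/* initialize the object first */
	membar_producer();		/* order the stores above... */
	eo->eo_state |= EXAMPLE_READY;	/* ...before publishing the flag */
}
#endif	/* FTRACE_EXAMPLE_PUBLISH */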

/*
 * Hook for DR.
 */
/*ARGSUSED*/
int
ftrace_cpu_setup(cpu_setup_t what, int id, void *arg)
{
	if (!(ftrace_state & FTRACE_READY))
		return (0);

	switch (what) {
	case CPU_CONFIG:
		mutex_enter(&ftrace_lock);
		ftrace_cpu_init(id);
		if (ftrace_state & FTRACE_ENABLED)
			ftrace_cpu_start(id);
		mutex_exit(&ftrace_lock);
		break;

	case CPU_UNCONFIG:
		mutex_enter(&ftrace_lock);
		ftrace_cpu_fini(id);
		mutex_exit(&ftrace_lock);
		break;

	default:
		break;
	}
	return (0);
}

void
ftrace_init(void)
{
	int i;

	ASSERT(!(ftrace_state & FTRACE_READY));
	mutex_init(&ftrace_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_enter(&ftrace_lock);
	for (i = 0; i < NCPU; i++) {
		if (IS_CPU(i)) {
			/* should have been kmem_zalloc()'ed */
			ASSERT(cpu[i]->cpu_ftrace.ftd_state == 0);
			ASSERT(cpu[i]->cpu_ftrace.ftd_first == NULL);
			ASSERT(cpu[i]->cpu_ftrace.ftd_last == NULL);
			ASSERT(cpu[i]->cpu_ftrace.ftd_cur == NULL);
		}
	}

	if (ftrace_nent < 1) {
		mutex_exit(&ftrace_lock);
		return;
	}

	for (i = 0; i < NCPU; i++)
		if (IS_CPU(i))
			ftrace_cpu_init(i);

	ftrace_state |= FTRACE_READY;
	mutex_enter(&cpu_lock);
	register_cpu_setup_func(ftrace_cpu_setup, NULL);
	mutex_exit(&cpu_lock);
	mutex_exit(&ftrace_lock);

	if (ftrace_atboot)
		(void) ftrace_start();
}

/*
 * Called from uadmin ioctl, or via mp_init_table[] during boot.
 */
int
ftrace_start(void)
{
	int i, was_enabled = 0;

	if (ftrace_state & FTRACE_READY) {
		mutex_enter(&ftrace_lock);
		was_enabled = ((ftrace_state & FTRACE_ENABLED) != 0);
		ftrace_state |= FTRACE_ENABLED;
		for (i = 0; i < NCPU; i++)
			if (IS_CPU(i))
				ftrace_cpu_start(i);
		mutex_exit(&ftrace_lock);
	}

	return (was_enabled);
}

/*
 * Called from uadmin ioctl, to stop tracing.
 */
int
ftrace_stop(void)
{
	int i, was_enabled = 0;

	if (ftrace_state & FTRACE_READY) {
		mutex_enter(&ftrace_lock);
		if (ftrace_state & FTRACE_ENABLED) {
			was_enabled = 1;
			for (i = 0; i < NCPU; i++)
				if (IS_CPU(i))
					ftrace_cpu_stop(i);
			ftrace_state &= ~(FTRACE_ENABLED);
		}
		mutex_exit(&ftrace_lock);
	}
	return (was_enabled);
}
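
/*
 * Usage sketch (the caller below is hypothetical, for illustration
 * only): since both functions return the previous enabled state, a
 * caller can bracket a region of interest and then restore whatever
 * state it found.
 */
#ifdef FTRACE_EXAMPLE_TOGGLE
static void
ftrace_example_region(void)
{
	int was_enabled = ftrace_start();

	/* ... the code of interest runs (and is traced) here ... */

	if (!was_enabled)
		(void) ftrace_stop();	/* restore the previous state */
}
#endif	/* FTRACE_EXAMPLE_TOGGLE */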

/*
 * The ftrace_X() functions are called from trace context.  All callers of
 * ftrace_X() test FTRACE_ENABLED first.  Although this test is not precise,
 * it keeps the overhead very low when tracing is not enabled.
 *
 * gethrtime_unscaled() appears to be safe to call from trace context.  As
 * an added precaution, it is called before interrupts are disabled on this
 * cpu.
 */

void
ftrace_0(char *str, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}

void
ftrace_1(char *str, ulong_t arg1, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}

void
ftrace_2(char *str, ulong_t arg1, ulong_t arg2, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}

void
ftrace_3(char *str, ulong_t arg1, ulong_t arg2, ulong_t arg3, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;
	r->ftr_data3 = arg3;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}

void
ftrace_3_notick(char *str, ulong_t arg1, ulong_t arg2,
    ulong_t arg3, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = 0;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;
	r->ftr_data3 = arg3;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}
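
/*
 * Illustrative sketch only: ftr_tick holds an unscaled timestamp from
 * gethrtime_unscaled(), or 0 for records written by ftrace_3_notick().
 * A hypothetical consumer would convert it to nanoseconds with
 * scalehrtime(); the function below is made up for illustration.
 */
#ifdef FTRACE_EXAMPLE_SCALE
static hrtime_t
ftrace_example_record_ns(const ftrace_record_t *r)
{
	hrtime_t ts = r->ftr_tick;

	if (ts != 0)
		scalehrtime(&ts);	/* unscaled ticks -> nanoseconds */
	return (ts);
}
#endif	/* FTRACE_EXAMPLE_SCALE */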