/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/debug.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/ftrace.h>

/*
 * Tunable parameters:
 *
 * ftrace_atboot	- whether to start fast tracing at boot.
 * ftrace_nent		- number of entries in the per-CPU event ring buffer.
 */
int ftrace_atboot = 0;
int ftrace_nent = FTRACE_NENT;
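/*
 * Illustrative note (not part of the original source): since these are plain
 * kernel globals, they can be patched like other tunables, e.g. from
 * /etc/system before boot:
 *
 *	set ftrace_atboot = 1
 *	set ftrace_nent = 4096
 *
 * The value 4096 here is only an example of a larger per-CPU buffer size.
 */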

/*
 * Global Tracing State:
 *
 *                NOTREADY(=0)
 *                  |
 *            ftrace_init()
 *                  |
 *                  |
 *                  v
 *      +-------->READY-------+
 *      |                     |
 *  ftrace_stop()         ftrace_start()
 *      |                     |
 *      +---(ENABLED|READY)<--+
 *
 * During boot, ftrace_init() is called and the state becomes
 * READY. If ftrace_atboot is set, ftrace_start() is called at
 * this time.
 *
 * If FTRACE_READY is set, then tracing can be enabled.
 * If FTRACE_ENABLED is set, tracing is enabled on the set of CPUs
 *   which are currently FTRACE_READY.
 */
static int ftrace_state = 0;

/*
 * Per-CPU Tracing State:
 *
 *     +-----------------READY<--------------+
 *     |                 ^   |               |
 *     |                 | ftrace_cpu_fini() |
 *     |                 |   |               |
 *     |   ftrace_cpu_init() |               |
 *     |                 |   v     ftrace_cpu_stop()
 *     |              NOTREADY(=0)           |
 *     |                   ^                 |
 * ftrace_cpu_start()      |                 |
 *     |              ftrace_cpu_fini()      |
 *     |                   |                 |
 *     +----------->(ENABLED|READY)----------+
 *
 */

/*
 * Locking:
 *
 * Trace context code does not use any locks. There is a per-cpu circular trace
 * buffer that has a head, a tail and a current pointer. Each record of this
 * buffer is of equal length. Before doing anything, trace context code checks
 * the per-cpu ENABLED bit. The trace buffer is allocated in non-trace context,
 * and that code sets the bit only after the buffer has been allocated and set
 * up. So trace context code can't access the buffer until it is completely set
 * up. The buffer is also freed in non-trace context, and the code that frees
 * it runs only after the corresponding cpu has been powered off, so no trace
 * context code can be running on that cpu at the time. We only need to make
 * sure that trace context code is not preempted from the cpu in the middle of
 * accessing the trace buffer, which is achieved simply by disabling interrupts
 * temporarily. This approach makes the fewest assumptions about the state of
 * the callers of the tracing functions.
 *
 * A single global lock, ftrace_lock, protects assignments to all global and
 * per-cpu trace variables. In some cases it does not protect reads of those
 * variables.
 *
 * More specifically, it protects assignments to:
 *
 *   ftrace_state
 *   cpu[N]->cpu_ftrace.ftd_state
 *   cpu[N]->cpu_ftrace.ftd_first
 *   cpu[N]->cpu_ftrace.ftd_last
 *
 * Does _not_ protect reading of cpu[N]->cpu_ftrace.ftd_state
 * Does _not_ protect cpu[N]->cpu_ftrace.ftd_cur
 * Does _not_ protect reading of ftrace_state
 */
static kmutex_t ftrace_lock;

/*
 * Check whether a CPU is installed.
 */
#define	IS_CPU(i) (cpu[i] != NULL)

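/*
 * Mark a CPU as FTRACE_READY. The ring buffer itself is allocated lazily,
 * the first time ftrace_cpu_start() is called for this CPU.
 */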
static void
ftrace_cpu_init(int cpuid)
{
	ftrace_data_t *ftd;

	/*
	 * This can be called with "cpu[cpuid]->cpu_flags & CPU_EXISTS"
	 * being false - e.g. when a CPU is DR'ed in.
	 */
	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (ftd->ftd_state & FTRACE_READY)
		return;

	/*
	 * We don't allocate the buffers until the first time
	 *   ftrace_cpu_start() is called, so that they're not
	 *   allocated if ftrace is never enabled.
	 */
	ftd->ftd_state |= FTRACE_READY;
	ASSERT(!(ftd->ftd_state & FTRACE_ENABLED));
}

/*
 * Only called from cpu_unconfigure() (and cpu_configure() on error).
 * At this point, cpu[cpuid] is about to be freed and NULLed out,
 *   so we'd better clean up after ourselves.
 */
static void
ftrace_cpu_fini(int cpuid)
{
	ftrace_data_t *ftd;

	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	ASSERT((cpu[cpuid]->cpu_flags & CPU_POWEROFF) != 0);

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (!(ftd->ftd_state & FTRACE_READY))
		return;

	/*
	 * This cpu is powered off and no code can be executing on it. So
	 * we can simply finish our cleanup. There is no need for an xcall
	 * to make sure that this cpu is out of trace context.
	 *
	 * The cpu structure will be cleared soon. But, for the sake of
	 * debugging, clear our pointers and state.
	 */
	if (ftd->ftd_first != NULL) {
		kmem_free(ftd->ftd_first,
		    ftrace_nent * sizeof (ftrace_record_t));
	}
	bzero(ftd, sizeof (ftrace_data_t));
}

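/*
 * Enable tracing on a CPU that is FTRACE_READY, allocating its ring buffer
 * on first use. The buffer is allocated with ftrace_lock dropped (KM_SLEEP),
 * so ftd_first is re-checked afterwards in case another thread won the race.
 */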
static void
ftrace_cpu_start(int cpuid)
{
	ftrace_data_t *ftd;

	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	ASSERT(ftrace_state & FTRACE_ENABLED);

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (ftd->ftd_state & FTRACE_READY) {
		if (ftd->ftd_first == NULL) {
			ftrace_record_t *ptrs;

			mutex_exit(&ftrace_lock);
			ptrs = kmem_zalloc(ftrace_nent *
			    sizeof (ftrace_record_t), KM_SLEEP);
			mutex_enter(&ftrace_lock);
			if (ftd->ftd_first != NULL) {
				/*
				 * Someone else beat us to it. The winner will
				 * set up the pointers and the state.
				 */
				kmem_free(ptrs,
				    ftrace_nent * sizeof (ftrace_record_t));
				return;
			}

			ftd->ftd_first = ptrs;
			ftd->ftd_last = ptrs + (ftrace_nent - 1);
			ftd->ftd_cur = ptrs;
			membar_producer();
		}
		ftd->ftd_state |= FTRACE_ENABLED;
	}
}

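/*
 * Disable tracing on a CPU by clearing its FTRACE_ENABLED bit; the CPU
 * remains FTRACE_READY and keeps its buffer.
 */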
static void
ftrace_cpu_stop(int cpuid)
{
	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	cpu[cpuid]->cpu_ftrace.ftd_state &= ~(FTRACE_ENABLED);
}

/*
 * Hook for DR.
 */
/*ARGSUSED*/
int
ftrace_cpu_setup(cpu_setup_t what, int id, void *arg)
{
	if (!(ftrace_state & FTRACE_READY))
		return (0);

	switch (what) {
	case CPU_CONFIG:
		mutex_enter(&ftrace_lock);
		ftrace_cpu_init(id);
		if (ftrace_state & FTRACE_ENABLED)
			ftrace_cpu_start(id);
		mutex_exit(&ftrace_lock);
		break;

	case CPU_UNCONFIG:
		mutex_enter(&ftrace_lock);
		ftrace_cpu_fini(id);
		mutex_exit(&ftrace_lock);
		break;

	default:
		break;
	}
	return (0);
}

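/*
 * Called once during boot: verify the zeroed per-CPU state, mark installed
 * CPUs ready, register the DR callback and, if ftrace_atboot is set, start
 * tracing immediately.
 */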
void
ftrace_init(void)
{
	int i;

	ASSERT(!(ftrace_state & FTRACE_READY));
	mutex_init(&ftrace_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_enter(&ftrace_lock);
	for (i = 0; i < NCPU; i++) {
		if (IS_CPU(i)) {
			/* should have been kmem_zalloc()'ed */
			ASSERT(cpu[i]->cpu_ftrace.ftd_state == 0);
			ASSERT(cpu[i]->cpu_ftrace.ftd_first == NULL);
			ASSERT(cpu[i]->cpu_ftrace.ftd_last == NULL);
			ASSERT(cpu[i]->cpu_ftrace.ftd_cur == NULL);
		}
	}

	if (ftrace_nent < 1) {
		mutex_exit(&ftrace_lock);
		return;
	}

	for (i = 0; i < NCPU; i++)
		if (IS_CPU(i))
			ftrace_cpu_init(i);

	ftrace_state |= FTRACE_READY;
	mutex_enter(&cpu_lock);
	register_cpu_setup_func(ftrace_cpu_setup, NULL);
	mutex_exit(&cpu_lock);
	mutex_exit(&ftrace_lock);

	if (ftrace_atboot)
		(void) ftrace_start();
}

/*
 * Called from uadmin ioctl, or via mp_init_table[] during boot.
 */
int
ftrace_start(void)
{
	int i, was_enabled = 0;

	if (ftrace_state & FTRACE_READY) {
		mutex_enter(&ftrace_lock);
		was_enabled = ((ftrace_state & FTRACE_ENABLED) != 0);
		ftrace_state |= FTRACE_ENABLED;
		for (i = 0; i < NCPU; i++)
			if (IS_CPU(i))
				ftrace_cpu_start(i);
		mutex_exit(&ftrace_lock);
	}

	return (was_enabled);
}

/*
 * Called from uadmin ioctl, to stop tracing.
 */
int
ftrace_stop(void)
{
	int i, was_enabled = 0;

	if (ftrace_state & FTRACE_READY) {
		mutex_enter(&ftrace_lock);
		if (ftrace_state & FTRACE_ENABLED) {
			was_enabled = 1;
			for (i = 0; i < NCPU; i++)
				if (IS_CPU(i))
					ftrace_cpu_stop(i);
			ftrace_state &= ~(FTRACE_ENABLED);
		}
		mutex_exit(&ftrace_lock);
	}
	return (was_enabled);
}

/*
 * The ftrace_X() functions are called from trace context. All callers of
 * ftrace_X() test FTRACE_ENABLED first. Although this check is not exact, it
 * keeps the overhead very low when tracing is not enabled.
 *
 * gethrtime_unscaled() appears to be safe to call from trace context. As an
 * added precaution, we call it before we disable interrupts on this cpu.
 */

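/*
 * A minimal usage sketch (illustrative only, not part of the original
 * comments): a caller in trace context might record an event with two data
 * words as
 *
 *	ftrace_2("my hypothetical event: %lx %lx", (ulong_t)val1,
 *	    (ulong_t)val2, (caddr_t)my_return_address);
 *
 * where the event string, val1, val2 and my_return_address are all made-up
 * names. In practice callers are expected to go through the FTRACE_*
 * convenience macros in <sys/ftrace.h>, which check FTRACE_ENABLED and supply
 * the caller address, rather than calling these functions directly.
 */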
void
ftrace_0(char *str, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t  timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}

void
ftrace_1(char *str, ulong_t arg1, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t  timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}

void
ftrace_2(char *str, ulong_t arg1, ulong_t arg2, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t  timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}

void
ftrace_3(char *str, ulong_t arg1, ulong_t arg2, ulong_t arg3, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t  timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;
	r->ftr_data3 = arg3;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}

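/*
 * Identical to ftrace_3(), except that no timestamp is taken; ftr_tick is
 * recorded as 0.
 */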
void
ftrace_3_notick(char *str, ulong_t arg1, ulong_t arg2,
    ulong_t arg3, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = 0;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;
	r->ftr_data3 = arg3;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}
531