xref: /linux/kernel/trace/trace_selftest.c (revision b233b28eac0cc37d07c2d007ea08c86c778c5af4)
/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

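/*
 * These selftests are not run directly; register_tracer() invokes a
 * tracer's ->selftest() callback at registration time when
 * CONFIG_FTRACE_STARTUP_TEST is enabled. A minimal sketch of how a
 * tracer hooks one up (the field values below are illustrative, not
 * the actual definitions from trace_functions.c):
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */
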
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}

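/*
 * Consume every entry on the given CPU's ring buffer and verify that
 * each one is of a known entry type. Returns 0 if the buffer looks
 * sane; on a corrupted entry, tracing is disabled and -1 is returned.
 */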
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* the function is passed in as a parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
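
/*
 * Note: DYN_FTRACE_TEST_NAME() is expected to be defined in a separate
 * compilation unit so that the call above is not inlined and the
 * function really gets an mcount call site for the filter test. A
 * minimal sketch of such a definition (illustrative; the real one
 * lives elsewhere, e.g. trace_selftest_dynamic.c):
 *
 *	int DYN_FTRACE_TEST_NAME(void)
 *	{
 *		return 0;
 *	}
 */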

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
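/*
 * Verify the irqsoff tracer: disable interrupts for ~100us with the
 * tracer active, then check that both trace buffers are sane and that
 * the max latency buffer actually recorded entries.
 */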
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
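/*
 * Verify the preemptoff tracer: disable preemption for ~100us with the
 * tracer active, then check that both trace buffers are sane and that
 * the max latency buffer actually recorded entries. Skipped (and
 * passed) if preemption is already disabled when we get here.
 */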
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
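/*
 * Verify the preemptirqsoff tracer: run the critical section twice,
 * once re-enabling preemption before interrupts and once the other way
 * around, checking both trace buffers each time. Skipped (and passed)
 * if preemption is already disabled when we get here.
 */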
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
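/*
 * Helper kthread for the wakeup tracer test: raise itself to an RT
 * priority, signal the test via the completion, then sleep until the
 * test wakes it up and finally stops it.
 */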
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the test know we now have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

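/*
 * Verify the wakeup tracer: create an RT kthread, let it go to sleep,
 * then wake it up with the tracer active and check that both trace
 * buffers are sane and that the max latency buffer recorded entries.
 */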
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
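/*
 * Verify the sched_switch tracer: trace normal activity for 1/10 of a
 * second and check that the buffer is sane and not empty.
 */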
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
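/*
 * Verify the sysprof tracer: trace for 1/10 of a second and check that
 * the buffer contents are sane.
 */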
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
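/*
 * Verify the branch tracer: trace for 1/10 of a second and check that
 * the buffer contents are sane.
 */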
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */