/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

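/*
 * Entry types the selftests expect to find in the ring buffer;
 * anything else is treated as corruption.
 */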
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}

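/*
 * Consume every event on one CPU's ring buffer and check that each
 * entry has a known type. Tracing is disabled on the first bad entry,
 * since the buffer can no longer be trusted.
 */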
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

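/*
 * Two levels of macros are needed so that DYN_FTRACE_TEST_NAME is
 * expanded before it is stringified.
 */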
#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
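/*
 * Verify the irqsoff tracer: disable interrupts around a udelay()
 * and check that both the live buffer and the max-latency snapshot
 * picked up entries.
 */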
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
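/*
 * Verify the preemptoff tracer: disable preemption around a udelay()
 * and check both the live buffer and the max-latency snapshot, as
 * above.
 */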
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
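/*
 * Verify the combined preempt/irqsoff tracer: run two passes that
 * overlap a preempt-off section with an irq-off section, checking
 * both trace buffers after each pass.
 */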
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
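/*
 * Helper thread for the wakeup test below: it raises itself to an RT
 * priority, signals that it is ready, then sleeps until the test
 * wakes it up.
 */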
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the test know we have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

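/*
 * Verify the wakeup tracer: create an RT thread, put it to sleep, and
 * wake it, so the tracer can measure the wakeup latency of the
 * highest-priority runnable task.
 */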
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, in which
	 * case we would wake up a task that is already awake.
	 * But that is extremely unlikely, and the worst that can
	 * happen in such a case is that we disable tracing.
	 * Honestly, if this race does happen, something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
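/*
 * Verify the context-switch tracer: with the tracer enabled, the
 * msleep() below must produce at least one sched_switch entry.
 */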
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
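/*
 * Verify the sysprof tracer: enable it briefly and check that the
 * buffer is still sane. An empty buffer is not an error here.
 */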
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */