xref: /linux/kernel/trace/trace_selftest.c (revision a33f32244d8550da8b4a26e277ce07d5c6d158b5)
/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_HW_BRANCHES:
	case TRACE_KSYM:
		return 1;
	}
	return 0;
}
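
/*
 * Illustrative note (not part of the original file): any tracer whose
 * selftest writes entries to the ring buffer must have its entry type
 * listed in the switch above, or trace_test_buffer_cpu() below will
 * report the buffer as corrupted.  A hypothetical new type would be
 * wired in like so:
 *
 *	case TRACE_FOO:
 *		return 1;
 */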

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds trace_buf_size entries; if we
		 * loop more times than that, something is wrong with
		 * the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken and is constantly filling
	 * the buffer, this will run forever and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
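
/*
 * For orientation, the selftests below all share one pattern; a
 * condensed sketch (illustrative only, using the names defined in
 * this file):
 *
 *	ret = tracer_init(trace, tr);		// start the tracer
 *	...					// provoke some activity
 *	tracing_stop();
 *	ret = trace_test_buffer(tr, &count);	// buffer still sane?
 *	trace->reset(tr);
 *	tracing_start();
 *	if (!ret && !count)			// sane but empty buffer
 *		ret = -1;			// means the tracer failed
 */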

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in as a parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
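
/*
 * For reference, a minimal sketch of the function handed in as @func
 * above.  It is built in a separate compilation unit
 * (kernel/trace/trace_selftest_dynamic.c) so that it gets mcount call
 * sites even when trace.c itself is compiled without them; the exact
 * body shown here is an assumption, but it must not be inlined or the
 * test would have nothing to patch:
 *
 *	int DYN_FTRACE_TEST_NAME(void)
 *	{
 *		return 0;	// exists only to hit mcount
 *	}
 */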

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 of a second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
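
/*
 * Usage sketch (an assumption based on how tracers in this directory
 * wire themselves up, e.g. trace_functions.c): a tracer opts into a
 * startup selftest through the .selftest member of its struct tracer,
 * and the test is run when the tracer registers itself:
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *		.start		= function_trace_start,
 *	#ifdef CONFIG_FTRACE_SELFTEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */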

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy; we only want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
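
/*
 * For comparison, the init() callback that the selftest above simulates
 * looks roughly like this (a sketch after trace_functions_graph.c; the
 * selftest merely swaps in the watchdog entry probe):
 *
 *	static int graph_trace_init(struct trace_array *tr)
 *	{
 *		int ret;
 *
 *		set_graph_array(tr);
 *		ret = register_ftrace_graph(&trace_graph_return,
 *					    &trace_graph_entry);
 *		if (ret)
 *			return ret;
 *		tracing_start_cmdline_record();
 *		return 0;
 *	}
 */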

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip:
	 * tracing_stop() disables the tr and max buffers, which makes
	 * flipping impossible if a parallel max irqs-off latency comes
	 * in while they are disabled.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip:
	 * tracing_stop() disables the tr and max buffers, which makes
	 * flipping impossible if a parallel max preempt-off latency
	 * comes in while they are disabled.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip:
	 * tracing_stop() disables the tr and max buffers, which makes
	 * flipping impossible if a parallel max irqs/preempt-off
	 * latency comes in while they are disabled.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; its priority doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the caller know we now have our RT prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the
	 * various per-cpu trace buffers before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	memcpy(&tracer, trace, sizeof(tracer));

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */

#ifdef CONFIG_KSYM_TRACER
static int ksym_selftest_dummy;

int
trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	ksym_selftest_dummy = 0;
	/* Register the read-write tracing request */

	ret = process_new_ksym_entry("ksym_selftest_dummy",
				     HW_BREAKPOINT_R | HW_BREAKPOINT_W,
				     (unsigned long)(&ksym_selftest_dummy));

	if (ret < 0) {
		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
		goto ret_path;
	}
	/*
	 * Perform a read and a write operation on the dummy variable
	 * to trigger the tracer.
	 */
	if (ksym_selftest_dummy == 0)
		ksym_selftest_dummy++;

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/*
	 * One read and one write operation were performed on the dummy
	 * variable, which should have triggered two entries in the
	 * trace buffer.
	 */
	if (!ret && count != 2) {
		printk(KERN_CONT "Ksym tracer startup test failed");
		ret = -1;
	}

ret_path:
	return ret;
}
#endif /* CONFIG_KSYM_TRACER */