/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

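/*
 * Both macros below permanently shut ftrace down via ftrace_kill()
 * when the condition fires: if the code-patching machinery
 * misbehaves, it is safer to stop tracing entirely than to keep
 * rewriting kernel text.
 */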
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

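/*
 * Iterate over all registered ftrace_ops and call each handler in
 * turn.  Readers walk the list without taking ftrace_lock; the
 * smp_wmb() in __register_ftrace_function() orders the ops->next
 * store before publication, and the read_barrier_depends() calls
 * below are the read side of that pairing (needed only on alpha).
 */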
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a lag before every CPU stops
 * calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU can
	 * see ops linked into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * parse the objcopy output of the text section. Use a variable
 * for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
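/*
 * These bits are OR'd into a single command word so that one
 * stop_machine() run can apply several updates at once; for
 * example, ftrace_startup() may pass
 * FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC.
 */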

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
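/*
 * Illustrative sizing (not exact; struct sizes vary by arch and
 * config): with 4 KiB pages and a 32-byte struct dyn_ftrace on a
 * 64-bit kernel, roughly (4096 - 16) / 32 = 127 records fit in
 * each ftrace_page.
 */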

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

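/*
 * Freed dyn_ftrace records are chained into a simple free list:
 * ftrace_free_rec() reuses rec->ip to hold the next-free pointer
 * (the record no longer describes a call site once FTRACE_FL_FREE
 * is set), and ftrace_alloc_dyn_node() pops records back off this
 * list before carving new ones out of ftrace_pages.
 */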
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (!ftrace_enabled || ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

#define FTRACE_ADDR ((long)(ftrace_caller))

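/*
 * Decision table for the filtered-and-enabling case below (flags
 * FILTER and ENABLED per record):
 *
 *	FILTER	ENABLED	action
 *	  1	  1	nothing (already tracing)
 *	  0	  0	nothing (stays off)
 *	  0	  1	disable (lost its filter)
 *	  1	  0	enable
 */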
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *nop, int enable)
{
	unsigned long ip, fl;
	unsigned char *call, *old, *new;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	call = ftrace_call_replace(ip, FTRACE_ADDR);

	if (rec->flags & FTRACE_FL_ENABLED) {
		old = nop;
		new = call;
	} else {
		old = call;
		new = nop;
	}

	return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *nop = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	nop = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, nop, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

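/*
 * Patch the mcount call at rec->ip back to a NOP.  Note the return
 * convention: 1 on success, 0 on failure (the opposite of the usual
 * kernel 0/-errno style); ftrace_update_code() keys off this to
 * decide whether to keep or free the record.
 */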
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed with unknown error on ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

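/*
 * stop_machine() runs __ftrace_modify_code() with every other CPU
 * spinning with interrupts disabled, so no CPU can be executing an
 * mcount call site while its instruction bytes are being rewritten.
 */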
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start++;
	command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start--;
	if (!ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start is true if we want ftrace running */
	if (ftrace_start)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start is true if ftrace is running */
	if (ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(void)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e., patch mcount-call with NOP) */
		if (ftrace_code_disable(p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos > iter->pos)
		*pos = iter->pos;

	l = *pos;
	p = t_next(m, p, &l);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	ret = seq_printf(m, "%s\n", str);
	if (ret < 0) {
		iter->pos--;
		iter->idx--;
	}

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = 0;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = 0;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

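/*
 * Glob-style patterns map to the match types above, e.g.:
 *
 *	"schedule"	MATCH_FULL		exact name
 *	"sched*"	MATCH_FRONT_ONLY	prefix match
 *	"*lock"		MATCH_END_ONLY		suffix match
 *	"*mutex*"	MATCH_MIDDLE_ONLY	substring match
 */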
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

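/*
 * Parse whitespace-separated patterns from user space.  A token that
 * is split across two write() calls is carried over in iter->buffer;
 * FTRACE_ITER_CONT marks that a partial token is pending so the next
 * write appends to it instead of starting fresh.
 */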
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)	/* cnt is a size_t and can never be negative */
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
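/*
 * Example (illustrative): trace only schedule(), clearing any
 * previously set filters first:
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *
 * Wildcards work as in ftrace_match(), so "sched*" would select
 * every function whose name starts with "sched".
 */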

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
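/*
 * From user space (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo schedule > /sys/kernel/debug/tracing/set_ftrace_notrace
 */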

static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code();
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it sets the disabled flags and resets
 * the trace function without taking any locks or cleaning up.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
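/*
 * Example (sketch; my_ops, my_trace_func and my_hit_count are
 * hypothetical names, not part of this file):
 *
 *	static atomic_long_t my_hit_count;
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_long_inc(&my_hit_count);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */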

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}