xref: /linux/kernel/trace/ftrace.c (revision 7ec7fb394298c212c30e063c57e0aa895efe9439)
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15 
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/list.h>
29 
30 #include <asm/ftrace.h>
31 
32 #include "trace.h"
33 
34 #define FTRACE_WARN_ON(cond)			\
35 	do {					\
36 		if (WARN_ON(cond))		\
37 			ftrace_kill();		\
38 	} while (0)
39 
40 #define FTRACE_WARN_ON_ONCE(cond)		\
41 	do {					\
42 		if (WARN_ON_ONCE(cond))		\
43 			ftrace_kill();		\
44 	} while (0)
45 
46 /* ftrace_enabled is a flag that turns ftrace on or off */
47 int ftrace_enabled __read_mostly;
48 static int last_ftrace_enabled;
49 
50 /* set when tracing only a pid */
51 struct pid *ftrace_pid_trace;
52 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
53 
54 /* Quick disabling of function tracer. */
55 int function_trace_stop;
56 
57 /*
58  * ftrace_disabled is set when an anomaly is discovered.
59  * ftrace_disabled is much stronger than ftrace_enabled.
60  */
61 static int ftrace_disabled __read_mostly;
62 
63 static DEFINE_SPINLOCK(ftrace_lock);
64 static DEFINE_MUTEX(ftrace_sysctl_lock);
65 static DEFINE_MUTEX(ftrace_start_lock);
66 
67 static struct ftrace_ops ftrace_list_end __read_mostly =
68 {
69 	.func = ftrace_stub,
70 };
71 
72 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
73 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
74 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
75 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
76 
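/*
 * Dispatcher used when more than one ftrace_ops is registered: walk
 * the whole list and call every handler in turn.  The list is walked
 * without a lock; __register_ftrace_function() only ever adds at the
 * head with a write barrier, so a racing reader sees either the old
 * or the new head, never a half-initialized entry.
 */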
77 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
78 {
79 	struct ftrace_ops *op = ftrace_list;
80 
81 	/* in case someone actually ports this to alpha! */
82 	read_barrier_depends();
83 
84 	while (op != &ftrace_list_end) {
85 		/* silly alpha */
86 		read_barrier_depends();
87 		op->func(ip, parent_ip);
88 		op = op->next;
89 	}
90 }
91 
92 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
93 {
94 	if (!test_tsk_trace_trace(current))
95 		return;
96 
97 	ftrace_pid_function(ip, parent_ip);
98 }
99 
100 static void set_ftrace_pid_function(ftrace_func_t func)
101 {
102 	/* do not set ftrace_pid_function to itself! */
103 	if (func != ftrace_pid_func)
104 		ftrace_pid_function = func;
105 }
106 
107 /**
108  * clear_ftrace_function - reset the ftrace function
109  *
110  * This NULLs the ftrace function and in essence stops
111  * tracing.  There may be a lag before other CPUs see the change.
112  */
113 void clear_ftrace_function(void)
114 {
115 	ftrace_trace_function = ftrace_stub;
116 	__ftrace_trace_function = ftrace_stub;
117 	ftrace_pid_function = ftrace_stub;
118 }
119 
120 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
121 /*
122  * For those archs that do not test function_trace_stop in their
123  * mcount call site, we need to do it from C.
124  */
125 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
126 {
127 	if (function_trace_stop)
128 		return;
129 
130 	__ftrace_trace_function(ip, parent_ip);
131 }
132 #endif
133 
134 static int __register_ftrace_function(struct ftrace_ops *ops)
135 {
136 	/* should not be called from interrupt context */
137 	spin_lock(&ftrace_lock);
138 
139 	ops->next = ftrace_list;
140 	/*
141 	 * We are entering ops into the ftrace_list but another
142 	 * CPU might be walking that list. We need to make sure
143 	 * the ops->next pointer is valid before another CPU sees
144 	 * the ops pointer included into the ftrace_list.
145 	 */
146 	smp_wmb();
147 	ftrace_list = ops;
148 
149 	if (ftrace_enabled) {
150 		ftrace_func_t func;
151 
152 		if (ops->next == &ftrace_list_end)
153 			func = ops->func;
154 		else
155 			func = ftrace_list_func;
156 
157 		if (ftrace_pid_trace) {
158 			set_ftrace_pid_function(func);
159 			func = ftrace_pid_func;
160 		}
161 
162 		/*
163 		 * For one func, simply call it directly.
164 		 * For more than one func, call the chain.
165 		 */
166 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
167 		ftrace_trace_function = func;
168 #else
169 		__ftrace_trace_function = func;
170 		ftrace_trace_function = ftrace_test_stop_func;
171 #endif
172 	}
173 
174 	spin_unlock(&ftrace_lock);
175 
176 	return 0;
177 }
178 
179 static int __unregister_ftrace_function(struct ftrace_ops *ops)
180 {
181 	struct ftrace_ops **p;
182 	int ret = 0;
183 
184 	/* should not be called from interrupt context */
185 	spin_lock(&ftrace_lock);
186 
187 	/*
188 	 * If we are removing the last function, then simply point
189 	 * to the ftrace_stub.
190 	 */
191 	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
192 		ftrace_trace_function = ftrace_stub;
193 		ftrace_list = &ftrace_list_end;
194 		goto out;
195 	}
196 
197 	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
198 		if (*p == ops)
199 			break;
200 
201 	if (*p != ops) {
202 		ret = -1;
203 		goto out;
204 	}
205 
206 	*p = (*p)->next;
207 
208 	if (ftrace_enabled) {
209 		/* If we only have one func left, then call that directly */
210 		if (ftrace_list->next == &ftrace_list_end) {
211 			ftrace_func_t func = ftrace_list->func;
212 
213 			if (ftrace_pid_trace) {
214 				set_ftrace_pid_function(func);
215 				func = ftrace_pid_func;
216 			}
217 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
218 			ftrace_trace_function = func;
219 #else
220 			__ftrace_trace_function = func;
221 #endif
222 		}
223 	}
224 
225  out:
226 	spin_unlock(&ftrace_lock);
227 
228 	return ret;
229 }
230 
231 static void ftrace_update_pid_func(void)
232 {
233 	ftrace_func_t func;
234 
235 	/* should not be called from interrupt context */
236 	spin_lock(&ftrace_lock);
237 
238 	if (ftrace_trace_function == ftrace_stub)
239 		goto out;
240 
241 	func = ftrace_trace_function;
242 
243 	if (ftrace_pid_trace) {
244 		set_ftrace_pid_function(func);
245 		func = ftrace_pid_func;
246 	} else {
247 		if (func == ftrace_pid_func)
248 			func = ftrace_pid_function;
249 	}
250 
251 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
252 	ftrace_trace_function = func;
253 #else
254 	__ftrace_trace_function = func;
255 #endif
256 
257  out:
258 	spin_unlock(&ftrace_lock);
259 }
260 
261 #ifdef CONFIG_DYNAMIC_FTRACE
262 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
263 # error Dynamic ftrace depends on MCOUNT_RECORD
264 #endif
265 
266 /*
267  * Since MCOUNT_ADDR may point to mcount itself, we do not want
268  * to confuse it with a reference read from the code while we
269  * parse the objcopy output of the text section. Use a variable for
270  * it instead.
271  */
272 static unsigned long mcount_addr = MCOUNT_ADDR;
273 
274 enum {
275 	FTRACE_ENABLE_CALLS		= (1 << 0),
276 	FTRACE_DISABLE_CALLS		= (1 << 1),
277 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
278 	FTRACE_ENABLE_MCOUNT		= (1 << 3),
279 	FTRACE_DISABLE_MCOUNT		= (1 << 4),
280 	FTRACE_START_FUNC_RET		= (1 << 5),
281 	FTRACE_STOP_FUNC_RET		= (1 << 6),
282 };
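
/*
 * These values form a bitmask: callers OR together the actions they
 * need and pass the result as the "command" argument down to
 * ftrace_run_update_code().  ftrace_startup(), for instance, always
 * sets FTRACE_ENABLE_CALLS and adds FTRACE_UPDATE_TRACE_FUNC when the
 * trace callback itself has changed.
 */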
283 
284 static int ftrace_filtered;
285 
286 static LIST_HEAD(ftrace_new_addrs);
287 
288 static DEFINE_MUTEX(ftrace_regex_lock);
289 
290 struct ftrace_page {
291 	struct ftrace_page	*next;
292 	unsigned long		index;
293 	struct dyn_ftrace	records[];
294 };
295 
296 #define ENTRIES_PER_PAGE \
297   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
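
/*
 * Back-of-the-envelope sizing (illustrative, arch dependent): with 4K
 * pages and a struct dyn_ftrace of roughly 16 to 32 bytes, this gives
 * in the neighborhood of 125 to 250 records per page.
 */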
298 
299 /* estimate from running different kernels */
300 #define NR_TO_INIT		10000
301 
302 static struct ftrace_page	*ftrace_pages_start;
303 static struct ftrace_page	*ftrace_pages;
304 
305 static struct dyn_ftrace *ftrace_free_records;
306 
307 
308 #ifdef CONFIG_KPROBES
309 
310 static int frozen_record_count;
311 
312 static inline void freeze_record(struct dyn_ftrace *rec)
313 {
314 	if (!(rec->flags & FTRACE_FL_FROZEN)) {
315 		rec->flags |= FTRACE_FL_FROZEN;
316 		frozen_record_count++;
317 	}
318 }
319 
320 static inline void unfreeze_record(struct dyn_ftrace *rec)
321 {
322 	if (rec->flags & FTRACE_FL_FROZEN) {
323 		rec->flags &= ~FTRACE_FL_FROZEN;
324 		frozen_record_count--;
325 	}
326 }
327 
328 static inline int record_frozen(struct dyn_ftrace *rec)
329 {
330 	return rec->flags & FTRACE_FL_FROZEN;
331 }
332 #else
333 # define freeze_record(rec)			({ 0; })
334 # define unfreeze_record(rec)			({ 0; })
335 # define record_frozen(rec)			({ 0; })
336 #endif /* CONFIG_KPROBES */
337 
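/*
 * Freed records are kept on a LIFO free list.  Since a freed record
 * carries no live data, its ->ip field is reused as the "next"
 * pointer, and FTRACE_FL_FREE lets ftrace_alloc_dyn_node() sanity
 * check the head of the list before reusing it.
 */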
338 static void ftrace_free_rec(struct dyn_ftrace *rec)
339 {
340 	rec->ip = (unsigned long)ftrace_free_records;
341 	ftrace_free_records = rec;
342 	rec->flags |= FTRACE_FL_FREE;
343 }
344 
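/*
 * Drop every record whose mcount site falls inside [start, start+size).
 * This is used when a range of text disappears (module unload, for
 * example) so that stale records are never patched again.
 */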
345 void ftrace_release(void *start, unsigned long size)
346 {
347 	struct dyn_ftrace *rec;
348 	struct ftrace_page *pg;
349 	unsigned long s = (unsigned long)start;
350 	unsigned long e = s + size;
351 	int i;
352 
353 	if (ftrace_disabled || !start)
354 		return;
355 
356 	/* should not be called from interrupt context */
357 	spin_lock(&ftrace_lock);
358 
359 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
360 		for (i = 0; i < pg->index; i++) {
361 			rec = &pg->records[i];
362 
363 			if ((rec->ip >= s) && (rec->ip < e))
364 				ftrace_free_rec(rec);
365 		}
366 	}
367 	spin_unlock(&ftrace_lock);
368 }
369 
370 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
371 {
372 	struct dyn_ftrace *rec;
373 
374 	/* First check for freed records */
375 	if (ftrace_free_records) {
376 		rec = ftrace_free_records;
377 
378 		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
379 			FTRACE_WARN_ON_ONCE(1);
380 			ftrace_free_records = NULL;
381 			return NULL;
382 		}
383 
384 		ftrace_free_records = (void *)rec->ip;
385 		memset(rec, 0, sizeof(*rec));
386 		return rec;
387 	}
388 
389 	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
390 		if (!ftrace_pages->next) {
391 			/* allocate another page */
392 			ftrace_pages->next =
393 				(void *)get_zeroed_page(GFP_KERNEL);
394 			if (!ftrace_pages->next)
395 				return NULL;
396 		}
397 		ftrace_pages = ftrace_pages->next;
398 	}
399 
400 	return &ftrace_pages->records[ftrace_pages->index++];
401 }
402 
403 static struct dyn_ftrace *
404 ftrace_record_ip(unsigned long ip)
405 {
406 	struct dyn_ftrace *rec;
407 
408 	if (ftrace_disabled)
409 		return NULL;
410 
411 	rec = ftrace_alloc_dyn_node(ip);
412 	if (!rec)
413 		return NULL;
414 
415 	rec->ip = ip;
416 
417 	list_add(&rec->list, &ftrace_new_addrs);
418 
419 	return rec;
420 }
421 
422 static void print_ip_ins(const char *fmt, unsigned char *p)
423 {
424 	int i;
425 
426 	printk(KERN_CONT "%s", fmt);
427 
428 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
429 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
430 }
431 
432 static void ftrace_bug(int failed, unsigned long ip)
433 {
434 	switch (failed) {
435 	case -EFAULT:
436 		FTRACE_WARN_ON_ONCE(1);
437 		pr_info("ftrace faulted on modifying ");
438 		print_ip_sym(ip);
439 		break;
440 	case -EINVAL:
441 		FTRACE_WARN_ON_ONCE(1);
442 		pr_info("ftrace failed to modify ");
443 		print_ip_sym(ip);
444 		print_ip_ins(" actual: ", (unsigned char *)ip);
445 		printk(KERN_CONT "\n");
446 		break;
447 	case -EPERM:
448 		FTRACE_WARN_ON_ONCE(1);
449 		pr_info("ftrace faulted on writing ");
450 		print_ip_sym(ip);
451 		break;
452 	default:
453 		FTRACE_WARN_ON_ONCE(1);
454 		pr_info("ftrace faulted on unknown error ");
455 		print_ip_sym(ip);
456 	}
457 }
458 
459 
460 static int
461 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
462 {
463 	unsigned long ip, fl;
464 	unsigned long ftrace_addr;
465 
466 	ftrace_addr = (unsigned long)ftrace_caller;
467 
468 	ip = rec->ip;
469 
470 	/*
471 	 * If this record is not to be traced and
472 	 * it is not enabled then do nothing.
473 	 *
474 	 * If this record is not to be traced but
475 	 * it is enabled, then disable it.
476 	 *
477 	 */
478 	if (rec->flags & FTRACE_FL_NOTRACE) {
479 		if (rec->flags & FTRACE_FL_ENABLED)
480 			rec->flags &= ~FTRACE_FL_ENABLED;
481 		else
482 			return 0;
483 
484 	} else if (ftrace_filtered && enable) {
485 		/*
486 		 * Filtering is on:
487 		 */
488 
489 		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
490 
491 		/* Record is filtered and enabled, do nothing */
492 		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
493 			return 0;
494 
495 		/* Record is not filtered and not enabled, do nothing */
496 		if (!fl)
497 			return 0;
498 
499 		/* Record is not filtered but enabled, disable it */
500 		if (fl == FTRACE_FL_ENABLED)
501 			rec->flags &= ~FTRACE_FL_ENABLED;
502 		else
503 			/* Otherwise record is filtered but not enabled, enable it */
504 			rec->flags |= FTRACE_FL_ENABLED;
505 	} else {
506 		/* Disabling, or filtering is not in effect */
507 
508 		if (enable) {
509 			/* if record is enabled, do nothing */
510 			if (rec->flags & FTRACE_FL_ENABLED)
511 				return 0;
512 
513 			rec->flags |= FTRACE_FL_ENABLED;
514 
515 		} else {
516 
517 			/* if record is not enabled, do nothing */
518 			if (!(rec->flags & FTRACE_FL_ENABLED))
519 				return 0;
520 
521 			rec->flags &= ~FTRACE_FL_ENABLED;
522 		}
523 	}
524 
525 	if (rec->flags & FTRACE_FL_ENABLED)
526 		return ftrace_make_call(rec, ftrace_addr);
527 	else
528 		return ftrace_make_nop(NULL, rec, ftrace_addr);
529 }
530 
531 static void ftrace_replace_code(int enable)
532 {
533 	int i, failed;
534 	struct dyn_ftrace *rec;
535 	struct ftrace_page *pg;
536 
537 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
538 		for (i = 0; i < pg->index; i++) {
539 			rec = &pg->records[i];
540 
541 			/*
542 			 * Skip over free records and records that have
543 			 * failed.
544 			 */
545 			if (rec->flags & FTRACE_FL_FREE ||
546 			    rec->flags & FTRACE_FL_FAILED)
547 				continue;
548 
549 			/* ignore updates to this record's mcount site */
550 			if (get_kprobe((void *)rec->ip)) {
551 				freeze_record(rec);
552 				continue;
553 			} else {
554 				unfreeze_record(rec);
555 			}
556 
557 			failed = __ftrace_replace_code(rec, enable);
558 			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
559 				rec->flags |= FTRACE_FL_FAILED;
560 				if ((system_state == SYSTEM_BOOTING) ||
561 				    !core_kernel_text(rec->ip)) {
562 					ftrace_free_rec(rec);
563 				} else
564 					ftrace_bug(failed, rec->ip);
565 			}
566 		}
567 	}
568 }
569 
570 static int
571 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
572 {
573 	unsigned long ip;
574 	int ret;
575 
576 	ip = rec->ip;
577 
578 	ret = ftrace_make_nop(mod, rec, mcount_addr);
579 	if (ret) {
580 		ftrace_bug(ret, ip);
581 		rec->flags |= FTRACE_FL_FAILED;
582 		return 0;
583 	}
584 	return 1;
585 }
586 
587 static int __ftrace_modify_code(void *data)
588 {
589 	int *command = data;
590 
591 	if (*command & FTRACE_ENABLE_CALLS)
592 		ftrace_replace_code(1);
593 	else if (*command & FTRACE_DISABLE_CALLS)
594 		ftrace_replace_code(0);
595 
596 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
597 		ftrace_update_ftrace_func(ftrace_trace_function);
598 
599 	if (*command & FTRACE_START_FUNC_RET)
600 		ftrace_enable_ftrace_graph_caller();
601 	else if (*command & FTRACE_STOP_FUNC_RET)
602 		ftrace_disable_ftrace_graph_caller();
603 
604 	return 0;
605 }
606 
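/*
 * Patching call sites while other CPUs may be executing them is not
 * safe, so the actual modification runs under stop_machine(), which
 * parks every other CPU for the duration of __ftrace_modify_code().
 */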
607 static void ftrace_run_update_code(int command)
608 {
609 	stop_machine(__ftrace_modify_code, &command, NULL);
610 }
611 
612 static ftrace_func_t saved_ftrace_func;
613 static int ftrace_start_up;
614 
615 static void ftrace_startup_enable(int command)
616 {
617 	if (saved_ftrace_func != ftrace_trace_function) {
618 		saved_ftrace_func = ftrace_trace_function;
619 		command |= FTRACE_UPDATE_TRACE_FUNC;
620 	}
621 
622 	if (!command || !ftrace_enabled)
623 		return;
624 
625 	ftrace_run_update_code(command);
626 }
627 
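/*
 * ftrace_start_up counts the users that want the mcount calls live:
 * ftrace_startup() increments it and enables the calls, while
 * ftrace_shutdown() decrements it and only disables the calls once
 * the count drops back to zero.
 */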
628 static void ftrace_startup(int command)
629 {
630 	if (unlikely(ftrace_disabled))
631 		return;
632 
633 	mutex_lock(&ftrace_start_lock);
634 	ftrace_start_up++;
635 	command |= FTRACE_ENABLE_CALLS;
636 
637 	ftrace_startup_enable(command);
638 
639 	mutex_unlock(&ftrace_start_lock);
640 }
641 
642 static void ftrace_shutdown(int command)
643 {
644 	if (unlikely(ftrace_disabled))
645 		return;
646 
647 	mutex_lock(&ftrace_start_lock);
648 	ftrace_start_up--;
649 	if (!ftrace_start_up)
650 		command |= FTRACE_DISABLE_CALLS;
651 
652 	if (saved_ftrace_func != ftrace_trace_function) {
653 		saved_ftrace_func = ftrace_trace_function;
654 		command |= FTRACE_UPDATE_TRACE_FUNC;
655 	}
656 
657 	if (!command || !ftrace_enabled)
658 		goto out;
659 
660 	ftrace_run_update_code(command);
661  out:
662 	mutex_unlock(&ftrace_start_lock);
663 }
664 
665 static void ftrace_startup_sysctl(void)
666 {
667 	int command = FTRACE_ENABLE_MCOUNT;
668 
669 	if (unlikely(ftrace_disabled))
670 		return;
671 
672 	mutex_lock(&ftrace_start_lock);
673 	/* Force update next time */
674 	saved_ftrace_func = NULL;
675 	/* ftrace_start_up is true if we want ftrace running */
676 	if (ftrace_start_up)
677 		command |= FTRACE_ENABLE_CALLS;
678 
679 	ftrace_run_update_code(command);
680 	mutex_unlock(&ftrace_start_lock);
681 }
682 
683 static void ftrace_shutdown_sysctl(void)
684 {
685 	int command = FTRACE_DISABLE_MCOUNT;
686 
687 	if (unlikely(ftrace_disabled))
688 		return;
689 
690 	mutex_lock(&ftrace_start_lock);
691 	/* ftrace_start_up is true if ftrace is running */
692 	if (ftrace_start_up)
693 		command |= FTRACE_DISABLE_CALLS;
694 
695 	ftrace_run_update_code(command);
696 	mutex_unlock(&ftrace_start_lock);
697 }
698 
699 static cycle_t		ftrace_update_time;
700 static unsigned long	ftrace_update_cnt;
701 unsigned long		ftrace_update_tot_cnt;
702 
703 static int ftrace_update_code(struct module *mod)
704 {
705 	struct dyn_ftrace *p, *t;
706 	cycle_t start, stop;
707 
708 	start = ftrace_now(raw_smp_processor_id());
709 	ftrace_update_cnt = 0;
710 
711 	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
712 
713 		/* If something went wrong, bail without enabling anything */
714 		if (unlikely(ftrace_disabled))
715 			return -1;
716 
717 		list_del_init(&p->list);
718 
719 		/* convert record (i.e., patch the mcount call with a NOP) */
720 		if (ftrace_code_disable(mod, p)) {
721 			p->flags |= FTRACE_FL_CONVERTED;
722 			ftrace_update_cnt++;
723 		} else
724 			ftrace_free_rec(p);
725 	}
726 
727 	stop = ftrace_now(raw_smp_processor_id());
728 	ftrace_update_time = stop - start;
729 	ftrace_update_tot_cnt += ftrace_update_cnt;
730 
731 	return 0;
732 }
733 
734 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
735 {
736 	struct ftrace_page *pg;
737 	int cnt;
738 	int i;
739 
740 	/* allocate a few pages */
741 	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
742 	if (!ftrace_pages_start)
743 		return -1;
744 
745 	/*
746 	 * Allocate a few more pages.
747 	 *
748 	 * TODO: have some parser search vmlinux before
749 	 *   final linking to find all calls to ftrace.
750 	 *   Then we can:
751 	 *    a) know how many pages to allocate.
752 	 *     and/or
753 	 *    b) set up the table then.
754 	 *
755 	 *  The dynamic code is still necessary for
756 	 *  modules.
757 	 */
758 
759 	pg = ftrace_pages = ftrace_pages_start;
760 
761 	cnt = num_to_init / ENTRIES_PER_PAGE;
762 	pr_info("ftrace: allocating %ld entries in %d pages\n",
763 		num_to_init, cnt + 1);
764 
765 	for (i = 0; i < cnt; i++) {
766 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
767 
768 		/* If we fail, we'll try later anyway */
769 		if (!pg->next)
770 			break;
771 
772 		pg = pg->next;
773 	}
774 
775 	return 0;
776 }
777 
778 enum {
779 	FTRACE_ITER_FILTER	= (1 << 0),
780 	FTRACE_ITER_CONT	= (1 << 1),
781 	FTRACE_ITER_NOTRACE	= (1 << 2),
782 	FTRACE_ITER_FAILURES	= (1 << 3),
783 };
784 
785 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
786 
787 struct ftrace_iterator {
788 	struct ftrace_page	*pg;
789 	unsigned		idx;
790 	unsigned		flags;
791 	unsigned char		buffer[FTRACE_BUFF_MAX+1];
792 	unsigned		buffer_idx;
793 	unsigned		filtered;
794 };
795 
796 static void *
797 t_next(struct seq_file *m, void *v, loff_t *pos)
798 {
799 	struct ftrace_iterator *iter = m->private;
800 	struct dyn_ftrace *rec = NULL;
801 
802 	(*pos)++;
803 
804 	/* should not be called from interrupt context */
805 	spin_lock(&ftrace_lock);
806  retry:
807 	if (iter->idx >= iter->pg->index) {
808 		if (iter->pg->next) {
809 			iter->pg = iter->pg->next;
810 			iter->idx = 0;
811 			goto retry;
812 		} else {
813 			iter->idx = -1;
814 		}
815 	} else {
816 		rec = &iter->pg->records[iter->idx++];
817 		if ((rec->flags & FTRACE_FL_FREE) ||
818 
819 		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
820 		     (rec->flags & FTRACE_FL_FAILED)) ||
821 
822 		    ((iter->flags & FTRACE_ITER_FAILURES) &&
823 		     !(rec->flags & FTRACE_FL_FAILED)) ||
824 
825 		    ((iter->flags & FTRACE_ITER_FILTER) &&
826 		     !(rec->flags & FTRACE_FL_FILTER)) ||
827 
828 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
829 		     !(rec->flags & FTRACE_FL_NOTRACE))) {
830 			rec = NULL;
831 			goto retry;
832 		}
833 	}
834 	spin_unlock(&ftrace_lock);
835 
836 	return rec;
837 }
838 
839 static void *t_start(struct seq_file *m, loff_t *pos)
840 {
841 	struct ftrace_iterator *iter = m->private;
842 	void *p = NULL;
843 
844 	if (*pos > 0) {
845 		if (iter->idx < 0)
846 			return p;
847 		(*pos)--;
848 		iter->idx--;
849 	}
850 
851 	p = t_next(m, p, pos);
852 
853 	return p;
854 }
855 
856 static void t_stop(struct seq_file *m, void *p)
857 {
858 }
859 
860 static int t_show(struct seq_file *m, void *v)
861 {
862 	struct dyn_ftrace *rec = v;
863 	char str[KSYM_SYMBOL_LEN];
864 
865 	if (!rec)
866 		return 0;
867 
868 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
869 
870 	seq_printf(m, "%s\n", str);
871 
872 	return 0;
873 }
874 
875 static struct seq_operations show_ftrace_seq_ops = {
876 	.start = t_start,
877 	.next = t_next,
878 	.stop = t_stop,
879 	.show = t_show,
880 };
881 
882 static int
883 ftrace_avail_open(struct inode *inode, struct file *file)
884 {
885 	struct ftrace_iterator *iter;
886 	int ret;
887 
888 	if (unlikely(ftrace_disabled))
889 		return -ENODEV;
890 
891 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
892 	if (!iter)
893 		return -ENOMEM;
894 
895 	iter->pg = ftrace_pages_start;
896 
897 	ret = seq_open(file, &show_ftrace_seq_ops);
898 	if (!ret) {
899 		struct seq_file *m = file->private_data;
900 
901 		m->private = iter;
902 	} else {
903 		kfree(iter);
904 	}
905 
906 	return ret;
907 }
908 
909 static int ftrace_avail_release(struct inode *inode, struct file *file)
910 {
911 	struct seq_file *m = (struct seq_file *)file->private_data;
912 	struct ftrace_iterator *iter = m->private;
913 
914 	seq_release(inode, file);
915 	kfree(iter);
916 
917 	return 0;
918 }
919 
920 static int
921 ftrace_failures_open(struct inode *inode, struct file *file)
922 {
923 	int ret;
924 	struct seq_file *m;
925 	struct ftrace_iterator *iter;
926 
927 	ret = ftrace_avail_open(inode, file);
928 	if (!ret) {
929 		m = (struct seq_file *)file->private_data;
930 		iter = (struct ftrace_iterator *)m->private;
931 		iter->flags = FTRACE_ITER_FAILURES;
932 	}
933 
934 	return ret;
935 }
936 
937 
938 static void ftrace_filter_reset(int enable)
939 {
940 	struct ftrace_page *pg;
941 	struct dyn_ftrace *rec;
942 	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
943 	unsigned i;
944 
945 	/* should not be called from interrupt context */
946 	spin_lock(&ftrace_lock);
947 	if (enable)
948 		ftrace_filtered = 0;
949 	pg = ftrace_pages_start;
950 	while (pg) {
951 		for (i = 0; i < pg->index; i++) {
952 			rec = &pg->records[i];
953 			if (rec->flags & FTRACE_FL_FAILED)
954 				continue;
955 			rec->flags &= ~type;
956 		}
957 		pg = pg->next;
958 	}
959 	spin_unlock(&ftrace_lock);
960 }
961 
962 static int
963 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
964 {
965 	struct ftrace_iterator *iter;
966 	int ret = 0;
967 
968 	if (unlikely(ftrace_disabled))
969 		return -ENODEV;
970 
971 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
972 	if (!iter)
973 		return -ENOMEM;
974 
975 	mutex_lock(&ftrace_regex_lock);
976 	if ((file->f_mode & FMODE_WRITE) &&
977 	    !(file->f_flags & O_APPEND))
978 		ftrace_filter_reset(enable);
979 
980 	if (file->f_mode & FMODE_READ) {
981 		iter->pg = ftrace_pages_start;
982 		iter->flags = enable ? FTRACE_ITER_FILTER :
983 			FTRACE_ITER_NOTRACE;
984 
985 		ret = seq_open(file, &show_ftrace_seq_ops);
986 		if (!ret) {
987 			struct seq_file *m = file->private_data;
988 			m->private = iter;
989 		} else
990 			kfree(iter);
991 	} else
992 		file->private_data = iter;
993 	mutex_unlock(&ftrace_regex_lock);
994 
995 	return ret;
996 }
997 
998 static int
999 ftrace_filter_open(struct inode *inode, struct file *file)
1000 {
1001 	return ftrace_regex_open(inode, file, 1);
1002 }
1003 
1004 static int
1005 ftrace_notrace_open(struct inode *inode, struct file *file)
1006 {
1007 	return ftrace_regex_open(inode, file, 0);
1008 }
1009 
1010 static ssize_t
1011 ftrace_regex_read(struct file *file, char __user *ubuf,
1012 		       size_t cnt, loff_t *ppos)
1013 {
1014 	if (file->f_mode & FMODE_READ)
1015 		return seq_read(file, ubuf, cnt, ppos);
1016 	else
1017 		return -EPERM;
1018 }
1019 
1020 static loff_t
1021 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1022 {
1023 	loff_t ret;
1024 
1025 	if (file->f_mode & FMODE_READ)
1026 		ret = seq_lseek(file, offset, origin);
1027 	else
1028 		file->f_pos = ret = 1;
1029 
1030 	return ret;
1031 }
1032 
1033 enum {
1034 	MATCH_FULL,
1035 	MATCH_FRONT_ONLY,
1036 	MATCH_MIDDLE_ONLY,
1037 	MATCH_END_ONLY,
1038 };
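
/*
 * The match type is picked from where '*' appears in the pattern:
 *
 *	"foo"	MATCH_FULL	  matches the symbol name exactly
 *	"foo*"	MATCH_FRONT_ONLY  matches names starting with "foo"
 *	"*foo"	MATCH_END_ONLY	  matches names ending in "foo"
 *	"*foo*"	MATCH_MIDDLE_ONLY matches names containing "foo"
 *
 * A leading '!' inverts the sense and clears the flag instead of
 * setting it.
 */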
1039 
1040 static void
1041 ftrace_match(unsigned char *buff, int len, int enable)
1042 {
1043 	char str[KSYM_SYMBOL_LEN];
1044 	char *search = NULL;
1045 	struct ftrace_page *pg;
1046 	struct dyn_ftrace *rec;
1047 	int type = MATCH_FULL;
1048 	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1049 	unsigned i, match = 0, search_len = 0;
1050 	int not = 0;
1051 
1052 	if (buff[0] == '!') {
1053 		not = 1;
1054 		buff++;
1055 		len--;
1056 	}
1057 
1058 	for (i = 0; i < len; i++) {
1059 		if (buff[i] == '*') {
1060 			if (!i) {
1061 				search = buff + i + 1;
1062 				type = MATCH_END_ONLY;
1063 				search_len = len - (i + 1);
1064 			} else {
1065 				if (type == MATCH_END_ONLY) {
1066 					type = MATCH_MIDDLE_ONLY;
1067 				} else {
1068 					match = i;
1069 					type = MATCH_FRONT_ONLY;
1070 				}
1071 				buff[i] = 0;
1072 				break;
1073 			}
1074 		}
1075 	}
1076 
1077 	/* should not be called from interrupt context */
1078 	spin_lock(&ftrace_lock);
1079 	if (enable)
1080 		ftrace_filtered = 1;
1081 	pg = ftrace_pages_start;
1082 	while (pg) {
1083 		for (i = 0; i < pg->index; i++) {
1084 			int matched = 0;
1085 			char *ptr;
1086 
1087 			rec = &pg->records[i];
1088 			if (rec->flags & FTRACE_FL_FAILED)
1089 				continue;
1090 			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1091 			switch (type) {
1092 			case MATCH_FULL:
1093 				if (strcmp(str, buff) == 0)
1094 					matched = 1;
1095 				break;
1096 			case MATCH_FRONT_ONLY:
1097 				if (memcmp(str, buff, match) == 0)
1098 					matched = 1;
1099 				break;
1100 			case MATCH_MIDDLE_ONLY:
1101 				if (strstr(str, search))
1102 					matched = 1;
1103 				break;
1104 			case MATCH_END_ONLY:
1105 				ptr = strstr(str, search);
1106 				if (ptr && (ptr[search_len] == 0))
1107 					matched = 1;
1108 				break;
1109 			}
1110 			if (matched) {
1111 				if (not)
1112 					rec->flags &= ~flag;
1113 				else
1114 					rec->flags |= flag;
1115 			}
1116 		}
1117 		pg = pg->next;
1118 	}
1119 	spin_unlock(&ftrace_lock);
1120 }
1121 
1122 static ssize_t
1123 ftrace_regex_write(struct file *file, const char __user *ubuf,
1124 		   size_t cnt, loff_t *ppos, int enable)
1125 {
1126 	struct ftrace_iterator *iter;
1127 	char ch;
1128 	size_t read = 0;
1129 	ssize_t ret;
1130 
1131 	if (!cnt)
1132 		return 0;
1133 
1134 	mutex_lock(&ftrace_regex_lock);
1135 
1136 	if (file->f_mode & FMODE_READ) {
1137 		struct seq_file *m = file->private_data;
1138 		iter = m->private;
1139 	} else
1140 		iter = file->private_data;
1141 
1142 	if (!*ppos) {
1143 		iter->flags &= ~FTRACE_ITER_CONT;
1144 		iter->buffer_idx = 0;
1145 	}
1146 
1147 	ret = get_user(ch, ubuf++);
1148 	if (ret)
1149 		goto out;
1150 	read++;
1151 	cnt--;
1152 
1153 	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1154 		/* skip white space */
1155 		while (cnt && isspace(ch)) {
1156 			ret = get_user(ch, ubuf++);
1157 			if (ret)
1158 				goto out;
1159 			read++;
1160 			cnt--;
1161 		}
1162 
1163 		if (isspace(ch)) {
1164 			file->f_pos += read;
1165 			ret = read;
1166 			goto out;
1167 		}
1168 
1169 		iter->buffer_idx = 0;
1170 	}
1171 
1172 	while (cnt && !isspace(ch)) {
1173 		if (iter->buffer_idx < FTRACE_BUFF_MAX)
1174 			iter->buffer[iter->buffer_idx++] = ch;
1175 		else {
1176 			ret = -EINVAL;
1177 			goto out;
1178 		}
1179 		ret = get_user(ch, ubuf++);
1180 		if (ret)
1181 			goto out;
1182 		read++;
1183 		cnt--;
1184 	}
1185 
1186 	if (isspace(ch)) {
1187 		iter->filtered++;
1188 		iter->buffer[iter->buffer_idx] = 0;
1189 		ftrace_match(iter->buffer, iter->buffer_idx, enable);
1190 		iter->buffer_idx = 0;
1191 	} else
1192 		iter->flags |= FTRACE_ITER_CONT;
1193 
1194 
1195 	file->f_pos += read;
1196 
1197 	ret = read;
1198  out:
1199 	mutex_unlock(&ftrace_regex_lock);
1200 
1201 	return ret;
1202 }
1203 
1204 static ssize_t
1205 ftrace_filter_write(struct file *file, const char __user *ubuf,
1206 		    size_t cnt, loff_t *ppos)
1207 {
1208 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1209 }
1210 
1211 static ssize_t
1212 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1213 		     size_t cnt, loff_t *ppos)
1214 {
1215 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1216 }
1217 
1218 static void
1219 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1220 {
1221 	if (unlikely(ftrace_disabled))
1222 		return;
1223 
1224 	mutex_lock(&ftrace_regex_lock);
1225 	if (reset)
1226 		ftrace_filter_reset(enable);
1227 	if (buf)
1228 		ftrace_match(buf, len, enable);
1229 	mutex_unlock(&ftrace_regex_lock);
1230 }
1231 
1232 /**
1233  * ftrace_set_filter - set a function to filter on in ftrace
1234  * @buf: the string that holds the function filter text.
1235  * @len: the length of the string.
1236  * @reset: non-zero to reset all filters before applying this filter.
1237  *
1238  * Filters denote which functions should be enabled when tracing is enabled.
1239  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1240  */
1241 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1242 {
1243 	ftrace_set_regex(buf, len, reset, 1);
1244 }
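
/*
 * Illustrative call from a hypothetical tracer:
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 *
 * drops any previous filters and then limits tracing to functions
 * whose names begin with "sched_".
 */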
1245 
1246 /**
1247  * ftrace_set_notrace - set a function to not trace in ftrace
1248  * @buf: the string that holds the function notrace text.
1249  * @len: the length of the string.
1250  * @reset: non-zero to reset all filters before applying this filter.
1251  *
1252  * Notrace Filters denote which functions should not be enabled when tracing
1253  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1254  * for tracing.
1255  */
1256 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1257 {
1258 	ftrace_set_regex(buf, len, reset, 0);
1259 }
1260 
1261 static int
1262 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1263 {
1264 	struct seq_file *m = (struct seq_file *)file->private_data;
1265 	struct ftrace_iterator *iter;
1266 
1267 	mutex_lock(&ftrace_regex_lock);
1268 	if (file->f_mode & FMODE_READ) {
1269 		iter = m->private;
1270 
1271 		seq_release(inode, file);
1272 	} else
1273 		iter = file->private_data;
1274 
1275 	if (iter->buffer_idx) {
1276 		iter->filtered++;
1277 		iter->buffer[iter->buffer_idx] = 0;
1278 		ftrace_match(iter->buffer, iter->buffer_idx, enable);
1279 	}
1280 
1281 	mutex_lock(&ftrace_sysctl_lock);
1282 	mutex_lock(&ftrace_start_lock);
1283 	if (ftrace_start_up && ftrace_enabled)
1284 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1285 	mutex_unlock(&ftrace_start_lock);
1286 	mutex_unlock(&ftrace_sysctl_lock);
1287 
1288 	kfree(iter);
1289 	mutex_unlock(&ftrace_regex_lock);
1290 	return 0;
1291 }
1292 
1293 static int
1294 ftrace_filter_release(struct inode *inode, struct file *file)
1295 {
1296 	return ftrace_regex_release(inode, file, 1);
1297 }
1298 
1299 static int
1300 ftrace_notrace_release(struct inode *inode, struct file *file)
1301 {
1302 	return ftrace_regex_release(inode, file, 0);
1303 }
1304 
1305 static struct file_operations ftrace_avail_fops = {
1306 	.open = ftrace_avail_open,
1307 	.read = seq_read,
1308 	.llseek = seq_lseek,
1309 	.release = ftrace_avail_release,
1310 };
1311 
1312 static struct file_operations ftrace_failures_fops = {
1313 	.open = ftrace_failures_open,
1314 	.read = seq_read,
1315 	.llseek = seq_lseek,
1316 	.release = ftrace_avail_release,
1317 };
1318 
1319 static struct file_operations ftrace_filter_fops = {
1320 	.open = ftrace_filter_open,
1321 	.read = ftrace_regex_read,
1322 	.write = ftrace_filter_write,
1323 	.llseek = ftrace_regex_lseek,
1324 	.release = ftrace_filter_release,
1325 };
1326 
1327 static struct file_operations ftrace_notrace_fops = {
1328 	.open = ftrace_notrace_open,
1329 	.read = ftrace_regex_read,
1330 	.write = ftrace_notrace_write,
1331 	.llseek = ftrace_regex_lseek,
1332 	.release = ftrace_notrace_release,
1333 };
1334 
1335 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1336 
1337 static DEFINE_MUTEX(graph_lock);
1338 
1339 int ftrace_graph_count;
1340 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1341 
1342 static void *
1343 g_next(struct seq_file *m, void *v, loff_t *pos)
1344 {
1345 	unsigned long *array = m->private;
1346 	int index = *pos;
1347 
1348 	(*pos)++;
1349 
1350 	if (index >= ftrace_graph_count)
1351 		return NULL;
1352 
1353 	return &array[index];
1354 }
1355 
1356 static void *g_start(struct seq_file *m, loff_t *pos)
1357 {
1358 	void *p = NULL;
1359 
1360 	mutex_lock(&graph_lock);
1361 
1362 	p = g_next(m, p, pos);
1363 
1364 	return p;
1365 }
1366 
1367 static void g_stop(struct seq_file *m, void *p)
1368 {
1369 	mutex_unlock(&graph_lock);
1370 }
1371 
1372 static int g_show(struct seq_file *m, void *v)
1373 {
1374 	unsigned long *ptr = v;
1375 	char str[KSYM_SYMBOL_LEN];
1376 
1377 	if (!ptr)
1378 		return 0;
1379 
1380 	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1381 
1382 	seq_printf(m, "%s\n", str);
1383 
1384 	return 0;
1385 }
1386 
1387 static struct seq_operations ftrace_graph_seq_ops = {
1388 	.start = g_start,
1389 	.next = g_next,
1390 	.stop = g_stop,
1391 	.show = g_show,
1392 };
1393 
1394 static int
1395 ftrace_graph_open(struct inode *inode, struct file *file)
1396 {
1397 	int ret = 0;
1398 
1399 	if (unlikely(ftrace_disabled))
1400 		return -ENODEV;
1401 
1402 	mutex_lock(&graph_lock);
1403 	if ((file->f_mode & FMODE_WRITE) &&
1404 	    !(file->f_flags & O_APPEND)) {
1405 		ftrace_graph_count = 0;
1406 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1407 	}
1408 
1409 	if (file->f_mode & FMODE_READ) {
1410 		ret = seq_open(file, &ftrace_graph_seq_ops);
1411 		if (!ret) {
1412 			struct seq_file *m = file->private_data;
1413 			m->private = ftrace_graph_funcs;
1414 		}
1415 	} else
1416 		file->private_data = ftrace_graph_funcs;
1417 	mutex_unlock(&graph_lock);
1418 
1419 	return ret;
1420 }
1421 
1422 static ssize_t
1423 ftrace_graph_read(struct file *file, char __user *ubuf,
1424 		       size_t cnt, loff_t *ppos)
1425 {
1426 	if (file->f_mode & FMODE_READ)
1427 		return seq_read(file, ubuf, cnt, ppos);
1428 	else
1429 		return -EPERM;
1430 }
1431 
1432 static int
1433 ftrace_set_func(unsigned long *array, int idx, char *buffer)
1434 {
1435 	char str[KSYM_SYMBOL_LEN];
1436 	struct dyn_ftrace *rec;
1437 	struct ftrace_page *pg;
1438 	int found = 0;
1439 	int i, j;
1440 
1441 	if (ftrace_disabled)
1442 		return -ENODEV;
1443 
1444 	/* should not be called from interrupt context */
1445 	spin_lock(&ftrace_lock);
1446 
1447 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1448 		for (i = 0; i < pg->index; i++) {
1449 			rec = &pg->records[i];
1450 
1451 			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1452 				continue;
1453 
1454 			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1455 			if (strcmp(str, buffer) == 0) {
1456 				found = 1;
1457 				for (j = 0; j < idx; j++)
1458 					if (array[j] == rec->ip) {
1459 						found = 0;
1460 						break;
1461 					}
1462 				if (found)
1463 					array[idx] = rec->ip;
1464 				break;
1465 			}
1466 		}
1467 	}
1468 	spin_unlock(&ftrace_lock);
1469 
1470 	return found ? 0 : -EINVAL;
1471 }
1472 
1473 static ssize_t
1474 ftrace_graph_write(struct file *file, const char __user *ubuf,
1475 		   size_t cnt, loff_t *ppos)
1476 {
1477 	unsigned char buffer[FTRACE_BUFF_MAX+1];
1478 	unsigned long *array;
1479 	size_t read = 0;
1480 	ssize_t ret;
1481 	int index = 0;
1482 	char ch;
1483 
1484 	if (!cnt)
1485 		return 0;
1486 
1487 	mutex_lock(&graph_lock);
1488 
1489 	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1490 		ret = -EBUSY;
1491 		goto out;
1492 	}
1493 
1494 	if (file->f_mode & FMODE_READ) {
1495 		struct seq_file *m = file->private_data;
1496 		array = m->private;
1497 	} else
1498 		array = file->private_data;
1499 
1500 	ret = get_user(ch, ubuf++);
1501 	if (ret)
1502 		goto out;
1503 	read++;
1504 	cnt--;
1505 
1506 	/* skip white space */
1507 	while (cnt && isspace(ch)) {
1508 		ret = get_user(ch, ubuf++);
1509 		if (ret)
1510 			goto out;
1511 		read++;
1512 		cnt--;
1513 	}
1514 
1515 	if (isspace(ch)) {
1516 		*ppos += read;
1517 		ret = read;
1518 		goto out;
1519 	}
1520 
1521 	while (cnt && !isspace(ch)) {
1522 		if (index < FTRACE_BUFF_MAX)
1523 			buffer[index++] = ch;
1524 		else {
1525 			ret = -EINVAL;
1526 			goto out;
1527 		}
1528 		ret = get_user(ch, ubuf++);
1529 		if (ret)
1530 			goto out;
1531 		read++;
1532 		cnt--;
1533 	}
1534 	buffer[index] = 0;
1535 
1536 	/* we allow only one function per write */
1537 	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1538 	if (ret)
1539 		goto out;
1540 
1541 	ftrace_graph_count++;
1542 
1543 	file->f_pos += read;
1544 
1545 	ret = read;
1546  out:
1547 	mutex_unlock(&graph_lock);
1548 
1549 	return ret;
1550 }
1551 
1552 static const struct file_operations ftrace_graph_fops = {
1553 	.open = ftrace_graph_open,
1554 	.read = ftrace_graph_read,
1555 	.write = ftrace_graph_write,
1556 };
1557 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1558 
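/*
 * The files registered below are the user-space control surface.  A
 * typical session (assuming debugfs is mounted on /sys/kernel/debug):
 *
 *	# cat available_filter_functions | head
 *	# echo sys_open > set_ftrace_filter
 *	# echo '*lock*' > set_ftrace_notrace
 */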
1559 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
1560 {
1561 	struct dentry *entry;
1562 
1563 	entry = debugfs_create_file("available_filter_functions", 0444,
1564 				    d_tracer, NULL, &ftrace_avail_fops);
1565 	if (!entry)
1566 		pr_warning("Could not create debugfs "
1567 			   "'available_filter_functions' entry\n");
1568 
1569 	entry = debugfs_create_file("failures", 0444,
1570 				    d_tracer, NULL, &ftrace_failures_fops);
1571 	if (!entry)
1572 		pr_warning("Could not create debugfs 'failures' entry\n");
1573 
1574 	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1575 				    NULL, &ftrace_filter_fops);
1576 	if (!entry)
1577 		pr_warning("Could not create debugfs "
1578 			   "'set_ftrace_filter' entry\n");
1579 
1580 	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1581 				    NULL, &ftrace_notrace_fops);
1582 	if (!entry)
1583 		pr_warning("Could not create debugfs "
1584 			   "'set_ftrace_notrace' entry\n");
1585 
1586 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1587 	entry = debugfs_create_file("set_graph_function", 0644, d_tracer,
1588 				    NULL,
1589 				    &ftrace_graph_fops);
1590 	if (!entry)
1591 		pr_warning("Could not create debugfs "
1592 			   "'set_graph_function' entry\n");
1593 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1594 
1595 	return 0;
1596 }
1597 
1598 static int ftrace_convert_nops(struct module *mod,
1599 			       unsigned long *start,
1600 			       unsigned long *end)
1601 {
1602 	unsigned long *p;
1603 	unsigned long addr;
1604 	unsigned long flags;
1605 
1606 	mutex_lock(&ftrace_start_lock);
1607 	p = start;
1608 	while (p < end) {
1609 		addr = ftrace_call_adjust(*p++);
1610 		/*
1611 		 * Some architecture linkers will pad between
1612 		 * the different mcount_loc sections of different
1613 		 * object files to satisfy alignments.
1614 		 * Skip any NULL pointers.
1615 		 */
1616 		if (!addr)
1617 			continue;
1618 		ftrace_record_ip(addr);
1619 	}
1620 
1621 	/* disable interrupts to prevent kstop_machine */
1622 	local_irq_save(flags);
1623 	ftrace_update_code(mod);
1624 	local_irq_restore(flags);
1625 	mutex_unlock(&ftrace_start_lock);
1626 
1627 	return 0;
1628 }
1629 
1630 void ftrace_init_module(struct module *mod,
1631 			unsigned long *start, unsigned long *end)
1632 {
1633 	if (ftrace_disabled || start == end)
1634 		return;
1635 	ftrace_convert_nops(mod, start, end);
1636 }
1637 
1638 extern unsigned long __start_mcount_loc[];
1639 extern unsigned long __stop_mcount_loc[];
1640 
1641 void __init ftrace_init(void)
1642 {
1643 	unsigned long count, addr, flags;
1644 	int ret;
1645 
1646 	/* Keep the ftrace pointer to the stub */
1647 	addr = (unsigned long)ftrace_stub;
1648 
1649 	local_irq_save(flags);
1650 	ftrace_dyn_arch_init(&addr);
1651 	local_irq_restore(flags);
1652 
1653 	/* ftrace_dyn_arch_init places the return code in addr */
1654 	if (addr)
1655 		goto failed;
1656 
1657 	count = __stop_mcount_loc - __start_mcount_loc;
1658 
1659 	ret = ftrace_dyn_table_alloc(count);
1660 	if (ret)
1661 		goto failed;
1662 
1663 	last_ftrace_enabled = ftrace_enabled = 1;
1664 
1665 	ret = ftrace_convert_nops(NULL,
1666 				  __start_mcount_loc,
1667 				  __stop_mcount_loc);
1668 
1669 	return;
1670  failed:
1671 	ftrace_disabled = 1;
1672 }
1673 
1674 #else
1675 
1676 static int __init ftrace_nodyn_init(void)
1677 {
1678 	ftrace_enabled = 1;
1679 	return 0;
1680 }
1681 device_initcall(ftrace_nodyn_init);
1682 
1683 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1684 static inline void ftrace_startup_enable(int command) { }
1685 /* Keep as macros so we do not need to define the commands */
1686 # define ftrace_startup(command)	do { } while (0)
1687 # define ftrace_shutdown(command)	do { } while (0)
1688 # define ftrace_startup_sysctl()	do { } while (0)
1689 # define ftrace_shutdown_sysctl()	do { } while (0)
1690 #endif /* CONFIG_DYNAMIC_FTRACE */
1691 
1692 static ssize_t
1693 ftrace_pid_read(struct file *file, char __user *ubuf,
1694 		       size_t cnt, loff_t *ppos)
1695 {
1696 	char buf[64];
1697 	int r;
1698 
1699 	if (ftrace_pid_trace == ftrace_swapper_pid)
1700 		r = sprintf(buf, "swapper tasks\n");
1701 	else if (ftrace_pid_trace)
1702 		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
1703 	else
1704 		r = sprintf(buf, "no pid\n");
1705 
1706 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1707 }
1708 
1709 static void clear_ftrace_swapper(void)
1710 {
1711 	struct task_struct *p;
1712 	int cpu;
1713 
1714 	get_online_cpus();
1715 	for_each_online_cpu(cpu) {
1716 		p = idle_task(cpu);
1717 		clear_tsk_trace_trace(p);
1718 	}
1719 	put_online_cpus();
1720 }
1721 
1722 static void set_ftrace_swapper(void)
1723 {
1724 	struct task_struct *p;
1725 	int cpu;
1726 
1727 	get_online_cpus();
1728 	for_each_online_cpu(cpu) {
1729 		p = idle_task(cpu);
1730 		set_tsk_trace_trace(p);
1731 	}
1732 	put_online_cpus();
1733 }
1734 
1735 static void clear_ftrace_pid(struct pid *pid)
1736 {
1737 	struct task_struct *p;
1738 
1739 	do_each_pid_task(pid, PIDTYPE_PID, p) {
1740 		clear_tsk_trace_trace(p);
1741 	} while_each_pid_task(pid, PIDTYPE_PID, p);
1742 	put_pid(pid);
1743 }
1744 
1745 static void set_ftrace_pid(struct pid *pid)
1746 {
1747 	struct task_struct *p;
1748 
1749 	do_each_pid_task(pid, PIDTYPE_PID, p) {
1750 		set_tsk_trace_trace(p);
1751 	} while_each_pid_task(pid, PIDTYPE_PID, p);
1752 }
1753 
1754 static void clear_ftrace_pid_task(struct pid **pid)
1755 {
1756 	if (*pid == ftrace_swapper_pid)
1757 		clear_ftrace_swapper();
1758 	else
1759 		clear_ftrace_pid(*pid);
1760 
1761 	*pid = NULL;
1762 }
1763 
1764 static void set_ftrace_pid_task(struct pid *pid)
1765 {
1766 	if (pid == ftrace_swapper_pid)
1767 		set_ftrace_swapper();
1768 	else
1769 		set_ftrace_pid(pid);
1770 }
1771 
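/*
 * Writing to set_ftrace_pid chooses whose functions get traced: a
 * positive value selects that pid, 0 selects the per-cpu idle
 * (swapper) tasks, and any negative value switches pid filtering
 * off again.
 */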
1772 static ssize_t
1773 ftrace_pid_write(struct file *filp, const char __user *ubuf,
1774 		   size_t cnt, loff_t *ppos)
1775 {
1776 	struct pid *pid;
1777 	char buf[64];
1778 	long val;
1779 	int ret;
1780 
1781 	if (cnt >= sizeof(buf))
1782 		return -EINVAL;
1783 
1784 	if (copy_from_user(&buf, ubuf, cnt))
1785 		return -EFAULT;
1786 
1787 	buf[cnt] = 0;
1788 
1789 	ret = strict_strtol(buf, 10, &val);
1790 	if (ret < 0)
1791 		return ret;
1792 
1793 	mutex_lock(&ftrace_start_lock);
1794 	if (val < 0) {
1795 		/* disable pid tracing */
1796 		if (!ftrace_pid_trace)
1797 			goto out;
1798 
1799 		clear_ftrace_pid_task(&ftrace_pid_trace);
1800 
1801 	} else {
1802 		/* swapper task is special */
1803 		if (!val) {
1804 			pid = ftrace_swapper_pid;
1805 			if (pid == ftrace_pid_trace)
1806 				goto out;
1807 		} else {
1808 			pid = find_get_pid(val);
1809 
1810 			if (pid == ftrace_pid_trace) {
1811 				put_pid(pid);
1812 				goto out;
1813 			}
1814 		}
1815 
1816 		if (ftrace_pid_trace)
1817 			clear_ftrace_pid_task(&ftrace_pid_trace);
1818 
1819 		if (!pid)
1820 			goto out;
1821 
1822 		ftrace_pid_trace = pid;
1823 
1824 		set_ftrace_pid_task(ftrace_pid_trace);
1825 	}
1826 
1827 	/* update the function call */
1828 	ftrace_update_pid_func();
1829 	ftrace_startup_enable(0);
1830 
1831  out:
1832 	mutex_unlock(&ftrace_start_lock);
1833 
1834 	return cnt;
1835 }
1836 
1837 static struct file_operations ftrace_pid_fops = {
1838 	.read = ftrace_pid_read,
1839 	.write = ftrace_pid_write,
1840 };
1841 
1842 static __init int ftrace_init_debugfs(void)
1843 {
1844 	struct dentry *d_tracer;
1845 	struct dentry *entry;
1846 
1847 	d_tracer = tracing_init_dentry();
1848 	if (!d_tracer)
1849 		return 0;
1850 
1851 	ftrace_init_dyn_debugfs(d_tracer);
1852 
1853 	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
1854 				    NULL, &ftrace_pid_fops);
1855 	if (!entry)
1856 		pr_warning("Could not create debugfs "
1857 			   "'set_ftrace_pid' entry\n");
1858 	return 0;
1859 }
1860 
1861 fs_initcall(ftrace_init_debugfs);
1862 
1863 /**
1864  * ftrace_kill - kill ftrace
1865  *
1866  * This function should be used by panic code. It stops ftrace
1867  * but in a not so nice way. To stop tracing cleanly from a
1868  * non-atomic section, unregister the ftrace_ops instead.
1869  */
1870 void ftrace_kill(void)
1871 {
1872 	ftrace_disabled = 1;
1873 	ftrace_enabled = 0;
1874 	clear_ftrace_function();
1875 }
1876 
1877 /**
1878  * register_ftrace_function - register a function for profiling
1879  * @ops: ops structure that holds the function for profiling.
1880  *
1881  * Register a function to be called by all functions in the
1882  * kernel.
1883  *
1884  * Note: @ops->func and all the functions it calls must be labeled
1885  *       with "notrace", otherwise it will go into a
1886  *       recursive loop.
1887  */
1888 int register_ftrace_function(struct ftrace_ops *ops)
1889 {
1890 	int ret;
1891 
1892 	if (unlikely(ftrace_disabled))
1893 		return -1;
1894 
1895 	mutex_lock(&ftrace_sysctl_lock);
1896 
1897 	ret = __register_ftrace_function(ops);
1898 	ftrace_startup(0);
1899 
1900 	mutex_unlock(&ftrace_sysctl_lock);
1901 	return ret;
1902 }
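
/*
 * Minimal sketch of a caller (hypothetical, not part of this file):
 *
 *	static void notrace my_trace(unsigned long ip, unsigned long pip)
 *	{
 *		... record ip and pip; must not itself be traced ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */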
1903 
1904 /**
1905  * unregister_ftrace_function - unregister a function for profiling.
1906  * @ops: ops structure that holds the function to unregister
1907  *
1908  * Unregister a function that was added to be called by ftrace profiling.
1909  */
1910 int unregister_ftrace_function(struct ftrace_ops *ops)
1911 {
1912 	int ret;
1913 
1914 	mutex_lock(&ftrace_sysctl_lock);
1915 	ret = __unregister_ftrace_function(ops);
1916 	ftrace_shutdown(0);
1917 	mutex_unlock(&ftrace_sysctl_lock);
1918 
1919 	return ret;
1920 }
1921 
1922 int
1923 ftrace_enable_sysctl(struct ctl_table *table, int write,
1924 		     struct file *file, void __user *buffer, size_t *lenp,
1925 		     loff_t *ppos)
1926 {
1927 	int ret;
1928 
1929 	if (unlikely(ftrace_disabled))
1930 		return -ENODEV;
1931 
1932 	mutex_lock(&ftrace_sysctl_lock);
1933 
1934 	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
1935 
1936 	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1937 		goto out;
1938 
1939 	last_ftrace_enabled = ftrace_enabled;
1940 
1941 	if (ftrace_enabled) {
1942 
1943 		ftrace_startup_sysctl();
1944 
1945 		/* we are starting ftrace again */
1946 		if (ftrace_list != &ftrace_list_end) {
1947 			if (ftrace_list->next == &ftrace_list_end)
1948 				ftrace_trace_function = ftrace_list->func;
1949 			else
1950 				ftrace_trace_function = ftrace_list_func;
1951 		}
1952 
1953 	} else {
1954 		/* stopping ftrace calls (just send to ftrace_stub) */
1955 		ftrace_trace_function = ftrace_stub;
1956 
1957 		ftrace_shutdown_sysctl();
1958 	}
1959 
1960  out:
1961 	mutex_unlock(&ftrace_sysctl_lock);
1962 	return ret;
1963 }
1964 
1965 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1966 
1967 static atomic_t ftrace_graph_active;
1968 
1969 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
1970 {
1971 	return 0;
1972 }
1973 
1974 /* The callbacks that hook a function's entry and return */
1975 trace_func_graph_ret_t ftrace_graph_return =
1976 			(trace_func_graph_ret_t)ftrace_stub;
1977 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
1978 
1979 /* Try to assign a return stack to each of up to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
1980 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1981 {
1982 	int i;
1983 	int ret = 0;
1984 	unsigned long flags;
1985 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1986 	struct task_struct *g, *t;
1987 
1988 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1989 		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
1990 					* sizeof(struct ftrace_ret_stack),
1991 					GFP_KERNEL);
1992 		if (!ret_stack_list[i]) {
1993 			start = 0;
1994 			end = i;
1995 			ret = -ENOMEM;
1996 			goto free;
1997 		}
1998 	}
1999 
2000 	read_lock_irqsave(&tasklist_lock, flags);
2001 	do_each_thread(g, t) {
2002 		if (start == end) {
2003 			ret = -EAGAIN;
2004 			goto unlock;
2005 		}
2006 
2007 		if (t->ret_stack == NULL) {
2008 			t->curr_ret_stack = -1;
2009 			/* Make sure IRQs see the -1 first: */
2010 			barrier();
2011 			t->ret_stack = ret_stack_list[start++];
2012 			atomic_set(&t->tracing_graph_pause, 0);
2013 			atomic_set(&t->trace_overrun, 0);
2014 		}
2015 	} while_each_thread(g, t);
2016 
2017 unlock:
2018 	read_unlock_irqrestore(&tasklist_lock, flags);
2019 free:
2020 	for (i = start; i < end; i++)
2021 		kfree(ret_stack_list[i]);
2022 	return ret;
2023 }
2024 
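/*
 * Return stacks are handed out in batches of FTRACE_RETSTACK_ALLOC_SIZE.
 * If a batch runs dry before the tasklist walk finishes (tasks can be
 * created while we work), alloc_retstack_tasklist() returns -EAGAIN and
 * the loop below retries with a fresh batch.
 */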
2025 /* Allocate a return stack for each task */
2026 static int start_graph_tracing(void)
2027 {
2028 	struct ftrace_ret_stack **ret_stack_list;
2029 	int ret;
2030 
2031 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2032 				sizeof(struct ftrace_ret_stack *),
2033 				GFP_KERNEL);
2034 
2035 	if (!ret_stack_list)
2036 		return -ENOMEM;
2037 
2038 	do {
2039 		ret = alloc_retstack_tasklist(ret_stack_list);
2040 	} while (ret == -EAGAIN);
2041 
2042 	kfree(ret_stack_list);
2043 	return ret;
2044 }
2045 
2046 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2047 			trace_func_graph_ent_t entryfunc)
2048 {
2049 	int ret = 0;
2050 
2051 	mutex_lock(&ftrace_sysctl_lock);
2052 
2053 	atomic_inc(&ftrace_graph_active);
2054 	ret = start_graph_tracing();
2055 	if (ret) {
2056 		atomic_dec(&ftrace_graph_active);
2057 		goto out;
2058 	}
2059 
2060 	ftrace_graph_return = retfunc;
2061 	ftrace_graph_entry = entryfunc;
2062 
2063 	ftrace_startup(FTRACE_START_FUNC_RET);
2064 
2065 out:
2066 	mutex_unlock(&ftrace_sysctl_lock);
2067 	return ret;
2068 }
2069 
2070 void unregister_ftrace_graph(void)
2071 {
2072 	mutex_lock(&ftrace_sysctl_lock);
2073 
2074 	atomic_dec(&ftrace_graph_active);
2075 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2076 	ftrace_graph_entry = ftrace_graph_entry_stub;
2077 	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2078 
2079 	mutex_unlock(&ftrace_sysctl_lock);
2080 }
2081 
2082 /* Allocate a return stack for a newly created task */
2083 void ftrace_graph_init_task(struct task_struct *t)
2084 {
2085 	if (atomic_read(&ftrace_graph_active)) {
2086 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2087 				* sizeof(struct ftrace_ret_stack),
2088 				GFP_KERNEL);
2089 		if (!t->ret_stack)
2090 			return;
2091 		t->curr_ret_stack = -1;
2092 		atomic_set(&t->tracing_graph_pause, 0);
2093 		atomic_set(&t->trace_overrun, 0);
2094 	} else
2095 		t->ret_stack = NULL;
2096 }
2097 
2098 void ftrace_graph_exit_task(struct task_struct *t)
2099 {
2100 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
2101 
2102 	t->ret_stack = NULL;
2103 	/* NULL must become visible to IRQs before we free it: */
2104 	barrier();
2105 
2106 	kfree(ret_stack);
2107 }
2108 
2109 void ftrace_graph_stop(void)
2110 {
2111 	ftrace_stop();
2112 }
2113 #endif
2114 
2115