/* linux/kernel/trace/ftrace.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5) */
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35 
36 #include <trace/events/sched.h>
37 
38 #include <asm/setup.h>
39 
40 #include "trace_output.h"
41 #include "trace_stat.h"
42 
43 #define FTRACE_WARN_ON(cond)			\
44 	({					\
45 		int ___r = cond;		\
46 		if (WARN_ON(___r))		\
47 			ftrace_kill();		\
48 		___r;				\
49 	})
50 
51 #define FTRACE_WARN_ON_ONCE(cond)		\
52 	({					\
53 		int ___r = cond;		\
54 		if (WARN_ON_ONCE(___r))		\
55 			ftrace_kill();		\
56 		___r;				\
57 	})
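
/*
 * Illustrative usage (a sketch, not from the original file): both
 * macros are statement expressions that evaluate to the condition,
 * so a caller can test the result and bail out while also shutting
 * ftrace down on failure:
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return -EINVAL;
 */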
58 
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
64 
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
66 
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_OPS_HASH(opsname)	\
69 	.func_hash		= &opsname.local_hash,			\
70 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71 #define ASSIGN_OPS_HASH(opsname, val) \
72 	.func_hash		= val, \
73 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
74 #else
75 #define INIT_OPS_HASH(opsname)
76 #define ASSIGN_OPS_HASH(opsname, val)
77 #endif
78 
79 static struct ftrace_ops ftrace_list_end __read_mostly = {
80 	.func		= ftrace_stub,
81 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
82 	INIT_OPS_HASH(ftrace_list_end)
83 };
84 
85 /* ftrace_enabled is a method to turn ftrace on or off */
86 int ftrace_enabled __read_mostly;
87 static int last_ftrace_enabled;
88 
89 /* Current function tracing op */
90 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
91 /* What to set function_trace_op to */
92 static struct ftrace_ops *set_function_trace_op;
93 
94 /* List for set_ftrace_pid's pids. */
95 LIST_HEAD(ftrace_pids);
96 struct ftrace_pid {
97 	struct list_head list;
98 	struct pid *pid;
99 };
100 
101 /*
102  * ftrace_disabled is set when an anomaly is discovered.
103  * ftrace_disabled is much stronger than ftrace_enabled.
104  */
105 static int ftrace_disabled __read_mostly;
106 
107 static DEFINE_MUTEX(ftrace_lock);
108 
109 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
110 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
111 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
112 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113 static struct ftrace_ops global_ops;
114 static struct ftrace_ops control_ops;
115 
116 #if ARCH_SUPPORTS_FTRACE_OPS
117 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
118 				 struct ftrace_ops *op, struct pt_regs *regs);
119 #else
120 /* See comment below, where ftrace_ops_list_func is defined */
121 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
122 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
123 #endif
124 
125 /*
126  * Traverse the given ftrace_ops list, invoking all entries.  The reason that we
127  * can use rcu_dereference_raw_notrace() is that elements removed from this list
128  * are simply leaked, so there is no need to interact with a grace-period
129  * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
130  * concurrent insertions into the list.
131  *
132  * Silly Alpha and silly pointer-speculation compiler optimizations!
133  */
134 #define do_for_each_ftrace_op(op, list)			\
135 	op = rcu_dereference_raw_notrace(list);			\
136 	do
137 
138 /*
139  * Optimized for just a single item in the list (as that is the normal case).
140  */
141 #define while_for_each_ftrace_op(op)				\
142 	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
143 	       unlikely((op) != &ftrace_list_end))
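
/*
 * Example of the pairing (an illustrative sketch): together the two
 * macros form a do/while loop over the RCU-protected ops list:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */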
144 
145 static inline void ftrace_ops_init(struct ftrace_ops *ops)
146 {
147 #ifdef CONFIG_DYNAMIC_FTRACE
148 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
149 		mutex_init(&ops->local_hash.regex_lock);
150 		ops->func_hash = &ops->local_hash;
151 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
152 	}
153 #endif
154 }
155 
156 /**
157  * ftrace_nr_registered_ops - return number of ops registered
158  *
159  * Returns the number of ftrace_ops registered and tracing functions
160  */
161 int ftrace_nr_registered_ops(void)
162 {
163 	struct ftrace_ops *ops;
164 	int cnt = 0;
165 
166 	mutex_lock(&ftrace_lock);
167 
168 	for (ops = ftrace_ops_list;
169 	     ops != &ftrace_list_end; ops = ops->next)
170 		cnt++;
171 
172 	mutex_unlock(&ftrace_lock);
173 
174 	return cnt;
175 }
176 
177 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
178 			    struct ftrace_ops *op, struct pt_regs *regs)
179 {
180 	if (!test_tsk_trace_trace(current))
181 		return;
182 
183 	ftrace_pid_function(ip, parent_ip, op, regs);
184 }
185 
186 static void set_ftrace_pid_function(ftrace_func_t func)
187 {
188 	/* do not set ftrace_pid_function to itself! */
189 	if (func != ftrace_pid_func)
190 		ftrace_pid_function = func;
191 }
192 
193 /**
194  * clear_ftrace_function - reset the ftrace function
195  *
196  * This NULLs the ftrace function and in essence stops
197  * tracing. There may be a lag before all CPUs see the change.
198  */
199 void clear_ftrace_function(void)
200 {
201 	ftrace_trace_function = ftrace_stub;
202 	ftrace_pid_function = ftrace_stub;
203 }
204 
205 static void control_ops_disable_all(struct ftrace_ops *ops)
206 {
207 	int cpu;
208 
209 	for_each_possible_cpu(cpu)
210 		*per_cpu_ptr(ops->disabled, cpu) = 1;
211 }
212 
213 static int control_ops_alloc(struct ftrace_ops *ops)
214 {
215 	int __percpu *disabled;
216 
217 	disabled = alloc_percpu(int);
218 	if (!disabled)
219 		return -ENOMEM;
220 
221 	ops->disabled = disabled;
222 	control_ops_disable_all(ops);
223 	return 0;
224 }
225 
226 static void ftrace_sync(struct work_struct *work)
227 {
228 	/*
229 	 * This function is just a stub to implement a hard force
230 	 * of synchronize_sched(). This requires synchronizing
231 	 * tasks even in userspace and idle.
232 	 *
233 	 * Yes, function tracing is rude.
234 	 */
235 }
236 
237 static void ftrace_sync_ipi(void *data)
238 {
239 	/* Probably not needed, but do it anyway */
240 	smp_rmb();
241 }
242 
243 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
244 static void update_function_graph_func(void);
245 #else
246 static inline void update_function_graph_func(void) { }
247 #endif
248 
249 static void update_ftrace_function(void)
250 {
251 	ftrace_func_t func;
252 
253 	/*
254 	 * If we are at the end of the list and this ops is
255 	 * recursion safe and not dynamic and the arch supports passing ops,
256 	 * then have the mcount trampoline call the function directly.
257 	 */
258 	if (ftrace_ops_list == &ftrace_list_end ||
259 	    (ftrace_ops_list->next == &ftrace_list_end &&
260 	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
261 	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
262 	     !FTRACE_FORCE_LIST_FUNC)) {
263 		/* Set the ftrace_ops that the arch callback uses */
264 		set_function_trace_op = ftrace_ops_list;
265 		func = ftrace_ops_list->func;
266 	} else {
267 		/* Just use the default ftrace_ops */
268 		set_function_trace_op = &ftrace_list_end;
269 		func = ftrace_ops_list_func;
270 	}
271 
272 	update_function_graph_func();
273 
274 	/* If there's no change, then do nothing more here */
275 	if (ftrace_trace_function == func)
276 		return;
277 
278 	/*
279 	 * If we are using the list function, it doesn't care
280 	 * about the function_trace_ops.
281 	 */
282 	if (func == ftrace_ops_list_func) {
283 		ftrace_trace_function = func;
284 		/*
285 		 * Don't even bother setting function_trace_ops,
286 		 * it would be racy to do so anyway.
287 		 */
288 		return;
289 	}
290 
291 #ifndef CONFIG_DYNAMIC_FTRACE
292 	/*
293 	 * For static tracing, we need to be a bit more careful.
294 	 * The function change takes effect immediately. Thus,
295 	 * we need to coordinate the setting of the function_trace_ops
296 	 * with the setting of the ftrace_trace_function.
297 	 *
298 	 * Set the function to the list ops, which will call the
299 	 * function we want, albeit indirectly, but it handles the
300 	 * ftrace_ops and doesn't depend on function_trace_op.
301 	 */
302 	ftrace_trace_function = ftrace_ops_list_func;
303 	/*
304 	 * Make sure all CPUs see this. Yes this is slow, but static
305 	 * tracing is slow and nasty to have enabled.
306 	 */
307 	schedule_on_each_cpu(ftrace_sync);
308 	/* Now all cpus are using the list ops. */
309 	function_trace_op = set_function_trace_op;
310 	/* Make sure the function_trace_op is visible on all CPUs */
311 	smp_wmb();
312 	/* Nasty way to force a rmb on all cpus */
313 	smp_call_function(ftrace_sync_ipi, NULL, 1);
314 	/* OK, we are all set to update the ftrace_trace_function now! */
315 #endif /* !CONFIG_DYNAMIC_FTRACE */
316 
317 	ftrace_trace_function = func;
318 }
319 
320 int using_ftrace_ops_list_func(void)
321 {
322 	return ftrace_trace_function == ftrace_ops_list_func;
323 }
324 
325 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
326 {
327 	ops->next = *list;
328 	/*
329 	 * We are entering ops into the list but another
330 	 * CPU might be walking that list. We need to make sure
331 	 * the ops->next pointer is valid before another CPU sees
332 	 * the ops pointer included into the list.
333 	 */
334 	rcu_assign_pointer(*list, ops);
335 }
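
/*
 * Memory-ordering sketch (illustrative): a walker doing
 *
 *	op = rcu_dereference_raw_notrace(ftrace_ops_list);
 *
 * concurrently with add_ftrace_ops() sees either the old head or the
 * new ops with ops->next already valid, because rcu_assign_pointer()
 * orders the store to ops->next before the store that publishes ops.
 */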
336 
337 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
338 {
339 	struct ftrace_ops **p;
340 
341 	/*
342 	 * If we are removing the last function, then simply point
343 	 * to the ftrace_stub.
344 	 */
345 	if (*list == ops && ops->next == &ftrace_list_end) {
346 		*list = &ftrace_list_end;
347 		return 0;
348 	}
349 
350 	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
351 		if (*p == ops)
352 			break;
353 
354 	if (*p != ops)
355 		return -1;
356 
357 	*p = (*p)->next;
358 	return 0;
359 }
360 
361 static void add_ftrace_list_ops(struct ftrace_ops **list,
362 				struct ftrace_ops *main_ops,
363 				struct ftrace_ops *ops)
364 {
365 	int first = *list == &ftrace_list_end;
366 	add_ftrace_ops(list, ops);
367 	if (first)
368 		add_ftrace_ops(&ftrace_ops_list, main_ops);
369 }
370 
371 static int remove_ftrace_list_ops(struct ftrace_ops **list,
372 				  struct ftrace_ops *main_ops,
373 				  struct ftrace_ops *ops)
374 {
375 	int ret = remove_ftrace_ops(list, ops);
376 	if (!ret && *list == &ftrace_list_end)
377 		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
378 	return ret;
379 }
380 
381 static int __register_ftrace_function(struct ftrace_ops *ops)
382 {
383 	if (ops->flags & FTRACE_OPS_FL_DELETED)
384 		return -EINVAL;
385 
386 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
387 		return -EBUSY;
388 
389 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
390 	/*
391 	 * If the ftrace_ops specifies SAVE_REGS, then it can only be used
392 	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
393 	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
394 	 */
395 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
396 	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
397 		return -EINVAL;
398 
399 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
400 		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
401 #endif
402 
403 	if (!core_kernel_data((unsigned long)ops))
404 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
405 
406 	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
407 		if (control_ops_alloc(ops))
408 			return -ENOMEM;
409 		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
410 	} else
411 		add_ftrace_ops(&ftrace_ops_list, ops);
412 
413 	if (ftrace_enabled)
414 		update_ftrace_function();
415 
416 	return 0;
417 }
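
/*
 * A minimal caller-side sketch (my_callback and my_ops are
 * hypothetical, not part of this file): a tracer fills in an
 * ftrace_ops and hands it to register_ftrace_function(), which is
 * defined later in this file, takes ftrace_lock, and lands here:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		... runs at the start of every traced function ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */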
418 
419 static int __unregister_ftrace_function(struct ftrace_ops *ops)
420 {
421 	int ret;
422 
423 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
424 		return -EBUSY;
425 
426 	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
427 		ret = remove_ftrace_list_ops(&ftrace_control_list,
428 					     &control_ops, ops);
429 	} else
430 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
431 
432 	if (ret < 0)
433 		return ret;
434 
435 	if (ftrace_enabled)
436 		update_ftrace_function();
437 
438 	return 0;
439 }
440 
441 static void ftrace_update_pid_func(void)
442 {
443 	/* Only do something if we are tracing something */
444 	if (ftrace_trace_function == ftrace_stub)
445 		return;
446 
447 	update_ftrace_function();
448 }
449 
450 #ifdef CONFIG_FUNCTION_PROFILER
451 struct ftrace_profile {
452 	struct hlist_node		node;
453 	unsigned long			ip;
454 	unsigned long			counter;
455 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
456 	unsigned long long		time;
457 	unsigned long long		time_squared;
458 #endif
459 };
460 
461 struct ftrace_profile_page {
462 	struct ftrace_profile_page	*next;
463 	unsigned long			index;
464 	struct ftrace_profile		records[];
465 };
466 
467 struct ftrace_profile_stat {
468 	atomic_t			disabled;
469 	struct hlist_head		*hash;
470 	struct ftrace_profile_page	*pages;
471 	struct ftrace_profile_page	*start;
472 	struct tracer_stat		stat;
473 };
474 
475 #define PROFILE_RECORDS_SIZE						\
476 	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
477 
478 #define PROFILES_PER_PAGE					\
479 	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
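
/*
 * Back-of-the-envelope (assuming 4K pages and a 64-bit kernel with
 * CONFIG_FUNCTION_GRAPH_TRACER, where sizeof(struct ftrace_profile)
 * is 48 bytes): PROFILE_RECORDS_SIZE is 4096 minus the 16-byte
 * next/index header, so PROFILES_PER_PAGE works out to roughly 85
 * records per page.
 */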
480 
481 static int ftrace_profile_enabled __read_mostly;
482 
483 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
484 static DEFINE_MUTEX(ftrace_profile_lock);
485 
486 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
487 
488 #define FTRACE_PROFILE_HASH_BITS 10
489 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
490 
491 static void *
492 function_stat_next(void *v, int idx)
493 {
494 	struct ftrace_profile *rec = v;
495 	struct ftrace_profile_page *pg;
496 
497 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
498 
499  again:
500 	if (idx != 0)
501 		rec++;
502 
503 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
504 		pg = pg->next;
505 		if (!pg)
506 			return NULL;
507 		rec = &pg->records[0];
508 		if (!rec->counter)
509 			goto again;
510 	}
511 
512 	return rec;
513 }
514 
515 static void *function_stat_start(struct tracer_stat *trace)
516 {
517 	struct ftrace_profile_stat *stat =
518 		container_of(trace, struct ftrace_profile_stat, stat);
519 
520 	if (!stat || !stat->start)
521 		return NULL;
522 
523 	return function_stat_next(&stat->start->records[0], 0);
524 }
525 
526 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
527 /* function graph compares on total time */
528 static int function_stat_cmp(void *p1, void *p2)
529 {
530 	struct ftrace_profile *a = p1;
531 	struct ftrace_profile *b = p2;
532 
533 	if (a->time < b->time)
534 		return -1;
535 	if (a->time > b->time)
536 		return 1;
537 	else
538 		return 0;
539 }
540 #else
541 /* not function graph compares against hits */
542 static int function_stat_cmp(void *p1, void *p2)
543 {
544 	struct ftrace_profile *a = p1;
545 	struct ftrace_profile *b = p2;
546 
547 	if (a->counter < b->counter)
548 		return -1;
549 	if (a->counter > b->counter)
550 		return 1;
551 	else
552 		return 0;
553 }
554 #endif
555 
556 static int function_stat_headers(struct seq_file *m)
557 {
558 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
559 	seq_printf(m, "  Function                               "
560 		   "Hit    Time            Avg             s^2\n"
561 		      "  --------                               "
562 		   "---    ----            ---             ---\n");
563 #else
564 	seq_printf(m, "  Function                               Hit\n"
565 		      "  --------                               ---\n");
566 #endif
567 	return 0;
568 }
569 
570 static int function_stat_show(struct seq_file *m, void *v)
571 {
572 	struct ftrace_profile *rec = v;
573 	char str[KSYM_SYMBOL_LEN];
574 	int ret = 0;
575 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
576 	static struct trace_seq s;
577 	unsigned long long avg;
578 	unsigned long long stddev;
579 #endif
580 	mutex_lock(&ftrace_profile_lock);
581 
582 	/* we raced with function_profile_reset() */
583 	if (unlikely(rec->counter == 0)) {
584 		ret = -EBUSY;
585 		goto out;
586 	}
587 
588 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
589 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
590 
591 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
592 	seq_printf(m, "    ");
593 	avg = rec->time;
594 	do_div(avg, rec->counter);
595 
596 	/* Sample variance (s^2) */
597 	if (rec->counter <= 1)
598 		stddev = 0;
599 	else {
600 		/*
601 		 * Apply the textbook formula for the sample variance:
602 		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
603 		 */
604 		stddev = rec->counter * rec->time_squared -
605 			 rec->time * rec->time;
606 
607 		/*
608 		 * Divide by only 1000 for the ns^2 -> us^2 conversion.
609 		 * trace_print_graph_duration will divide 1000 again.
610 		 */
611 		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
612 	}
613 
614 	trace_seq_init(&s);
615 	trace_print_graph_duration(rec->time, &s);
616 	trace_seq_puts(&s, "    ");
617 	trace_print_graph_duration(avg, &s);
618 	trace_seq_puts(&s, "    ");
619 	trace_print_graph_duration(stddev, &s);
620 	trace_print_seq(m, &s);
621 #endif
622 	seq_putc(m, '\n');
623 out:
624 	mutex_unlock(&ftrace_profile_lock);
625 
626 	return ret;
627 }
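
/*
 * Worked example of the s^2 computation above (illustrative): two
 * hits of 100ns and 300ns give counter = 2, time = 400 and
 * time_squared = 100000.  Then
 *
 *	s^2 = (2 * 100000 - 400 * 400) / (2 * 1) = 20000 ns^2
 *
 * and the do_div() by counter * (counter - 1) * 1000 leaves 20,
 * which trace_print_graph_duration() scales down by 1000 once more
 * for display.
 */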
628 
629 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
630 {
631 	struct ftrace_profile_page *pg;
632 
633 	pg = stat->pages = stat->start;
634 
635 	while (pg) {
636 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
637 		pg->index = 0;
638 		pg = pg->next;
639 	}
640 
641 	memset(stat->hash, 0,
642 	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
643 }
644 
645 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
646 {
647 	struct ftrace_profile_page *pg;
648 	int functions;
649 	int pages;
650 	int i;
651 
652 	/* If we already allocated, do nothing */
653 	if (stat->pages)
654 		return 0;
655 
656 	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
657 	if (!stat->pages)
658 		return -ENOMEM;
659 
660 #ifdef CONFIG_DYNAMIC_FTRACE
661 	functions = ftrace_update_tot_cnt;
662 #else
663 	/*
664 	 * We do not know the number of functions that exist because
665 	 * dynamic tracing is what counts them. From past experience
666 	 * we see around 20K functions. That should be more than enough.
667 	 * It is highly unlikely we will execute every function in
668 	 * the kernel.
669 	 */
670 	functions = 20000;
671 #endif
672 
673 	pg = stat->start = stat->pages;
674 
675 	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
676 
677 	for (i = 1; i < pages; i++) {
678 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
679 		if (!pg->next)
680 			goto out_free;
681 		pg = pg->next;
682 	}
683 
684 	return 0;
685 
686  out_free:
687 	pg = stat->start;
688 	while (pg) {
689 		unsigned long tmp = (unsigned long)pg;
690 
691 		pg = pg->next;
692 		free_page(tmp);
693 	}
694 
695 	stat->pages = NULL;
696 	stat->start = NULL;
697 
698 	return -ENOMEM;
699 }
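
/*
 * Sizing sketch (assuming the ~85 records per 4K page estimated
 * above): 20000 functions need DIV_ROUND_UP(20000, 85) = 236 pages,
 * i.e. roughly 944KB of profile records per possible CPU.
 */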
700 
701 static int ftrace_profile_init_cpu(int cpu)
702 {
703 	struct ftrace_profile_stat *stat;
704 	int size;
705 
706 	stat = &per_cpu(ftrace_profile_stats, cpu);
707 
708 	if (stat->hash) {
709 		/* If the profile is already created, simply reset it */
710 		ftrace_profile_reset(stat);
711 		return 0;
712 	}
713 
714 	/*
715 	 * We are profiling all functions, but usually only a few thousand
716 	 * functions are hit. We'll make a hash of 1024 items.
717 	 */
718 	size = FTRACE_PROFILE_HASH_SIZE;
719 
720 	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
721 
722 	if (!stat->hash)
723 		return -ENOMEM;
724 
725 	/* Preallocate the function profiling pages */
726 	if (ftrace_profile_pages_init(stat) < 0) {
727 		kfree(stat->hash);
728 		stat->hash = NULL;
729 		return -ENOMEM;
730 	}
731 
732 	return 0;
733 }
734 
735 static int ftrace_profile_init(void)
736 {
737 	int cpu;
738 	int ret = 0;
739 
740 	for_each_possible_cpu(cpu) {
741 		ret = ftrace_profile_init_cpu(cpu);
742 		if (ret)
743 			break;
744 	}
745 
746 	return ret;
747 }
748 
749 /* interrupts must be disabled */
750 static struct ftrace_profile *
751 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
752 {
753 	struct ftrace_profile *rec;
754 	struct hlist_head *hhd;
755 	unsigned long key;
756 
757 	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
758 	hhd = &stat->hash[key];
759 
760 	if (hlist_empty(hhd))
761 		return NULL;
762 
763 	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
764 		if (rec->ip == ip)
765 			return rec;
766 	}
767 
768 	return NULL;
769 }
770 
771 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
772 			       struct ftrace_profile *rec)
773 {
774 	unsigned long key;
775 
776 	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
777 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
778 }
779 
780 /*
781  * The memory is already allocated; this simply finds a new record to use.
782  */
783 static struct ftrace_profile *
784 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
785 {
786 	struct ftrace_profile *rec = NULL;
787 
788 	/* prevent recursion (from NMIs) */
789 	if (atomic_inc_return(&stat->disabled) != 1)
790 		goto out;
791 
792 	/*
793 	 * Try to find the function again since an NMI
794 	 * could have added it
795 	 */
796 	rec = ftrace_find_profiled_func(stat, ip);
797 	if (rec)
798 		goto out;
799 
800 	if (stat->pages->index == PROFILES_PER_PAGE) {
801 		if (!stat->pages->next)
802 			goto out;
803 		stat->pages = stat->pages->next;
804 	}
805 
806 	rec = &stat->pages->records[stat->pages->index++];
807 	rec->ip = ip;
808 	ftrace_add_profile(stat, rec);
809 
810  out:
811 	atomic_dec(&stat->disabled);
812 
813 	return rec;
814 }
815 
816 static void
817 function_profile_call(unsigned long ip, unsigned long parent_ip,
818 		      struct ftrace_ops *ops, struct pt_regs *regs)
819 {
820 	struct ftrace_profile_stat *stat;
821 	struct ftrace_profile *rec;
822 	unsigned long flags;
823 
824 	if (!ftrace_profile_enabled)
825 		return;
826 
827 	local_irq_save(flags);
828 
829 	stat = this_cpu_ptr(&ftrace_profile_stats);
830 	if (!stat->hash || !ftrace_profile_enabled)
831 		goto out;
832 
833 	rec = ftrace_find_profiled_func(stat, ip);
834 	if (!rec) {
835 		rec = ftrace_profile_alloc(stat, ip);
836 		if (!rec)
837 			goto out;
838 	}
839 
840 	rec->counter++;
841  out:
842 	local_irq_restore(flags);
843 }
844 
845 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
846 static int profile_graph_entry(struct ftrace_graph_ent *trace)
847 {
848 	function_profile_call(trace->func, 0, NULL, NULL);
849 	return 1;
850 }
851 
852 static void profile_graph_return(struct ftrace_graph_ret *trace)
853 {
854 	struct ftrace_profile_stat *stat;
855 	unsigned long long calltime;
856 	struct ftrace_profile *rec;
857 	unsigned long flags;
858 
859 	local_irq_save(flags);
860 	stat = this_cpu_ptr(&ftrace_profile_stats);
861 	if (!stat->hash || !ftrace_profile_enabled)
862 		goto out;
863 
864 	/* If the calltime was zero'd ignore it */
865 	if (!trace->calltime)
866 		goto out;
867 
868 	calltime = trace->rettime - trace->calltime;
869 
870 	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
871 		int index;
872 
873 		index = trace->depth;
874 
875 		/* Append this call time to the parent time to subtract */
876 		if (index)
877 			current->ret_stack[index - 1].subtime += calltime;
878 
879 		if (current->ret_stack[index].subtime < calltime)
880 			calltime -= current->ret_stack[index].subtime;
881 		else
882 			calltime = 0;
883 	}
884 
885 	rec = ftrace_find_profiled_func(stat, trace->func);
886 	if (rec) {
887 		rec->time += calltime;
888 		rec->time_squared += calltime * calltime;
889 	}
890 
891  out:
892 	local_irq_restore(flags);
893 }
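
/*
 * Worked example of the subtime logic above (illustrative): if foo()
 * ran 10000ns total and its child bar() ran 4000ns, bar's return
 * handler added 4000 to ret_stack[foo's index].subtime, so when foo()
 * returns here calltime becomes 10000 - 4000 = 6000ns: foo's own time
 * excluding children (the !TRACE_ITER_GRAPH_TIME behavior).
 */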
894 
895 static int register_ftrace_profiler(void)
896 {
897 	return register_ftrace_graph(&profile_graph_return,
898 				     &profile_graph_entry);
899 }
900 
901 static void unregister_ftrace_profiler(void)
902 {
903 	unregister_ftrace_graph();
904 }
905 #else
906 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
907 	.func		= function_profile_call,
908 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
909 	INIT_OPS_HASH(ftrace_profile_ops)
910 };
911 
912 static int register_ftrace_profiler(void)
913 {
914 	return register_ftrace_function(&ftrace_profile_ops);
915 }
916 
917 static void unregister_ftrace_profiler(void)
918 {
919 	unregister_ftrace_function(&ftrace_profile_ops);
920 }
921 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
922 
923 static ssize_t
924 ftrace_profile_write(struct file *filp, const char __user *ubuf,
925 		     size_t cnt, loff_t *ppos)
926 {
927 	unsigned long val;
928 	int ret;
929 
930 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
931 	if (ret)
932 		return ret;
933 
934 	val = !!val;
935 
936 	mutex_lock(&ftrace_profile_lock);
937 	if (ftrace_profile_enabled ^ val) {
938 		if (val) {
939 			ret = ftrace_profile_init();
940 			if (ret < 0) {
941 				cnt = ret;
942 				goto out;
943 			}
944 
945 			ret = register_ftrace_profiler();
946 			if (ret < 0) {
947 				cnt = ret;
948 				goto out;
949 			}
950 			ftrace_profile_enabled = 1;
951 		} else {
952 			ftrace_profile_enabled = 0;
953 			/*
954 			 * unregister_ftrace_profiler calls stop_machine
955 			 * so this acts like a synchronize_sched().
956 			 */
957 			unregister_ftrace_profiler();
958 		}
959 	}
960  out:
961 	mutex_unlock(&ftrace_profile_lock);
962 
963 	*ppos += cnt;
964 
965 	return cnt;
966 }
967 
968 static ssize_t
969 ftrace_profile_read(struct file *filp, char __user *ubuf,
970 		     size_t cnt, loff_t *ppos)
971 {
972 	char buf[64];		/* big enough to hold a number */
973 	int r;
974 
975 	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
976 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
977 }
978 
979 static const struct file_operations ftrace_profile_fops = {
980 	.open		= tracing_open_generic,
981 	.read		= ftrace_profile_read,
982 	.write		= ftrace_profile_write,
983 	.llseek		= default_llseek,
984 };
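
/*
 * Runtime usage sketch (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	# cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * The second file is one of the per-cpu "function%d" stat files
 * registered below.
 */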
985 
986 /* used to initialize the real stat files */
987 static struct tracer_stat function_stats __initdata = {
988 	.name		= "functions",
989 	.stat_start	= function_stat_start,
990 	.stat_next	= function_stat_next,
991 	.stat_cmp	= function_stat_cmp,
992 	.stat_headers	= function_stat_headers,
993 	.stat_show	= function_stat_show
994 };
995 
996 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
997 {
998 	struct ftrace_profile_stat *stat;
999 	struct dentry *entry;
1000 	char *name;
1001 	int ret;
1002 	int cpu;
1003 
1004 	for_each_possible_cpu(cpu) {
1005 		stat = &per_cpu(ftrace_profile_stats, cpu);
1006 
1007 		/* allocate enough for function name + cpu number */
1008 		name = kmalloc(32, GFP_KERNEL);
1009 		if (!name) {
1010 			/*
1011 			 * The files created are permanent; even if something
1012 			 * fails here, we still do not free the memory they use.
1013 			 */
1014 			WARN(1,
1015 			     "Could not allocate stat file for cpu %d\n",
1016 			     cpu);
1017 			return;
1018 		}
1019 		stat->stat = function_stats;
1020 		snprintf(name, 32, "function%d", cpu);
1021 		stat->stat.name = name;
1022 		ret = register_stat_tracer(&stat->stat);
1023 		if (ret) {
1024 			WARN(1,
1025 			     "Could not register function stat for cpu %d\n",
1026 			     cpu);
1027 			kfree(name);
1028 			return;
1029 		}
1030 	}
1031 
1032 	entry = debugfs_create_file("function_profile_enabled", 0644,
1033 				    d_tracer, NULL, &ftrace_profile_fops);
1034 	if (!entry)
1035 		pr_warning("Could not create debugfs "
1036 			   "'function_profile_enabled' entry\n");
1037 }
1038 
1039 #else /* CONFIG_FUNCTION_PROFILER */
1040 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1041 {
1042 }
1043 #endif /* CONFIG_FUNCTION_PROFILER */
1044 
1045 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1046 
1047 #ifdef CONFIG_DYNAMIC_FTRACE
1048 
1049 static struct ftrace_ops *removed_ops;
1050 
1051 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1052 # error Dynamic ftrace depends on MCOUNT_RECORD
1053 #endif
1054 
1055 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1056 
1057 struct ftrace_func_probe {
1058 	struct hlist_node	node;
1059 	struct ftrace_probe_ops	*ops;
1060 	unsigned long		flags;
1061 	unsigned long		ip;
1062 	void			*data;
1063 	struct list_head	free_list;
1064 };
1065 
1066 struct ftrace_func_entry {
1067 	struct hlist_node hlist;
1068 	unsigned long ip;
1069 };
1070 
1071 struct ftrace_hash {
1072 	unsigned long		size_bits;
1073 	struct hlist_head	*buckets;
1074 	unsigned long		count;
1075 	struct rcu_head		rcu;
1076 };
1077 
1078 /*
1079  * We make these constant because no one should touch them,
1080  * but they are used as the default "empty hash", to avoid allocating
1081  * it all the time. These are in a read-only section such that if
1082  * anyone does try to modify them, it will cause an exception.
1083  */
1084 static const struct hlist_head empty_buckets[1];
1085 static const struct ftrace_hash empty_hash = {
1086 	.buckets = (struct hlist_head *)empty_buckets,
1087 };
1088 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
1089 
1090 static struct ftrace_ops global_ops = {
1091 	.func				= ftrace_stub,
1092 	.local_hash.notrace_hash	= EMPTY_HASH,
1093 	.local_hash.filter_hash		= EMPTY_HASH,
1094 	INIT_OPS_HASH(global_ops)
1095 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
1096 					  FTRACE_OPS_FL_INITIALIZED,
1097 };
1098 
1099 struct ftrace_page {
1100 	struct ftrace_page	*next;
1101 	struct dyn_ftrace	*records;
1102 	int			index;
1103 	int			size;
1104 };
1105 
1106 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1107 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
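
/*
 * Rough capacity (assumption: 4K pages on a 64-bit kernel with no
 * arch-specific fields in struct dyn_ftrace, so ENTRY_SIZE is the 16
 * bytes of ip + flags): ENTRIES_PER_PAGE is then 256, and
 * higher-order page allocations scale that linearly.
 */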
1108 
1109 /* estimate from running different kernels */
1110 #define NR_TO_INIT		10000
1111 
1112 static struct ftrace_page	*ftrace_pages_start;
1113 static struct ftrace_page	*ftrace_pages;
1114 
1115 static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
1116 {
1117 	return !hash || !hash->count;
1118 }
1119 
1120 static struct ftrace_func_entry *
1121 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1122 {
1123 	unsigned long key;
1124 	struct ftrace_func_entry *entry;
1125 	struct hlist_head *hhd;
1126 
1127 	if (ftrace_hash_empty(hash))
1128 		return NULL;
1129 
1130 	if (hash->size_bits > 0)
1131 		key = hash_long(ip, hash->size_bits);
1132 	else
1133 		key = 0;
1134 
1135 	hhd = &hash->buckets[key];
1136 
1137 	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1138 		if (entry->ip == ip)
1139 			return entry;
1140 	}
1141 	return NULL;
1142 }
1143 
1144 static void __add_hash_entry(struct ftrace_hash *hash,
1145 			     struct ftrace_func_entry *entry)
1146 {
1147 	struct hlist_head *hhd;
1148 	unsigned long key;
1149 
1150 	if (hash->size_bits)
1151 		key = hash_long(entry->ip, hash->size_bits);
1152 	else
1153 		key = 0;
1154 
1155 	hhd = &hash->buckets[key];
1156 	hlist_add_head(&entry->hlist, hhd);
1157 	hash->count++;
1158 }
1159 
1160 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1161 {
1162 	struct ftrace_func_entry *entry;
1163 
1164 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1165 	if (!entry)
1166 		return -ENOMEM;
1167 
1168 	entry->ip = ip;
1169 	__add_hash_entry(hash, entry);
1170 
1171 	return 0;
1172 }
1173 
1174 static void
1175 free_hash_entry(struct ftrace_hash *hash,
1176 		  struct ftrace_func_entry *entry)
1177 {
1178 	hlist_del(&entry->hlist);
1179 	kfree(entry);
1180 	hash->count--;
1181 }
1182 
1183 static void
1184 remove_hash_entry(struct ftrace_hash *hash,
1185 		  struct ftrace_func_entry *entry)
1186 {
1187 	hlist_del(&entry->hlist);
1188 	hash->count--;
1189 }
1190 
1191 static void ftrace_hash_clear(struct ftrace_hash *hash)
1192 {
1193 	struct hlist_head *hhd;
1194 	struct hlist_node *tn;
1195 	struct ftrace_func_entry *entry;
1196 	int size = 1 << hash->size_bits;
1197 	int i;
1198 
1199 	if (!hash->count)
1200 		return;
1201 
1202 	for (i = 0; i < size; i++) {
1203 		hhd = &hash->buckets[i];
1204 		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1205 			free_hash_entry(hash, entry);
1206 	}
1207 	FTRACE_WARN_ON(hash->count);
1208 }
1209 
1210 static void free_ftrace_hash(struct ftrace_hash *hash)
1211 {
1212 	if (!hash || hash == EMPTY_HASH)
1213 		return;
1214 	ftrace_hash_clear(hash);
1215 	kfree(hash->buckets);
1216 	kfree(hash);
1217 }
1218 
1219 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1220 {
1221 	struct ftrace_hash *hash;
1222 
1223 	hash = container_of(rcu, struct ftrace_hash, rcu);
1224 	free_ftrace_hash(hash);
1225 }
1226 
1227 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1228 {
1229 	if (!hash || hash == EMPTY_HASH)
1230 		return;
1231 	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1232 }
1233 
1234 void ftrace_free_filter(struct ftrace_ops *ops)
1235 {
1236 	ftrace_ops_init(ops);
1237 	free_ftrace_hash(ops->func_hash->filter_hash);
1238 	free_ftrace_hash(ops->func_hash->notrace_hash);
1239 }
1240 
1241 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1242 {
1243 	struct ftrace_hash *hash;
1244 	int size;
1245 
1246 	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1247 	if (!hash)
1248 		return NULL;
1249 
1250 	size = 1 << size_bits;
1251 	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1252 
1253 	if (!hash->buckets) {
1254 		kfree(hash);
1255 		return NULL;
1256 	}
1257 
1258 	hash->size_bits = size_bits;
1259 
1260 	return hash;
1261 }
1262 
1263 static struct ftrace_hash *
1264 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1265 {
1266 	struct ftrace_func_entry *entry;
1267 	struct ftrace_hash *new_hash;
1268 	int size;
1269 	int ret;
1270 	int i;
1271 
1272 	new_hash = alloc_ftrace_hash(size_bits);
1273 	if (!new_hash)
1274 		return NULL;
1275 
1276 	/* Empty hash? */
1277 	if (ftrace_hash_empty(hash))
1278 		return new_hash;
1279 
1280 	size = 1 << hash->size_bits;
1281 	for (i = 0; i < size; i++) {
1282 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1283 			ret = add_hash_entry(new_hash, entry->ip);
1284 			if (ret < 0)
1285 				goto free_hash;
1286 		}
1287 	}
1288 
1289 	FTRACE_WARN_ON(new_hash->count != hash->count);
1290 
1291 	return new_hash;
1292 
1293  free_hash:
1294 	free_ftrace_hash(new_hash);
1295 	return NULL;
1296 }
1297 
1298 static void
1299 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1300 static void
1301 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1302 
1303 static int
1304 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1305 		 struct ftrace_hash **dst, struct ftrace_hash *src)
1306 {
1307 	struct ftrace_func_entry *entry;
1308 	struct hlist_node *tn;
1309 	struct hlist_head *hhd;
1310 	struct ftrace_hash *old_hash;
1311 	struct ftrace_hash *new_hash;
1312 	int size = src->count;
1313 	int bits = 0;
1314 	int i;
1315 
1316 	/*
1317 	 * If the new source is empty, just free dst and assign it
1318 	 * the empty_hash.
1319 	 */
1320 	if (!src->count) {
1321 		new_hash = EMPTY_HASH;
1322 		goto update;
1323 	}
1324 
1325 	/*
1326 	 * Make the hash size about 1/2 the # found
1327 	 */
1328 	for (size /= 2; size; size >>= 1)
1329 		bits++;
1330 
1331 	/* Don't allocate too much */
1332 	if (bits > FTRACE_HASH_MAX_BITS)
1333 		bits = FTRACE_HASH_MAX_BITS;
1334 
1335 	new_hash = alloc_ftrace_hash(bits);
1336 	if (!new_hash)
1337 		return -ENOMEM;
1338 
1339 	size = 1 << src->size_bits;
1340 	for (i = 0; i < size; i++) {
1341 		hhd = &src->buckets[i];
1342 		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1343 			remove_hash_entry(src, entry);
1344 			__add_hash_entry(new_hash, entry);
1345 		}
1346 	}
1347 
1348 update:
1349 	/*
1350 	 * Remove the current set, update the hash and add
1351 	 * them back.
1352 	 */
1353 	ftrace_hash_rec_disable_modify(ops, enable);
1354 
1355 	old_hash = *dst;
1356 	rcu_assign_pointer(*dst, new_hash);
1357 	free_ftrace_hash_rcu(old_hash);
1358 
1359 	ftrace_hash_rec_enable_modify(ops, enable);
1360 
1361 	return 0;
1362 }
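
/*
 * Example of the sizing loop above (illustrative): src->count = 100
 * gives size = 50, which needs six shifts to reach zero, so bits = 6
 * and the new hash has 64 buckets, i.e. about half as many buckets
 * as entries.
 */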
1363 
1364 /*
1365  * Test the hashes for this ops to see if we want to call
1366  * the ops->func or not.
1367  *
1368  * It's a match if the ip is in the ops->filter_hash or
1369  * the filter_hash does not exist or is empty,
1370  *  AND
1371  * the ip is not in the ops->notrace_hash.
1372  *
1373  * This needs to be called with preemption disabled as
1374  * the hashes are freed with call_rcu_sched().
1375  */
1376 static int
1377 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1378 {
1379 	struct ftrace_hash *filter_hash;
1380 	struct ftrace_hash *notrace_hash;
1381 	int ret;
1382 
1383 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1384 	/*
1385 	 * There's a small race when adding ops in which the ftrace handler
1386 	 * that wants regs may be called without them. We cannot
1387 	 * allow that handler to be called if regs is NULL.
1388 	 */
1389 	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1390 		return 0;
1391 #endif
1392 
1393 	filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
1394 	notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
1395 
1396 	if ((ftrace_hash_empty(filter_hash) ||
1397 	     ftrace_lookup_ip(filter_hash, ip)) &&
1398 	    (ftrace_hash_empty(notrace_hash) ||
1399 	     !ftrace_lookup_ip(notrace_hash, ip)))
1400 		ret = 1;
1401 	else
1402 		ret = 0;
1403 
1404 	return ret;
1405 }
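
/*
 * Worked example (illustrative): with filter_hash = {foo, bar} and
 * notrace_hash = {bar}:
 *
 *	ip == foo: in filter, not in notrace        -> 1
 *	ip == bar: in filter, but also in notrace   -> 0
 *	ip == baz: filter is non-empty, baz missing -> 0
 *
 * With both hashes empty, every ip returns 1.
 */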
1406 
1407 /*
1408  * This is a double for. Do not use 'break' to break out of the loop;
1409  * you must use a goto.
1410  */
1411 #define do_for_each_ftrace_rec(pg, rec)					\
1412 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
1413 		int _____i;						\
1414 		for (_____i = 0; _____i < pg->index; _____i++) {	\
1415 			rec = &pg->records[_____i];
1416 
1417 #define while_for_each_ftrace_rec()		\
1418 		}				\
1419 	}
1420 
1421 
1422 static int ftrace_cmp_recs(const void *a, const void *b)
1423 {
1424 	const struct dyn_ftrace *key = a;
1425 	const struct dyn_ftrace *rec = b;
1426 
1427 	if (key->flags < rec->ip)
1428 		return -1;
1429 	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1430 		return 1;
1431 	return 0;
1432 }
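
/*
 * How the overloaded key works (sketch): for a lookup of the range
 * [start, end], key.ip = start and key.flags = end.  ftrace_cmp_recs()
 * reports "key below rec" only when end < rec->ip, and "key above rec"
 * only when start is past rec's MCOUNT_INSN_SIZE window, so any record
 * whose instruction window overlaps [start, end] compares equal and is
 * returned by bsearch().
 */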
1433 
1434 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1435 {
1436 	struct ftrace_page *pg;
1437 	struct dyn_ftrace *rec;
1438 	struct dyn_ftrace key;
1439 
1440 	key.ip = start;
1441 	key.flags = end;	/* overload flags, as it is unsigned long */
1442 
1443 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1444 		if (end < pg->records[0].ip ||
1445 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1446 			continue;
1447 		rec = bsearch(&key, pg->records, pg->index,
1448 			      sizeof(struct dyn_ftrace),
1449 			      ftrace_cmp_recs);
1450 		if (rec)
1451 			return rec->ip;
1452 	}
1453 
1454 	return 0;
1455 }
1456 
1457 /**
1458  * ftrace_location - return rec->ip if the given ip is a traced location
1459  * @ip: the instruction pointer to check
1460  *
1461  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1462  * That is, the instruction that is either a NOP or call to
1463  * the function tracer. It checks the ftrace internal tables to
1464  * determine if the address belongs or not.
1465  */
1466 unsigned long ftrace_location(unsigned long ip)
1467 {
1468 	return ftrace_location_range(ip, ip);
1469 }
1470 
1471 /**
1472  * ftrace_text_reserved - return true if range contains an ftrace location
1473  * @start: start of range to search
1474  * @end: end of range to search (inclusive). @end points to the last byte to check.
1475  *
1476  * Returns 1 if @start and @end contains a ftrace location.
1477  * That is, the instruction that is either a NOP or call to
1478  * the function tracer. It checks the ftrace internal tables to
1479  * determine if the address belongs or not.
1480  */
1481 int ftrace_text_reserved(const void *start, const void *end)
1482 {
1483 	unsigned long ret;
1484 
1485 	ret = ftrace_location_range((unsigned long)start,
1486 				    (unsigned long)end);
1487 
1488 	return (int)!!ret;
1489 }
1490 
1491 /* Test if ops registered to this rec needs regs */
1492 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1493 {
1494 	struct ftrace_ops *ops;
1495 	bool keep_regs = false;
1496 
1497 	for (ops = ftrace_ops_list;
1498 	     ops != &ftrace_list_end; ops = ops->next) {
1499 		/* pass rec in as regs to have non-NULL val */
1500 		if (ftrace_ops_test(ops, rec->ip, rec)) {
1501 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1502 				keep_regs = true;
1503 				break;
1504 			}
1505 		}
1506 	}
1507 
1508 	return  keep_regs;
1509 }
1510 
1511 static void ftrace_remove_tramp(struct ftrace_ops *ops,
1512 				struct dyn_ftrace *rec)
1513 {
1514 	/* If TRAMP is not set, no ops should have a trampoline for this */
1515 	if (!(rec->flags & FTRACE_FL_TRAMP))
1516 		return;
1517 
1518 	rec->flags &= ~FTRACE_FL_TRAMP;
1519 
1520 	if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
1521 	     !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
1522 	    ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
1523 		return;
1524 	/*
1525 	 * The tramp_hash entry will be removed at the time
1526 	 * of the update.
1527 	 */
1528 	ops->nr_trampolines--;
1529 }
1530 
1531 static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
1532 {
1533 	struct ftrace_ops *op;
1534 
1535 	/* If TRAMP is not set, no ops should have a trampoline for this */
1536 	if (!(rec->flags & FTRACE_FL_TRAMP))
1537 		return;
1538 
1539 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1540 		/*
1541 		 * This function is called to clear other tramps
1542 		 * not the one that is being updated.
1543 		 */
1544 		if (op == ops)
1545 			continue;
1546 		if (op->nr_trampolines)
1547 			ftrace_remove_tramp(op, rec);
1548 	} while_for_each_ftrace_op(op);
1549 }
1550 
1551 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1552 				     int filter_hash,
1553 				     bool inc)
1554 {
1555 	struct ftrace_hash *hash;
1556 	struct ftrace_hash *other_hash;
1557 	struct ftrace_page *pg;
1558 	struct dyn_ftrace *rec;
1559 	int count = 0;
1560 	int all = 0;
1561 
1562 	/* Only update if the ops has been registered */
1563 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1564 		return;
1565 
1566 	/*
1567 	 * In the filter_hash case:
1568 	 *   If the count is zero, we update all records.
1569 	 *   Otherwise we just update the items in the hash.
1570 	 *
1571 	 * In the notrace_hash case:
1572 	 *   We enable the update in the hash.
1573 	 *   As disabling notrace means enabling the tracing,
1574 	 *   and enabling notrace means disabling, the inc variable
1575 	 *   gets inverted.
1576 	 */
1577 	if (filter_hash) {
1578 		hash = ops->func_hash->filter_hash;
1579 		other_hash = ops->func_hash->notrace_hash;
1580 		if (ftrace_hash_empty(hash))
1581 			all = 1;
1582 	} else {
1583 		inc = !inc;
1584 		hash = ops->func_hash->notrace_hash;
1585 		other_hash = ops->func_hash->filter_hash;
1586 		/*
1587 		 * If the notrace hash has no items,
1588 		 * then there's nothing to do.
1589 		 */
1590 		if (ftrace_hash_empty(hash))
1591 			return;
1592 	}
1593 
1594 	do_for_each_ftrace_rec(pg, rec) {
1595 		int in_other_hash = 0;
1596 		int in_hash = 0;
1597 		int match = 0;
1598 
1599 		if (all) {
1600 			/*
1601 			 * Only the filter_hash affects all records.
1602 			 * Update if the record is not in the notrace hash.
1603 			 */
1604 			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1605 				match = 1;
1606 		} else {
1607 			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1608 			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1609 
1610 			/*
1611 			 * If filter_hash is set, we want to match all functions
1612 			 * that are in the hash but not in the other hash.
1613 			 *
1614 			 * If filter_hash is not set, then we are decrementing.
1615 			 * That means we match anything that is in the hash
1616 			 * and also in the other_hash. That is, we need to turn
1617 			 * off functions in the other hash because they are disabled
1618 			 * by this hash.
1619 			 */
1620 			if (filter_hash && in_hash && !in_other_hash)
1621 				match = 1;
1622 			else if (!filter_hash && in_hash &&
1623 				 (in_other_hash || ftrace_hash_empty(other_hash)))
1624 				match = 1;
1625 		}
1626 		if (!match)
1627 			continue;
1628 
1629 		if (inc) {
1630 			rec->flags++;
1631 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1632 				return;
1633 
1634 			/*
1635 			 * If there's only a single callback registered to a
1636 			 * function, and the ops has a trampoline registered
1637 			 * for it, then we can call it directly.
1638 			 */
1639 			if (ftrace_rec_count(rec) == 1 && ops->trampoline) {
1640 				rec->flags |= FTRACE_FL_TRAMP;
1641 				ops->nr_trampolines++;
1642 			} else {
1643 				/*
1644 				 * If we are adding another function callback
1645 				 * to this function, and the previous had a
1646 				 * custom trampoline in use, then we need to go
1647 				 * back to the default trampoline.
1648 				 */
1649 				ftrace_clear_tramps(rec, ops);
1650 			}
1651 
1652 			/*
1653 			 * If any ops wants regs saved for this function
1654 			 * then all ops will get saved regs.
1655 			 */
1656 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1657 				rec->flags |= FTRACE_FL_REGS;
1658 		} else {
1659 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1660 				return;
1661 			rec->flags--;
1662 
1663 			if (ops->trampoline && !ftrace_rec_count(rec))
1664 				ftrace_remove_tramp(ops, rec);
1665 
1666 			/*
1667 			 * If the rec had REGS enabled and the ops that is
1668 			 * being removed had REGS set, then see if there is
1669 			 * still any ops for this record that wants regs.
1670 			 * If not, we can stop recording them.
1671 			 */
1672 			if (ftrace_rec_count(rec) > 0 &&
1673 			    rec->flags & FTRACE_FL_REGS &&
1674 			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1675 				if (!test_rec_ops_needs_regs(rec))
1676 					rec->flags &= ~FTRACE_FL_REGS;
1677 			}
1678 
1679 			/*
1680 			 * flags will be cleared in ftrace_check_record()
1681 			 * if rec count is zero.
1682 			 */
1683 		}
1684 		count++;
1685 		/* Shortcut, if we handled all records, we are done. */
1686 		if (!all && count == hash->count)
1687 			return;
1688 	} while_for_each_ftrace_rec();
1689 }
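
/*
 * Example of the accounting above (illustrative): the upper bits of
 * rec->flags hold the FTRACE_FL_* state while the low bits form the
 * ref count read by ftrace_rec_count(), so "rec->flags++" and
 * "rec->flags--" adjust only that count; two registered ops tracing
 * the same function leave ftrace_rec_count(rec) == 2.
 */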
1690 
1691 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1692 				    int filter_hash)
1693 {
1694 	__ftrace_hash_rec_update(ops, filter_hash, 0);
1695 }
1696 
1697 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1698 				   int filter_hash)
1699 {
1700 	__ftrace_hash_rec_update(ops, filter_hash, 1);
1701 }
1702 
1703 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1704 					  int filter_hash, int inc)
1705 {
1706 	struct ftrace_ops *op;
1707 
1708 	__ftrace_hash_rec_update(ops, filter_hash, inc);
1709 
1710 	if (ops->func_hash != &global_ops.local_hash)
1711 		return;
1712 
1713 	/*
1714 	 * If the ops shares the global_ops hash, then we need to update
1715 	 * all ops that are enabled and use this hash.
1716 	 */
1717 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1718 		/* Already done */
1719 		if (op == ops)
1720 			continue;
1721 		if (op->func_hash == &global_ops.local_hash)
1722 			__ftrace_hash_rec_update(op, filter_hash, inc);
1723 	} while_for_each_ftrace_op(op);
1724 }
1725 
1726 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1727 					   int filter_hash)
1728 {
1729 	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1730 }
1731 
1732 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1733 					  int filter_hash)
1734 {
1735 	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1736 }
1737 
1738 static void print_ip_ins(const char *fmt, unsigned char *p)
1739 {
1740 	int i;
1741 
1742 	printk(KERN_CONT "%s", fmt);
1743 
1744 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1745 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1746 }
1747 
1748 /**
1749  * ftrace_bug - report and shutdown function tracer
1750  * @failed: The failed type (EFAULT, EINVAL, EPERM)
1751  * @ip: The address that failed
1752  *
1753  * The arch code that enables or disables the function tracing
1754  * can call ftrace_bug() when it has detected a problem in
1755  * modifying the code. @failed should be one of either:
1756  * EFAULT - if the problem happens on reading the @ip address
1757  * EINVAL - if what is read at @ip is not what was expected
1758  * EPERM - if the problem happens on writting to the @ip address
1759  */
1760 void ftrace_bug(int failed, unsigned long ip)
1761 {
1762 	switch (failed) {
1763 	case -EFAULT:
1764 		FTRACE_WARN_ON_ONCE(1);
1765 		pr_info("ftrace faulted on modifying ");
1766 		print_ip_sym(ip);
1767 		break;
1768 	case -EINVAL:
1769 		FTRACE_WARN_ON_ONCE(1);
1770 		pr_info("ftrace failed to modify ");
1771 		print_ip_sym(ip);
1772 		print_ip_ins(" actual: ", (unsigned char *)ip);
1773 		printk(KERN_CONT "\n");
1774 		break;
1775 	case -EPERM:
1776 		FTRACE_WARN_ON_ONCE(1);
1777 		pr_info("ftrace faulted on writing ");
1778 		print_ip_sym(ip);
1779 		break;
1780 	default:
1781 		FTRACE_WARN_ON_ONCE(1);
1782 		pr_info("ftrace faulted on unknown error ");
1783 		print_ip_sym(ip);
1784 	}
1785 }
1786 
1787 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1788 {
1789 	unsigned long flag = 0UL;
1790 
1791 	/*
1792 	 * If we are updating calls:
1793 	 *
1794 	 *   If the record has a ref count, then we need to enable it
1795 	 *   because someone is using it.
1796 	 *
1797 	 *   Otherwise we make sure it is disabled.
1798 	 *
1799 	 * If we are disabling calls, then disable all records that
1800 	 * are enabled.
1801 	 */
1802 	if (enable && ftrace_rec_count(rec))
1803 		flag = FTRACE_FL_ENABLED;
1804 
1805 	/*
1806 	 * If enabling and the REGS flag does not match the REGS_EN, or
1807 	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
1808 	 * this record. Set flags to fail the compare against ENABLED.
1809 	 */
1810 	if (flag) {
1811 		if (!(rec->flags & FTRACE_FL_REGS) !=
1812 		    !(rec->flags & FTRACE_FL_REGS_EN))
1813 			flag |= FTRACE_FL_REGS;
1814 
1815 		if (!(rec->flags & FTRACE_FL_TRAMP) !=
1816 		    !(rec->flags & FTRACE_FL_TRAMP_EN))
1817 			flag |= FTRACE_FL_TRAMP;
1818 	}
1819 
1820 	/* If the state of this record hasn't changed, then do nothing */
1821 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1822 		return FTRACE_UPDATE_IGNORE;
1823 
1824 	if (flag) {
1825 		/* Save off if rec is being enabled (for return value) */
1826 		flag ^= rec->flags & FTRACE_FL_ENABLED;
1827 
1828 		if (update) {
1829 			rec->flags |= FTRACE_FL_ENABLED;
1830 			if (flag & FTRACE_FL_REGS) {
1831 				if (rec->flags & FTRACE_FL_REGS)
1832 					rec->flags |= FTRACE_FL_REGS_EN;
1833 				else
1834 					rec->flags &= ~FTRACE_FL_REGS_EN;
1835 			}
1836 			if (flag & FTRACE_FL_TRAMP) {
1837 				if (rec->flags & FTRACE_FL_TRAMP)
1838 					rec->flags |= FTRACE_FL_TRAMP_EN;
1839 				else
1840 					rec->flags &= ~FTRACE_FL_TRAMP_EN;
1841 			}
1842 		}
1843 
1844 		/*
1845 		 * If this record is being updated from a nop, then
1846 		 *   return UPDATE_MAKE_CALL.
1847 		 * Otherwise,
1848 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
1849 		 *   from the save-regs to a non-save-regs function, or
1850 		 *   vice versa, or from a trampoline call.
1851 		 */
1852 		if (flag & FTRACE_FL_ENABLED)
1853 			return FTRACE_UPDATE_MAKE_CALL;
1854 
1855 		return FTRACE_UPDATE_MODIFY_CALL;
1856 	}
1857 
1858 	if (update) {
1859 		/* If there's no more users, clear all flags */
1860 		if (!ftrace_rec_count(rec))
1861 			rec->flags = 0;
1862 		else
1863 			/* Just disable the record (keep REGS state) */
1864 			rec->flags &= ~FTRACE_FL_ENABLED;
1865 	}
1866 
1867 	return FTRACE_UPDATE_MAKE_NOP;
1868 }
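
/*
 * Decision summary for ftrace_check_record() (sketch):
 *
 *	want on, currently off           -> FTRACE_UPDATE_MAKE_CALL
 *	want on, on but REGS/TRAMP skew  -> FTRACE_UPDATE_MODIFY_CALL
 *	want off, currently on           -> FTRACE_UPDATE_MAKE_NOP
 *	no change                        -> FTRACE_UPDATE_IGNORE
 */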
1869 
1870 /**
1871  * ftrace_update_record - set a record that now is tracing or not
1872  * @rec: the record to update
1873  * @enable: set to 1 if the record is tracing, zero to force disable
1874  *
1875  * The records that represent all functions that can be traced need
1876  * to be updated when tracing has been enabled.
1877  */
1878 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1879 {
1880 	return ftrace_check_record(rec, enable, 1);
1881 }
1882 
1883 /**
1884  * ftrace_test_record - check if the record has been enabled or not
1885  * @rec: the record to test
1886  * @enable: set to 1 to check if enabled, 0 if it is disabled
1887  *
1888  * The arch code may need to test if a record is already set to
1889  * tracing to determine how to modify the function code that it
1890  * represents.
1891  */
1892 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1893 {
1894 	return ftrace_check_record(rec, enable, 0);
1895 }
1896 
1897 static struct ftrace_ops *
1898 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
1899 {
1900 	struct ftrace_ops *op;
1901 
1902 	/* Removed ops need to be tested first */
1903 	if (removed_ops && removed_ops->tramp_hash) {
1904 		if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip))
1905 			return removed_ops;
1906 	}
1907 
1908 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1909 		if (!op->tramp_hash)
1910 			continue;
1911 
1912 		if (ftrace_lookup_ip(op->tramp_hash, rec->ip))
1913 			return op;
1914 
1915 	} while_for_each_ftrace_op(op);
1916 
1917 	return NULL;
1918 }
1919 
1920 static struct ftrace_ops *
1921 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
1922 {
1923 	struct ftrace_ops *op;
1924 
1925 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1926 		/* pass rec in as regs to have non-NULL val */
1927 		if (ftrace_ops_test(op, rec->ip, rec))
1928 			return op;
1929 	} while_for_each_ftrace_op(op);
1930 
1931 	return NULL;
1932 }
1933 
1934 /**
1935  * ftrace_get_addr_new - Get the call address to set to
1936  * @rec:  The ftrace record descriptor
1937  *
1938  * If the record has the FTRACE_FL_REGS set, that means that it
1939  * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
1940  * is not set, then it wants to convert to the normal callback.
1941  *
1942  * Returns the address of the trampoline to set to
1943  */
1944 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
1945 {
1946 	struct ftrace_ops *ops;
1947 
1948 	/* Trampolines take precedence over regs */
1949 	if (rec->flags & FTRACE_FL_TRAMP) {
1950 		ops = ftrace_find_tramp_ops_new(rec);
1951 		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
1952 			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
1953 				(void *)rec->ip, (void *)rec->ip, rec->flags);
1954 			/* Ftrace is shutting down, return anything */
1955 			return (unsigned long)FTRACE_ADDR;
1956 		}
1957 		return ops->trampoline;
1958 	}
1959 
1960 	if (rec->flags & FTRACE_FL_REGS)
1961 		return (unsigned long)FTRACE_REGS_ADDR;
1962 	else
1963 		return (unsigned long)FTRACE_ADDR;
1964 }
1965 
1966 /**
1967  * ftrace_get_addr_curr - Get the call address that is already there
1968  * @rec:  The ftrace record descriptor
1969  *
1970  * The FTRACE_FL_REGS_EN is set when the record already points to
1971  * a function that saves all the regs. Basically the '_EN' version
1972  * represents the current state of the function.
1973  *
1974  * Returns the address of the trampoline that is currently being called
1975  */
1976 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
1977 {
1978 	struct ftrace_ops *ops;
1979 
1980 	/* Trampolines take precedence over regs */
1981 	if (rec->flags & FTRACE_FL_TRAMP_EN) {
1982 		ops = ftrace_find_tramp_ops_curr(rec);
1983 		if (FTRACE_WARN_ON(!ops)) {
1984 			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
1985 				(void *)rec->ip, (void *)rec->ip);
1986 			/* Ftrace is shutting down, return anything */
1987 			return (unsigned long)FTRACE_ADDR;
1988 		}
1989 		return ops->trampoline;
1990 	}
1991 
1992 	if (rec->flags & FTRACE_FL_REGS_EN)
1993 		return (unsigned long)FTRACE_REGS_ADDR;
1994 	else
1995 		return (unsigned long)FTRACE_ADDR;
1996 }
1997 
1998 static int
1999 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2000 {
2001 	unsigned long ftrace_old_addr;
2002 	unsigned long ftrace_addr;
2003 	int ret;
2004 
2005 	ftrace_addr = ftrace_get_addr_new(rec);
2006 
2007 	/* This needs to be done before we call ftrace_update_record */
2008 	ftrace_old_addr = ftrace_get_addr_curr(rec);
2009 
2010 	ret = ftrace_update_record(rec, enable);
2011 
2012 	switch (ret) {
2013 	case FTRACE_UPDATE_IGNORE:
2014 		return 0;
2015 
2016 	case FTRACE_UPDATE_MAKE_CALL:
2017 		return ftrace_make_call(rec, ftrace_addr);
2018 
2019 	case FTRACE_UPDATE_MAKE_NOP:
2020 		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2021 
2022 	case FTRACE_UPDATE_MODIFY_CALL:
2023 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2024 	}
2025 
2026 	return -1; /* unknown ftrace bug */
2027 }
2028 
2029 void __weak ftrace_replace_code(int enable)
2030 {
2031 	struct dyn_ftrace *rec;
2032 	struct ftrace_page *pg;
2033 	int failed;
2034 
2035 	if (unlikely(ftrace_disabled))
2036 		return;
2037 
2038 	do_for_each_ftrace_rec(pg, rec) {
2039 		failed = __ftrace_replace_code(rec, enable);
2040 		if (failed) {
2041 			ftrace_bug(failed, rec->ip);
2042 			/* Stop processing */
2043 			return;
2044 		}
2045 	} while_for_each_ftrace_rec();
2046 }
2047 
2048 struct ftrace_rec_iter {
2049 	struct ftrace_page	*pg;
2050 	int			index;
2051 };
2052 
2053 /**
2054  * ftrace_rec_iter_start - start up iterating over traced functions
2055  *
2056  * Returns an iterator handle that is used to iterate over all
2057  * the records that represent address locations where functions
2058  * are traced.
2059  *
2060  * May return NULL if no records are available.
2061  */
2062 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2063 {
2064 	/*
2065 	 * We only use a single iterator.
2066 	 * Protected by the ftrace_lock mutex.
2067 	 */
2068 	static struct ftrace_rec_iter ftrace_rec_iter;
2069 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2070 
2071 	iter->pg = ftrace_pages_start;
2072 	iter->index = 0;
2073 
2074 	/* Could have empty pages */
2075 	while (iter->pg && !iter->pg->index)
2076 		iter->pg = iter->pg->next;
2077 
2078 	if (!iter->pg)
2079 		return NULL;
2080 
2081 	return iter;
2082 }
2083 
2084 /**
2085  * ftrace_rec_iter_next - get the next record to process.
2086  * @iter: The handle to the iterator.
2087  *
2088  * Returns the next iterator after the given iterator @iter.
2089  */
2090 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2091 {
2092 	iter->index++;
2093 
2094 	if (iter->index >= iter->pg->index) {
2095 		iter->pg = iter->pg->next;
2096 		iter->index = 0;
2097 
2098 		/* Could have empty pages */
2099 		while (iter->pg && !iter->pg->index)
2100 			iter->pg = iter->pg->next;
2101 	}
2102 
2103 	if (!iter->pg)
2104 		return NULL;
2105 
2106 	return iter;
2107 }
2108 
2109 /**
2110  * ftrace_rec_iter_record - get the record at the iterator location
2111  * @iter: The current iterator location
2112  *
2113  * Returns the record that the current @iter is at.
2114  */
2115 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2116 {
2117 	return &iter->pg->records[iter->index];
2118 }
2119 
2120 static int
2121 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2122 {
2123 	unsigned long ip;
2124 	int ret;
2125 
2126 	ip = rec->ip;
2127 
2128 	if (unlikely(ftrace_disabled))
2129 		return 0;
2130 
2131 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2132 	if (ret) {
2133 		ftrace_bug(ret, ip);
2134 		return 0;
2135 	}
2136 	return 1;
2137 }
2138 
2139 /*
2140  * archs can override this function if they must do something
2141  * before the modifying code is performed.
2142  */
2143 int __weak ftrace_arch_code_modify_prepare(void)
2144 {
2145 	return 0;
2146 }
2147 
2148 /*
2149  * archs can override this function if they must do something
2150  * after the modifying code is performed.
2151  */
2152 int __weak ftrace_arch_code_modify_post_process(void)
2153 {
2154 	return 0;
2155 }
2156 
2157 void ftrace_modify_all_code(int command)
2158 {
2159 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2160 	int err = 0;
2161 
2162 	/*
2163 	 * If the ftrace_caller calls an ftrace_ops func directly,
2164 	 * we need to make sure that it only traces functions it
2165 	 * expects to trace. When doing the switch of functions,
2166 	 * we need to update to the ftrace_ops_list_func first
2167 	 * before the transition between old and new calls is made,
2168 	 * as the ftrace_ops_list_func will check the ops hashes
2169 	 * to make sure the ops have the right functions
2170 	 * traced.
2171 	 */
2172 	if (update) {
2173 		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2174 		if (FTRACE_WARN_ON(err))
2175 			return;
2176 	}
2177 
2178 	if (command & FTRACE_UPDATE_CALLS)
2179 		ftrace_replace_code(1);
2180 	else if (command & FTRACE_DISABLE_CALLS)
2181 		ftrace_replace_code(0);
2182 
2183 	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2184 		function_trace_op = set_function_trace_op;
2185 		smp_wmb();
2186 		/* If irqs are disabled, we are in stop machine */
2187 		if (!irqs_disabled())
2188 			smp_call_function(ftrace_sync_ipi, NULL, 1);
2189 		err = ftrace_update_ftrace_func(ftrace_trace_function);
2190 		if (FTRACE_WARN_ON(err))
2191 			return;
2192 	}
2193 
2194 	if (command & FTRACE_START_FUNC_RET)
2195 		err = ftrace_enable_ftrace_graph_caller();
2196 	else if (command & FTRACE_STOP_FUNC_RET)
2197 		err = ftrace_disable_ftrace_graph_caller();
2198 	FTRACE_WARN_ON(err);
2199 }
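
/*
 * Note that @command is a bitmask, so the FTRACE_* flags can be
 * combined. For example, enabling the call sites while also switching
 * the trace callback would look roughly like:
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC);
 */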
2200 
2201 static int __ftrace_modify_code(void *data)
2202 {
2203 	int *command = data;
2204 
2205 	ftrace_modify_all_code(*command);
2206 
2207 	return 0;
2208 }
2209 
2210 /**
2211  * ftrace_run_stop_machine - go back to the stop machine method
2212  * @command: The command to tell ftrace what to do
2213  *
2214  * If an arch needs to fall back to the stop machine method, then
2215  * it can call this function.
2216  */
2217 void ftrace_run_stop_machine(int command)
2218 {
2219 	stop_machine(__ftrace_modify_code, &command, NULL);
2220 }
2221 
2222 /**
2223  * arch_ftrace_update_code - modify the code to trace or not trace
2224  * @command: The command that needs to be done
2225  *
2226  * Archs can override this function if they do not need to
2227  * run stop_machine() to modify code.
2228  */
2229 void __weak arch_ftrace_update_code(int command)
2230 {
2231 	ftrace_run_stop_machine(command);
2232 }
2233 
2234 static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
2235 {
2236 	struct ftrace_page *pg;
2237 	struct dyn_ftrace *rec;
2238 	int size, bits;
2239 	int ret;
2240 
2241 	size = ops->nr_trampolines;
2242 	bits = 0;
2243 	/*
2244 	 * Make the hash size about 1/2 the # found
2245 	 */
2246 	for (size /= 2; size; size >>= 1)
2247 		bits++;
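	/*
	 * For example, with nr_trampolines == 100, size starts at 50
	 * and is shifted right six times before reaching zero, so
	 * bits == 6 and the hash gets 1 << 6 == 64 buckets -- roughly
	 * half the number of records.
	 */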
2248 
2249 	ops->tramp_hash = alloc_ftrace_hash(bits);
2250 	/*
2251 	 * TODO: a failed allocation is going to screw up
2252 	 * the accounting of what needs to be modified
2253 	 * and not. For now, we kill ftrace if we fail
2254 	 * to allocate here. There are ways around this,
2255 	 * but that will take a little more work.
2256 	 */
2257 	if (!ops->tramp_hash)
2258 		return -ENOMEM;
2259 
2260 	do_for_each_ftrace_rec(pg, rec) {
2261 		if (ftrace_rec_count(rec) == 1 &&
2262 		    ftrace_ops_test(ops, rec->ip, rec)) {
2263 
2264 			/*
2265 			 * If another ops adds to a rec, the rec will
2266 			 * lose its trampoline and never get it back
2267 			 * until all ops are off of it.
2268 			 */
2269 			if (!(rec->flags & FTRACE_FL_TRAMP))
2270 				continue;
2271 
2272 			/* This record had better have a trampoline */
2273 			if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN)))
2274 				return -1;
2275 
2276 			ret = add_hash_entry(ops->tramp_hash, rec->ip);
2277 			if (ret < 0)
2278 				return ret;
2279 		}
2280 	} while_for_each_ftrace_rec();
2281 
2282 	/* The number of recs in the hash must match nr_trampolines */
2283 	if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
2284 		pr_warn("count=%ld trampolines=%d\n",
2285 			ops->tramp_hash->count,
2286 			ops->nr_trampolines);
2287 
2288 	return 0;
2289 }
2290 
2291 static int ftrace_save_tramp_hashes(void)
2292 {
2293 	struct ftrace_ops *op;
2294 	int ret;
2295 
2296 	/*
2297 	 * Now that trampolines are in use, we need to save the
2298 	 * hashes for the ops that have them. This allows the mapping
2299 	 * back from the record to the ops that has the trampoline to
2300 	 * know what code is being replaced. Modifying code must always
2301 	 * verify what it is changing.
2302 	 */
2303 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2304 
2305 		/* The tramp_hash is recreated each time. */
2306 		free_ftrace_hash(op->tramp_hash);
2307 		op->tramp_hash = NULL;
2308 
2309 		if (op->nr_trampolines) {
2310 			ret = ftrace_save_ops_tramp_hash(op);
2311 			if (ret)
2312 				return ret;
2313 		}
2314 
2315 	} while_for_each_ftrace_op(op);
2316 
2317 	return 0;
2318 }
2319 
2320 static void ftrace_run_update_code(int command)
2321 {
2322 	int ret;
2323 
2324 	ret = ftrace_arch_code_modify_prepare();
2325 	FTRACE_WARN_ON(ret);
2326 	if (ret)
2327 		return;
2328 
2329 	/*
2330 	 * By default we use stop_machine() to modify the code.
2331 	 * But archs can do whatever they want as long as it
2332 	 * is safe. The stop_machine() is the safest, but also
2333 	 * produces the most overhead.
2334 	 */
2335 	arch_ftrace_update_code(command);
2336 
2337 	ret = ftrace_arch_code_modify_post_process();
2338 	FTRACE_WARN_ON(ret);
2339 
2340 	ret = ftrace_save_tramp_hashes();
2341 	FTRACE_WARN_ON(ret);
2342 }
2343 
2344 static ftrace_func_t saved_ftrace_func;
2345 static int ftrace_start_up;
2346 
2347 static void control_ops_free(struct ftrace_ops *ops)
2348 {
2349 	free_percpu(ops->disabled);
2350 }
2351 
2352 static void ftrace_startup_enable(int command)
2353 {
2354 	if (saved_ftrace_func != ftrace_trace_function) {
2355 		saved_ftrace_func = ftrace_trace_function;
2356 		command |= FTRACE_UPDATE_TRACE_FUNC;
2357 	}
2358 
2359 	if (!command || !ftrace_enabled)
2360 		return;
2361 
2362 	ftrace_run_update_code(command);
2363 }
2364 
2365 static int ftrace_startup(struct ftrace_ops *ops, int command)
2366 {
2367 	int ret;
2368 
2369 	if (unlikely(ftrace_disabled))
2370 		return -ENODEV;
2371 
2372 	ret = __register_ftrace_function(ops);
2373 	if (ret)
2374 		return ret;
2375 
2376 	ftrace_start_up++;
2377 	command |= FTRACE_UPDATE_CALLS;
2378 
2379 	ops->flags |= FTRACE_OPS_FL_ENABLED;
2380 
2381 	ftrace_hash_rec_enable(ops, 1);
2382 
2383 	ftrace_startup_enable(command);
2384 
2385 	return 0;
2386 }
2387 
2388 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2389 {
2390 	int ret;
2391 
2392 	if (unlikely(ftrace_disabled))
2393 		return -ENODEV;
2394 
2395 	ret = __unregister_ftrace_function(ops);
2396 	if (ret)
2397 		return ret;
2398 
2399 	ftrace_start_up--;
2400 	/*
2401 	 * Just warn in case of an imbalance; no need to kill ftrace, it's not
2402 	 * critical, but the ftrace_call callers may never be nopped again after
2403 	 * further ftrace uses.
2404 	 */
2405 	WARN_ON_ONCE(ftrace_start_up < 0);
2406 
2407 	ftrace_hash_rec_disable(ops, 1);
2408 
2409 	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2410 
2411 	command |= FTRACE_UPDATE_CALLS;
2412 
2413 	if (saved_ftrace_func != ftrace_trace_function) {
2414 		saved_ftrace_func = ftrace_trace_function;
2415 		command |= FTRACE_UPDATE_TRACE_FUNC;
2416 	}
2417 
2418 	if (!command || !ftrace_enabled) {
2419 		/*
2420 		 * If these are control ops, they still need their
2421 	 * per_cpu field freed. Since function tracing is
2422 		 * not currently active, we can just free them
2423 		 * without synchronizing all CPUs.
2424 		 */
2425 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
2426 			control_ops_free(ops);
2427 		return 0;
2428 	}
2429 
2430 	/*
2431 	 * If the ops uses a trampoline, then it needs to be
2432 	 * tested first on update.
2433 	 */
2434 	removed_ops = ops;
2435 
2436 	ftrace_run_update_code(command);
2437 
2438 	removed_ops = NULL;
2439 
2440 	/*
2441 	 * Dynamic ops may be freed; we must make sure that all
2442 	 * callers are done before leaving this function.
2443 	 * The same goes for freeing the per_cpu data of the control
2444 	 * ops.
2445 	 *
2446 	 * Again, normal synchronize_sched() is not good enough.
2447 	 * We need to do a hard force of sched synchronization.
2448 	 * This is because we use preempt_disable() to do RCU, but
2449 	 * the function tracers can be called where RCU is not watching
2450 	 * (like before user_exit()). We cannot rely on the RCU
2451 	 * infrastructure to do the synchronization, thus we must do it
2452 	 * ourselves.
2453 	 */
2454 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2455 		schedule_on_each_cpu(ftrace_sync);
2456 
2457 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
2458 			control_ops_free(ops);
2459 	}
2460 
2461 	return 0;
2462 }
2463 
2464 static void ftrace_startup_sysctl(void)
2465 {
2466 	if (unlikely(ftrace_disabled))
2467 		return;
2468 
2469 	/* Force update next time */
2470 	saved_ftrace_func = NULL;
2471 	/* ftrace_start_up is true if we want ftrace running */
2472 	if (ftrace_start_up)
2473 		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2474 }
2475 
2476 static void ftrace_shutdown_sysctl(void)
2477 {
2478 	if (unlikely(ftrace_disabled))
2479 		return;
2480 
2481 	/* ftrace_start_up is true if ftrace is running */
2482 	if (ftrace_start_up)
2483 		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2484 }
2485 
2486 static cycle_t		ftrace_update_time;
2487 unsigned long		ftrace_update_tot_cnt;
2488 
2489 static inline int ops_traces_mod(struct ftrace_ops *ops)
2490 {
2491 	/*
2492 	 * An empty filter_hash defaults to tracing all module functions.
2493 	 * But the notrace hash requires a test of individual module functions.
2494 	 */
2495 	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2496 		ftrace_hash_empty(ops->func_hash->notrace_hash);
2497 }
2498 
2499 /*
2500  * Check if the current ops references the record.
2501  *
2502  * If the ops traces all functions, then it was already accounted for.
2503  * If the ops does not trace the current record function, skip it.
2504  * If the ops ignores the function via notrace filter, skip it.
2505  */
2506 static inline bool
2507 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2508 {
2509 	/* If ops isn't enabled, ignore it */
2510 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2511 		return 0;
2512 
2513 	/* If ops traces all mods, we already accounted for it */
2514 	if (ops_traces_mod(ops))
2515 		return 0;
2516 
2517 	/* The function must be in the filter */
2518 	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2519 	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2520 		return 0;
2521 
2522 	/* If in notrace hash, we ignore it too */
2523 	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2524 		return 0;
2525 
2526 	return 1;
2527 }
2528 
2529 static int referenced_filters(struct dyn_ftrace *rec)
2530 {
2531 	struct ftrace_ops *ops;
2532 	int cnt = 0;
2533 
2534 	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2535 		if (ops_references_rec(ops, rec))
2536 			cnt++;
2537 	}
2538 
2539 	return cnt;
2540 }
2541 
2542 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2543 {
2544 	struct ftrace_page *pg;
2545 	struct dyn_ftrace *p;
2546 	cycle_t start, stop;
2547 	unsigned long update_cnt = 0;
2548 	unsigned long ref = 0;
2549 	bool test = false;
2550 	int i;
2551 
2552 	/*
2553 	 * When adding a module, we need to check if tracers are
2554 	 * currently enabled and if they are set to trace all functions.
2555 	 * If they are, we need to enable the module functions as well
2556 	 * as update the reference counts for those function records.
2557 	 */
2558 	if (mod) {
2559 		struct ftrace_ops *ops;
2560 
2561 		for (ops = ftrace_ops_list;
2562 		     ops != &ftrace_list_end; ops = ops->next) {
2563 			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2564 				if (ops_traces_mod(ops))
2565 					ref++;
2566 				else
2567 					test = true;
2568 			}
2569 		}
2570 	}
2571 
2572 	start = ftrace_now(raw_smp_processor_id());
2573 
2574 	for (pg = new_pgs; pg; pg = pg->next) {
2575 
2576 		for (i = 0; i < pg->index; i++) {
2577 			int cnt = ref;
2578 
2579 			/* If something went wrong, bail without enabling anything */
2580 			if (unlikely(ftrace_disabled))
2581 				return -1;
2582 
2583 			p = &pg->records[i];
2584 			if (test)
2585 				cnt += referenced_filters(p);
2586 			p->flags = cnt;
2587 
2588 			/*
2589 			 * Do the initial record conversion from mcount jump
2590 			 * to the NOP instructions.
2591 			 */
2592 			if (!ftrace_code_disable(mod, p))
2593 				break;
2594 
2595 			update_cnt++;
2596 
2597 			/*
2598 			 * If the tracing is enabled, go ahead and enable the record.
2599 			 *
2600 			 * The reason not to enable the record immediately is the
2601 			 * inherent check of ftrace_make_nop/ftrace_make_call for
2602 			 * correct previous instructions.  Doing the NOP conversion
2603 			 * first puts the module into the correct state, thus
2604 			 * passing the ftrace_make_call check.
2605 			 */
2606 			if (ftrace_start_up && cnt) {
2607 				int failed = __ftrace_replace_code(p, 1);
2608 				if (failed)
2609 					ftrace_bug(failed, p->ip);
2610 			}
2611 		}
2612 	}
2613 
2614 	stop = ftrace_now(raw_smp_processor_id());
2615 	ftrace_update_time = stop - start;
2616 	ftrace_update_tot_cnt += update_cnt;
2617 
2618 	return 0;
2619 }
2620 
2621 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2622 {
2623 	int order;
2624 	int cnt;
2625 
2626 	if (WARN_ON(!count))
2627 		return -EINVAL;
2628 
2629 	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2630 
2631 	/*
2632 	 * We want to fill as much as possible. No more than a page
2633 	 * may be empty.
2634 	 */
2635 	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2636 		order--;
2637 
2638  again:
2639 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2640 
2641 	if (!pg->records) {
2642 		/* if we can't allocate this size, try something smaller */
2643 		if (!order)
2644 			return -ENOMEM;
2645 		order >>= 1;
2646 		goto again;
2647 	}
2648 
2649 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2650 	pg->size = cnt;
2651 
2652 	if (cnt > count)
2653 		cnt = count;
2654 
2655 	return cnt;
2656 }
2657 
2658 static struct ftrace_page *
2659 ftrace_allocate_pages(unsigned long num_to_init)
2660 {
2661 	struct ftrace_page *start_pg;
2662 	struct ftrace_page *pg;
2663 	int order;
2664 	int cnt;
2665 
2666 	if (!num_to_init)
2667 		return NULL;
2668 
2669 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2670 	if (!pg)
2671 		return NULL;
2672 
2673 	/*
2674 	 * Try to allocate as much as possible in one contiguous
2675 	 * location that fills in all of the space. We want to
2676 	 * waste as little space as possible.
2677 	 */
2678 	for (;;) {
2679 		cnt = ftrace_allocate_records(pg, num_to_init);
2680 		if (cnt < 0)
2681 			goto free_pages;
2682 
2683 		num_to_init -= cnt;
2684 		if (!num_to_init)
2685 			break;
2686 
2687 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2688 		if (!pg->next)
2689 			goto free_pages;
2690 
2691 		pg = pg->next;
2692 	}
2693 
2694 	return start_pg;
2695 
2696  free_pages:
2697 	pg = start_pg;
2698 	while (pg) {
2699 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2700 		free_pages((unsigned long)pg->records, order);
2701 		start_pg = pg->next;
2702 		kfree(pg);
2703 		pg = start_pg;
2704 	}
2705 	pr_info("ftrace: FAILED to allocate memory for functions\n");
2706 	return NULL;
2707 }
2708 
2709 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2710 
2711 struct ftrace_iterator {
2712 	loff_t				pos;
2713 	loff_t				func_pos;
2714 	struct ftrace_page		*pg;
2715 	struct dyn_ftrace		*func;
2716 	struct ftrace_func_probe	*probe;
2717 	struct trace_parser		parser;
2718 	struct ftrace_hash		*hash;
2719 	struct ftrace_ops		*ops;
2720 	int				hidx;
2721 	int				idx;
2722 	unsigned			flags;
2723 };
2724 
2725 static void *
2726 t_hash_next(struct seq_file *m, loff_t *pos)
2727 {
2728 	struct ftrace_iterator *iter = m->private;
2729 	struct hlist_node *hnd = NULL;
2730 	struct hlist_head *hhd;
2731 
2732 	(*pos)++;
2733 	iter->pos = *pos;
2734 
2735 	if (iter->probe)
2736 		hnd = &iter->probe->node;
2737  retry:
2738 	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2739 		return NULL;
2740 
2741 	hhd = &ftrace_func_hash[iter->hidx];
2742 
2743 	if (hlist_empty(hhd)) {
2744 		iter->hidx++;
2745 		hnd = NULL;
2746 		goto retry;
2747 	}
2748 
2749 	if (!hnd)
2750 		hnd = hhd->first;
2751 	else {
2752 		hnd = hnd->next;
2753 		if (!hnd) {
2754 			iter->hidx++;
2755 			goto retry;
2756 		}
2757 	}
2758 
2759 	if (WARN_ON_ONCE(!hnd))
2760 		return NULL;
2761 
2762 	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2763 
2764 	return iter;
2765 }
2766 
2767 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2768 {
2769 	struct ftrace_iterator *iter = m->private;
2770 	void *p = NULL;
2771 	loff_t l;
2772 
2773 	if (!(iter->flags & FTRACE_ITER_DO_HASH))
2774 		return NULL;
2775 
2776 	if (iter->func_pos > *pos)
2777 		return NULL;
2778 
2779 	iter->hidx = 0;
2780 	for (l = 0; l <= (*pos - iter->func_pos); ) {
2781 		p = t_hash_next(m, &l);
2782 		if (!p)
2783 			break;
2784 	}
2785 	if (!p)
2786 		return NULL;
2787 
2788 	/* Only set this if we have an item */
2789 	iter->flags |= FTRACE_ITER_HASH;
2790 
2791 	return iter;
2792 }
2793 
2794 static int
2795 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2796 {
2797 	struct ftrace_func_probe *rec;
2798 
2799 	rec = iter->probe;
2800 	if (WARN_ON_ONCE(!rec))
2801 		return -EIO;
2802 
2803 	if (rec->ops->print)
2804 		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2805 
2806 	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2807 
2808 	if (rec->data)
2809 		seq_printf(m, ":%p", rec->data);
2810 	seq_putc(m, '\n');
2811 
2812 	return 0;
2813 }
2814 
2815 static void *
2816 t_next(struct seq_file *m, void *v, loff_t *pos)
2817 {
2818 	struct ftrace_iterator *iter = m->private;
2819 	struct ftrace_ops *ops = iter->ops;
2820 	struct dyn_ftrace *rec = NULL;
2821 
2822 	if (unlikely(ftrace_disabled))
2823 		return NULL;
2824 
2825 	if (iter->flags & FTRACE_ITER_HASH)
2826 		return t_hash_next(m, pos);
2827 
2828 	(*pos)++;
2829 	iter->pos = iter->func_pos = *pos;
2830 
2831 	if (iter->flags & FTRACE_ITER_PRINTALL)
2832 		return t_hash_start(m, pos);
2833 
2834  retry:
2835 	if (iter->idx >= iter->pg->index) {
2836 		if (iter->pg->next) {
2837 			iter->pg = iter->pg->next;
2838 			iter->idx = 0;
2839 			goto retry;
2840 		}
2841 	} else {
2842 		rec = &iter->pg->records[iter->idx++];
2843 		if (((iter->flags & FTRACE_ITER_FILTER) &&
2844 		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
2845 
2846 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
2847 		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
2848 
2849 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
2850 		     !(rec->flags & FTRACE_FL_ENABLED))) {
2851 
2852 			rec = NULL;
2853 			goto retry;
2854 		}
2855 	}
2856 
2857 	if (!rec)
2858 		return t_hash_start(m, pos);
2859 
2860 	iter->func = rec;
2861 
2862 	return iter;
2863 }
2864 
2865 static void reset_iter_read(struct ftrace_iterator *iter)
2866 {
2867 	iter->pos = 0;
2868 	iter->func_pos = 0;
2869 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2870 }
2871 
2872 static void *t_start(struct seq_file *m, loff_t *pos)
2873 {
2874 	struct ftrace_iterator *iter = m->private;
2875 	struct ftrace_ops *ops = iter->ops;
2876 	void *p = NULL;
2877 	loff_t l;
2878 
2879 	mutex_lock(&ftrace_lock);
2880 
2881 	if (unlikely(ftrace_disabled))
2882 		return NULL;
2883 
2884 	/*
2885 	 * If an lseek was done, then reset and start from the beginning.
2886 	 */
2887 	if (*pos < iter->pos)
2888 		reset_iter_read(iter);
2889 
2890 	/*
2891 	 * For set_ftrace_filter reading, if we have the filter
2892 	 * off, we can take a shortcut and just print out that all
2893 	 * functions are enabled.
2894 	 */
2895 	if ((iter->flags & FTRACE_ITER_FILTER &&
2896 	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
2897 	    (iter->flags & FTRACE_ITER_NOTRACE &&
2898 	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
2899 		if (*pos > 0)
2900 			return t_hash_start(m, pos);
2901 		iter->flags |= FTRACE_ITER_PRINTALL;
2902 		/* reset in case of seek/pread */
2903 		iter->flags &= ~FTRACE_ITER_HASH;
2904 		return iter;
2905 	}
2906 
2907 	if (iter->flags & FTRACE_ITER_HASH)
2908 		return t_hash_start(m, pos);
2909 
2910 	/*
2911 	 * Unfortunately, we need to restart at ftrace_pages_start
2912 	 * every time we let go of the ftrace_lock mutex. This is because
2913 	 * those pointers can change without the lock.
2914 	 */
2915 	iter->pg = ftrace_pages_start;
2916 	iter->idx = 0;
2917 	for (l = 0; l <= *pos; ) {
2918 		p = t_next(m, p, &l);
2919 		if (!p)
2920 			break;
2921 	}
2922 
2923 	if (!p)
2924 		return t_hash_start(m, pos);
2925 
2926 	return iter;
2927 }
2928 
2929 static void t_stop(struct seq_file *m, void *p)
2930 {
2931 	mutex_unlock(&ftrace_lock);
2932 }
2933 
2934 static int t_show(struct seq_file *m, void *v)
2935 {
2936 	struct ftrace_iterator *iter = m->private;
2937 	struct dyn_ftrace *rec;
2938 
2939 	if (iter->flags & FTRACE_ITER_HASH)
2940 		return t_hash_show(m, iter);
2941 
2942 	if (iter->flags & FTRACE_ITER_PRINTALL) {
2943 		if (iter->flags & FTRACE_ITER_NOTRACE)
2944 			seq_printf(m, "#### no functions disabled ####\n");
2945 		else
2946 			seq_printf(m, "#### all functions enabled ####\n");
2947 		return 0;
2948 	}
2949 
2950 	rec = iter->func;
2951 
2952 	if (!rec)
2953 		return 0;
2954 
2955 	seq_printf(m, "%ps", (void *)rec->ip);
2956 	if (iter->flags & FTRACE_ITER_ENABLED) {
2957 		seq_printf(m, " (%ld)%s",
2958 			   ftrace_rec_count(rec),
2959 			   rec->flags & FTRACE_FL_REGS ? " R" : "  ");
2960 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
2961 			struct ftrace_ops *ops;
2962 
2963 			ops = ftrace_find_tramp_ops_curr(rec);
2964 			if (ops && ops->trampoline)
2965 				seq_printf(m, "\ttramp: %pS",
2966 					   (void *)ops->trampoline);
2967 			else
2968 				seq_printf(m, "\ttramp: ERROR!");
2969 		}
2970 	}
2971 
2972 	seq_printf(m, "\n");
2973 
2974 	return 0;
2975 }
2976 
2977 static const struct seq_operations show_ftrace_seq_ops = {
2978 	.start = t_start,
2979 	.next = t_next,
2980 	.stop = t_stop,
2981 	.show = t_show,
2982 };
2983 
2984 static int
2985 ftrace_avail_open(struct inode *inode, struct file *file)
2986 {
2987 	struct ftrace_iterator *iter;
2988 
2989 	if (unlikely(ftrace_disabled))
2990 		return -ENODEV;
2991 
2992 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2993 	if (iter) {
2994 		iter->pg = ftrace_pages_start;
2995 		iter->ops = &global_ops;
2996 	}
2997 
2998 	return iter ? 0 : -ENOMEM;
2999 }
3000 
3001 static int
3002 ftrace_enabled_open(struct inode *inode, struct file *file)
3003 {
3004 	struct ftrace_iterator *iter;
3005 
3006 	if (unlikely(ftrace_disabled))
3007 		return -ENODEV;
3008 
3009 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3010 	if (iter) {
3011 		iter->pg = ftrace_pages_start;
3012 		iter->flags = FTRACE_ITER_ENABLED;
3013 		iter->ops = &global_ops;
3014 	}
3015 
3016 	return iter ? 0 : -ENOMEM;
3017 }
3018 
3019 /**
3020  * ftrace_regex_open - initialize function tracer filter files
3021  * @ops: The ftrace_ops that hold the hash filters
3022  * @flag: The type of filter to process
3023  * @inode: The inode, usually passed in to your open routine
3024  * @file: The file, usually passed in to your open routine
3025  *
3026  * ftrace_regex_open() initializes the filter files for the
3027  * @ops. Depending on @flag it may process the filter hash or
3028  * the notrace hash of @ops. With this called from the open
3029  * routine, you can use ftrace_filter_write() for the write
3030  * routine if @flag has FTRACE_ITER_FILTER set, or
3031  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3032  * tracing_lseek() should be used as the lseek routine, and
3033  * release must call ftrace_regex_release().
3034  */
3035 int
3036 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3037 		  struct inode *inode, struct file *file)
3038 {
3039 	struct ftrace_iterator *iter;
3040 	struct ftrace_hash *hash;
3041 	int ret = 0;
3042 
3043 	ftrace_ops_init(ops);
3044 
3045 	if (unlikely(ftrace_disabled))
3046 		return -ENODEV;
3047 
3048 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3049 	if (!iter)
3050 		return -ENOMEM;
3051 
3052 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3053 		kfree(iter);
3054 		return -ENOMEM;
3055 	}
3056 
3057 	iter->ops = ops;
3058 	iter->flags = flag;
3059 
3060 	mutex_lock(&ops->func_hash->regex_lock);
3061 
3062 	if (flag & FTRACE_ITER_NOTRACE)
3063 		hash = ops->func_hash->notrace_hash;
3064 	else
3065 		hash = ops->func_hash->filter_hash;
3066 
3067 	if (file->f_mode & FMODE_WRITE) {
3068 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3069 
3070 		if (file->f_flags & O_TRUNC)
3071 			iter->hash = alloc_ftrace_hash(size_bits);
3072 		else
3073 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3074 
3075 		if (!iter->hash) {
3076 			trace_parser_put(&iter->parser);
3077 			kfree(iter);
3078 			ret = -ENOMEM;
3079 			goto out_unlock;
3080 		}
3081 	}
3082 
3083 	if (file->f_mode & FMODE_READ) {
3084 		iter->pg = ftrace_pages_start;
3085 
3086 		ret = seq_open(file, &show_ftrace_seq_ops);
3087 		if (!ret) {
3088 			struct seq_file *m = file->private_data;
3089 			m->private = iter;
3090 		} else {
3091 			/* Failed */
3092 			free_ftrace_hash(iter->hash);
3093 			trace_parser_put(&iter->parser);
3094 			kfree(iter);
3095 		}
3096 	} else
3097 		file->private_data = iter;
3098 
3099  out_unlock:
3100 	mutex_unlock(&ops->func_hash->regex_lock);
3101 
3102 	return ret;
3103 }
3104 
3105 static int
3106 ftrace_filter_open(struct inode *inode, struct file *file)
3107 {
3108 	struct ftrace_ops *ops = inode->i_private;
3109 
3110 	return ftrace_regex_open(ops,
3111 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3112 			inode, file);
3113 }
3114 
3115 static int
3116 ftrace_notrace_open(struct inode *inode, struct file *file)
3117 {
3118 	struct ftrace_ops *ops = inode->i_private;
3119 
3120 	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3121 				 inode, file);
3122 }
3123 
3124 static int ftrace_match(char *str, char *regex, int len, int type)
3125 {
3126 	int matched = 0;
3127 	int slen;
3128 
3129 	switch (type) {
3130 	case MATCH_FULL:
3131 		if (strcmp(str, regex) == 0)
3132 			matched = 1;
3133 		break;
3134 	case MATCH_FRONT_ONLY:
3135 		if (strncmp(str, regex, len) == 0)
3136 			matched = 1;
3137 		break;
3138 	case MATCH_MIDDLE_ONLY:
3139 		if (strstr(str, regex))
3140 			matched = 1;
3141 		break;
3142 	case MATCH_END_ONLY:
3143 		slen = strlen(str);
3144 		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
3145 			matched = 1;
3146 		break;
3147 	}
3148 
3149 	return matched;
3150 }
3151 
3152 static int
3153 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
3154 {
3155 	struct ftrace_func_entry *entry;
3156 	int ret = 0;
3157 
3158 	entry = ftrace_lookup_ip(hash, rec->ip);
3159 	if (not) {
3160 		/* Do nothing if it doesn't exist */
3161 		if (!entry)
3162 			return 0;
3163 
3164 		free_hash_entry(hash, entry);
3165 	} else {
3166 		/* Do nothing if it exists */
3167 		if (entry)
3168 			return 0;
3169 
3170 		ret = add_hash_entry(hash, rec->ip);
3171 	}
3172 	return ret;
3173 }
3174 
3175 static int
3176 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
3177 		    char *regex, int len, int type)
3178 {
3179 	char str[KSYM_SYMBOL_LEN];
3180 	char *modname;
3181 
3182 	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3183 
3184 	if (mod) {
3185 		/* module lookup requires matching the module */
3186 		if (!modname || strcmp(modname, mod))
3187 			return 0;
3188 
3189 		/* blank search means to match all funcs in the mod */
3190 		if (!len)
3191 			return 1;
3192 	}
3193 
3194 	return ftrace_match(str, regex, len, type);
3195 }
3196 
3197 static int
3198 match_records(struct ftrace_hash *hash, char *buff,
3199 	      int len, char *mod, int not)
3200 {
3201 	unsigned search_len = 0;
3202 	struct ftrace_page *pg;
3203 	struct dyn_ftrace *rec;
3204 	int type = MATCH_FULL;
3205 	char *search = buff;
3206 	int found = 0;
3207 	int ret;
3208 
3209 	if (len) {
3210 		type = filter_parse_regex(buff, len, &search, &not);
3211 		search_len = strlen(search);
3212 	}
3213 
3214 	mutex_lock(&ftrace_lock);
3215 
3216 	if (unlikely(ftrace_disabled))
3217 		goto out_unlock;
3218 
3219 	do_for_each_ftrace_rec(pg, rec) {
3220 		if (ftrace_match_record(rec, mod, search, search_len, type)) {
3221 			ret = enter_record(hash, rec, not);
3222 			if (ret < 0) {
3223 				found = ret;
3224 				goto out_unlock;
3225 			}
3226 			found = 1;
3227 		}
3228 	} while_for_each_ftrace_rec();
3229  out_unlock:
3230 	mutex_unlock(&ftrace_lock);
3231 
3232 	return found;
3233 }
3234 
3235 static int
3236 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3237 {
3238 	return match_records(hash, buff, len, NULL, 0);
3239 }
3240 
3241 static int
3242 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
3243 {
3244 	int not = 0;
3245 
3246 	/* blank or '*' mean the same */
3247 	if (strcmp(buff, "*") == 0)
3248 		buff[0] = 0;
3249 
3250 	/* handle the case of 'dont filter this module' */
3251 	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
3252 		buff[0] = 0;
3253 		not = 1;
3254 	}
3255 
3256 	return match_records(hash, buff, strlen(buff), mod, not);
3257 }
3258 
3259 /*
3260  * We register the module command as a template to show others how
3261  * to register a command as well.
3262  */
3263 
3264 static int
3265 ftrace_mod_callback(struct ftrace_hash *hash,
3266 		    char *func, char *cmd, char *param, int enable)
3267 {
3268 	char *mod;
3269 	int ret = -EINVAL;
3270 
3271 	/*
3272 	 * cmd == 'mod' because we only registered this func
3273 	 * for the 'mod' ftrace_func_command.
3274 	 * But if you register one func with multiple commands,
3275 	 * you can tell which command was used by the cmd
3276 	 * parameter.
3277 	 */
3278 
3279 	/* we must have a module name */
3280 	if (!param)
3281 		return ret;
3282 
3283 	mod = strsep(&param, ":");
3284 	if (!strlen(mod))
3285 		return ret;
3286 
3287 	ret = ftrace_match_module_records(hash, func, mod);
3288 	if (!ret)
3289 		ret = -EINVAL;
3290 	if (ret < 0)
3291 		return ret;
3292 
3293 	return 0;
3294 }
3295 
3296 static struct ftrace_func_command ftrace_mod_cmd = {
3297 	.name			= "mod",
3298 	.func			= ftrace_mod_callback,
3299 };
3300 
3301 static int __init ftrace_mod_cmd_init(void)
3302 {
3303 	return register_ftrace_command(&ftrace_mod_cmd);
3304 }
3305 core_initcall(ftrace_mod_cmd_init);
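
/*
 * Once registered, the "mod" command is reachable through the filter
 * files, e.g. (the module name is illustrative):
 *
 *	echo '*:mod:ext4' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * which enables tracing of every function in the ext4 module.
 */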
3306 
3307 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3308 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
3309 {
3310 	struct ftrace_func_probe *entry;
3311 	struct hlist_head *hhd;
3312 	unsigned long key;
3313 
3314 	key = hash_long(ip, FTRACE_HASH_BITS);
3315 
3316 	hhd = &ftrace_func_hash[key];
3317 
3318 	if (hlist_empty(hhd))
3319 		return;
3320 
3321 	/*
3322 	 * Disable preemption for these calls to prevent an RCU grace
3323 	 * period. This syncs the hash iteration and freeing of items
3324 	 * on the hash. rcu_read_lock is too dangerous here.
3325 	 */
3326 	preempt_disable_notrace();
3327 	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3328 		if (entry->ip == ip)
3329 			entry->ops->func(ip, parent_ip, &entry->data);
3330 	}
3331 	preempt_enable_notrace();
3332 }
3333 
3334 static struct ftrace_ops trace_probe_ops __read_mostly =
3335 {
3336 	.func		= function_trace_probe_call,
3337 	.flags		= FTRACE_OPS_FL_INITIALIZED,
3338 	INIT_OPS_HASH(trace_probe_ops)
3339 };
3340 
3341 static int ftrace_probe_registered;
3342 
3343 static void __enable_ftrace_function_probe(void)
3344 {
3345 	int ret;
3346 	int i;
3347 
3348 	if (ftrace_probe_registered) {
3349 		/* still need to update the function call sites */
3350 		if (ftrace_enabled)
3351 			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3352 		return;
3353 	}
3354 
3355 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3356 		struct hlist_head *hhd = &ftrace_func_hash[i];
3357 		if (hhd->first)
3358 			break;
3359 	}
3360 	/* Nothing registered? */
3361 	if (i == FTRACE_FUNC_HASHSIZE)
3362 		return;
3363 
3364 	ret = ftrace_startup(&trace_probe_ops, 0);
3365 
3366 	ftrace_probe_registered = 1;
3367 }
3368 
3369 static void __disable_ftrace_function_probe(void)
3370 {
3371 	int i;
3372 
3373 	if (!ftrace_probe_registered)
3374 		return;
3375 
3376 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3377 		struct hlist_head *hhd = &ftrace_func_hash[i];
3378 		if (hhd->first)
3379 			return;
3380 	}
3381 
3382 	/* no more funcs left */
3383 	ftrace_shutdown(&trace_probe_ops, 0);
3384 
3385 	ftrace_probe_registered = 0;
3386 }
3387 
3388 
3389 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3390 {
3391 	if (entry->ops->free)
3392 		entry->ops->free(entry->ops, entry->ip, &entry->data);
3393 	kfree(entry);
3394 }
3395 
3396 int
3397 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3398 			      void *data)
3399 {
3400 	struct ftrace_func_probe *entry;
3401 	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3402 	struct ftrace_hash *hash;
3403 	struct ftrace_page *pg;
3404 	struct dyn_ftrace *rec;
3405 	int type, len, not;
3406 	unsigned long key;
3407 	int count = 0;
3408 	char *search;
3409 	int ret;
3410 
3411 	type = filter_parse_regex(glob, strlen(glob), &search, &not);
3412 	len = strlen(search);
3413 
3414 	/* we do not support '!' for function probes */
3415 	if (WARN_ON(not))
3416 		return -EINVAL;
3417 
3418 	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3419 
3420 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3421 	if (!hash) {
3422 		count = -ENOMEM;
3423 		goto out;
3424 	}
3425 
3426 	if (unlikely(ftrace_disabled)) {
3427 		count = -ENODEV;
3428 		goto out;
3429 	}
3430 
3431 	mutex_lock(&ftrace_lock);
3432 
3433 	do_for_each_ftrace_rec(pg, rec) {
3434 
3435 		if (!ftrace_match_record(rec, NULL, search, len, type))
3436 			continue;
3437 
3438 		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3439 		if (!entry) {
3440 			/* If we did not process any, then return error */
3441 			if (!count)
3442 				count = -ENOMEM;
3443 			goto out_unlock;
3444 		}
3445 
3446 		count++;
3447 
3448 		entry->data = data;
3449 
3450 		/*
3451 		 * The caller might want to do something special
3452 		 * for each function we find. We call the callback
3453 		 * to give the caller an opportunity to do so.
3454 		 */
3455 		if (ops->init) {
3456 			if (ops->init(ops, rec->ip, &entry->data) < 0) {
3457 				/* caller does not like this func */
3458 				kfree(entry);
3459 				continue;
3460 			}
3461 		}
3462 
3463 		ret = enter_record(hash, rec, 0);
3464 		if (ret < 0) {
3465 			kfree(entry);
3466 			count = ret;
3467 			goto out_unlock;
3468 		}
3469 
3470 		entry->ops = ops;
3471 		entry->ip = rec->ip;
3472 
3473 		key = hash_long(entry->ip, FTRACE_HASH_BITS);
3474 		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3475 
3476 	} while_for_each_ftrace_rec();
3477 
3478 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3479 	if (ret < 0)
3480 		count = ret;
3481 
3482 	__enable_ftrace_function_probe();
3483 
3484  out_unlock:
3485 	mutex_unlock(&ftrace_lock);
3486  out:
3487 	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3488 	free_ftrace_hash(hash);
3489 
3490 	return count;
3491 }
3492 
3493 enum {
3494 	PROBE_TEST_FUNC		= 1,
3495 	PROBE_TEST_DATA		= 2
3496 };
3497 
3498 static void
3499 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3500 				  void *data, int flags)
3501 {
3502 	struct ftrace_func_entry *rec_entry;
3503 	struct ftrace_func_probe *entry;
3504 	struct ftrace_func_probe *p;
3505 	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3506 	struct list_head free_list;
3507 	struct ftrace_hash *hash;
3508 	struct hlist_node *tmp;
3509 	char str[KSYM_SYMBOL_LEN];
3510 	int type = MATCH_FULL;
3511 	int i, len = 0;
3512 	char *search;
3513 
3514 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3515 		glob = NULL;
3516 	else if (glob) {
3517 		int not;
3518 
3519 		type = filter_parse_regex(glob, strlen(glob), &search, &not);
3520 		len = strlen(search);
3521 
3522 		/* we do not support '!' for function probes */
3523 		if (WARN_ON(not))
3524 			return;
3525 	}
3526 
3527 	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3528 
3529 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3530 	if (!hash)
3531 		/* Hmm, should report this somehow */
3532 		goto out_unlock;
3533 
3534 	INIT_LIST_HEAD(&free_list);
3535 
3536 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3537 		struct hlist_head *hhd = &ftrace_func_hash[i];
3538 
3539 		hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3540 
3541 			/* break up if statements for readability */
3542 			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3543 				continue;
3544 
3545 			if ((flags & PROBE_TEST_DATA) && entry->data != data)
3546 				continue;
3547 
3548 			/* do this last, since it is the most expensive */
3549 			if (glob) {
3550 				kallsyms_lookup(entry->ip, NULL, NULL,
3551 						NULL, str);
3552 				if (!ftrace_match(str, glob, len, type))
3553 					continue;
3554 			}
3555 
3556 			rec_entry = ftrace_lookup_ip(hash, entry->ip);
3557 			/* It is possible more than one entry had this ip */
3558 			if (rec_entry)
3559 				free_hash_entry(hash, rec_entry);
3560 
3561 			hlist_del_rcu(&entry->node);
3562 			list_add(&entry->free_list, &free_list);
3563 		}
3564 	}
3565 	mutex_lock(&ftrace_lock);
3566 	__disable_ftrace_function_probe();
3567 	/*
3568 	 * Remove after the disable is called. Otherwise, if the last
3569 	 * probe is removed, a null hash means *all enabled*.
3570 	 */
3571 	ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3572 	synchronize_sched();
3573 	list_for_each_entry_safe(entry, p, &free_list, free_list) {
3574 		list_del(&entry->free_list);
3575 		ftrace_free_entry(entry);
3576 	}
3577 	mutex_unlock(&ftrace_lock);
3578 
3579  out_unlock:
3580 	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3581 	free_ftrace_hash(hash);
3582 }
3583 
3584 void
3585 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3586 				void *data)
3587 {
3588 	__unregister_ftrace_function_probe(glob, ops, data,
3589 					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
3590 }
3591 
3592 void
3593 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3594 {
3595 	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3596 }
3597 
3598 void unregister_ftrace_function_probe_all(char *glob)
3599 {
3600 	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3601 }
3602 
3603 static LIST_HEAD(ftrace_commands);
3604 static DEFINE_MUTEX(ftrace_cmd_mutex);
3605 
3606 /*
3607  * Currently we only register ftrace commands from __init, so mark this
3608  * __init too.
3609  */
3610 __init int register_ftrace_command(struct ftrace_func_command *cmd)
3611 {
3612 	struct ftrace_func_command *p;
3613 	int ret = 0;
3614 
3615 	mutex_lock(&ftrace_cmd_mutex);
3616 	list_for_each_entry(p, &ftrace_commands, list) {
3617 		if (strcmp(cmd->name, p->name) == 0) {
3618 			ret = -EBUSY;
3619 			goto out_unlock;
3620 		}
3621 	}
3622 	list_add(&cmd->list, &ftrace_commands);
3623  out_unlock:
3624 	mutex_unlock(&ftrace_cmd_mutex);
3625 
3626 	return ret;
3627 }
3628 
3629 /*
3630  * Currently we only unregister ftrace commands from __init, so mark
3631  * this __init too.
3632  */
3633 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3634 {
3635 	struct ftrace_func_command *p, *n;
3636 	int ret = -ENODEV;
3637 
3638 	mutex_lock(&ftrace_cmd_mutex);
3639 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3640 		if (strcmp(cmd->name, p->name) == 0) {
3641 			ret = 0;
3642 			list_del_init(&p->list);
3643 			goto out_unlock;
3644 		}
3645 	}
3646  out_unlock:
3647 	mutex_unlock(&ftrace_cmd_mutex);
3648 
3649 	return ret;
3650 }
3651 
3652 static int ftrace_process_regex(struct ftrace_hash *hash,
3653 				char *buff, int len, int enable)
3654 {
3655 	char *func, *command, *next = buff;
3656 	struct ftrace_func_command *p;
3657 	int ret = -EINVAL;
3658 
3659 	func = strsep(&next, ":");
3660 
3661 	if (!next) {
3662 		ret = ftrace_match_records(hash, func, len);
3663 		if (!ret)
3664 			ret = -EINVAL;
3665 		if (ret < 0)
3666 			return ret;
3667 		return 0;
3668 	}
3669 
3670 	/* command found */
3671 
3672 	command = strsep(&next, ":");
3673 
3674 	mutex_lock(&ftrace_cmd_mutex);
3675 	list_for_each_entry(p, &ftrace_commands, list) {
3676 		if (strcmp(p->name, command) == 0) {
3677 			ret = p->func(hash, func, command, next, enable);
3678 			goto out_unlock;
3679 		}
3680 	}
3681  out_unlock:
3682 	mutex_unlock(&ftrace_cmd_mutex);
3683 
3684 	return ret;
3685 }
3686 
3687 static ssize_t
3688 ftrace_regex_write(struct file *file, const char __user *ubuf,
3689 		   size_t cnt, loff_t *ppos, int enable)
3690 {
3691 	struct ftrace_iterator *iter;
3692 	struct trace_parser *parser;
3693 	ssize_t ret, read;
3694 
3695 	if (!cnt)
3696 		return 0;
3697 
3698 	if (file->f_mode & FMODE_READ) {
3699 		struct seq_file *m = file->private_data;
3700 		iter = m->private;
3701 	} else
3702 		iter = file->private_data;
3703 
3704 	if (unlikely(ftrace_disabled))
3705 		return -ENODEV;
3706 
3707 	/* iter->hash is a local copy, so we don't need regex_lock */
3708 
3709 	parser = &iter->parser;
3710 	read = trace_get_user(parser, ubuf, cnt, ppos);
3711 
3712 	if (read >= 0 && trace_parser_loaded(parser) &&
3713 	    !trace_parser_cont(parser)) {
3714 		ret = ftrace_process_regex(iter->hash, parser->buffer,
3715 					   parser->idx, enable);
3716 		trace_parser_clear(parser);
3717 		if (ret < 0)
3718 			goto out;
3719 	}
3720 
3721 	ret = read;
3722  out:
3723 	return ret;
3724 }
3725 
3726 ssize_t
3727 ftrace_filter_write(struct file *file, const char __user *ubuf,
3728 		    size_t cnt, loff_t *ppos)
3729 {
3730 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3731 }
3732 
3733 ssize_t
3734 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3735 		     size_t cnt, loff_t *ppos)
3736 {
3737 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3738 }
3739 
3740 static int
3741 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3742 {
3743 	struct ftrace_func_entry *entry;
3744 
3745 	if (!ftrace_location(ip))
3746 		return -EINVAL;
3747 
3748 	if (remove) {
3749 		entry = ftrace_lookup_ip(hash, ip);
3750 		if (!entry)
3751 			return -ENOENT;
3752 		free_hash_entry(hash, entry);
3753 		return 0;
3754 	}
3755 
3756 	return add_hash_entry(hash, ip);
3757 }
3758 
3759 static void ftrace_ops_update_code(struct ftrace_ops *ops)
3760 {
3761 	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3762 		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3763 }
3764 
3765 static int
3766 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3767 		unsigned long ip, int remove, int reset, int enable)
3768 {
3769 	struct ftrace_hash **orig_hash;
3770 	struct ftrace_hash *hash;
3771 	int ret;
3772 
3773 	if (unlikely(ftrace_disabled))
3774 		return -ENODEV;
3775 
3776 	mutex_lock(&ops->func_hash->regex_lock);
3777 
3778 	if (enable)
3779 		orig_hash = &ops->func_hash->filter_hash;
3780 	else
3781 		orig_hash = &ops->func_hash->notrace_hash;
3782 
3783 	if (reset)
3784 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
3785 	else
3786 		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3787 
3788 	if (!hash) {
3789 		ret = -ENOMEM;
3790 		goto out_regex_unlock;
3791 	}
3792 
3793 	if (buf && !ftrace_match_records(hash, buf, len)) {
3794 		ret = -EINVAL;
3795 		goto out_regex_unlock;
3796 	}
3797 	if (ip) {
3798 		ret = ftrace_match_addr(hash, ip, remove);
3799 		if (ret < 0)
3800 			goto out_regex_unlock;
3801 	}
3802 
3803 	mutex_lock(&ftrace_lock);
3804 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3805 	if (!ret)
3806 		ftrace_ops_update_code(ops);
3807 
3808 	mutex_unlock(&ftrace_lock);
3809 
3810  out_regex_unlock:
3811 	mutex_unlock(&ops->func_hash->regex_lock);
3812 
3813 	free_ftrace_hash(hash);
3814 	return ret;
3815 }
3816 
3817 static int
3818 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3819 		int reset, int enable)
3820 {
3821 	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3822 }
3823 
3824 /**
3825  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3826  * @ops: the ops to set the filter with
3827  * @ip: the address to add to or remove from the filter.
3828  * @remove: non-zero to remove the ip from the filter
3829  * @reset: non-zero to reset all filters before applying this filter.
3830  *
3831  * Filters denote which functions should be enabled when tracing is enabled.
3832  * If @ip is NULL, it fails to update the filter.
3833  */
3834 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3835 			 int remove, int reset)
3836 {
3837 	ftrace_ops_init(ops);
3838 	return ftrace_set_addr(ops, ip, remove, reset, 1);
3839 }
3840 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
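
/*
 * A minimal usage sketch, assuming a caller that filters on a single
 * address before registering its ops (my_ops and my_func are
 * hypothetical):
 *
 *	ret = ftrace_set_filter_ip(&my_ops, (unsigned long)my_func, 0, 0);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */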
3841 
3842 static int
3843 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3844 		 int reset, int enable)
3845 {
3846 	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3847 }
3848 
3849 /**
3850  * ftrace_set_filter - set a function to filter on in ftrace
3851  * @ops: the ops to set the filter with
3852  * @buf: the string that holds the function filter text.
3853  * @len: the length of the string.
3854  * @reset: non-zero to reset all filters before applying this filter.
3855  *
3856  * Filters denote which functions should be enabled when tracing is enabled.
3857  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3858  */
3859 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3860 		       int len, int reset)
3861 {
3862 	ftrace_ops_init(ops);
3863 	return ftrace_set_regex(ops, buf, len, reset, 1);
3864 }
3865 EXPORT_SYMBOL_GPL(ftrace_set_filter);
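
/*
 * A short usage sketch, assuming a hypothetical my_ops: reset the
 * current filter and match a glob in one call:
 *
 *	ftrace_set_filter(&my_ops, "ext4_*", strlen("ext4_*"), 1);
 */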
3866 
3867 /**
3868  * ftrace_set_notrace - set a function to not trace in ftrace
3869  * @ops: the ops to set the notrace filter with
3870  * @buf: the string that holds the function notrace text.
3871  * @len: the length of the string.
3872  * @reset: non-zero to reset all filters before applying this filter.
3873  *
3874  * Notrace Filters denote which functions should not be enabled when tracing
3875  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3876  * for tracing.
3877  */
3878 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3879 			int len, int reset)
3880 {
3881 	ftrace_ops_init(ops);
3882 	return ftrace_set_regex(ops, buf, len, reset, 0);
3883 }
3884 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3885 /**
3886  * ftrace_set_global_filter - set a function to filter on with global tracers
3887  * @buf: the string that holds the function filter text.
3888  * @len: the length of the string.
3889  * @reset: non-zero to reset all filters before applying this filter.
3890  *
3891  * Filters denote which functions should be enabled when tracing is enabled.
3892  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3893  */
3894 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3895 {
3896 	ftrace_set_regex(&global_ops, buf, len, reset, 1);
3897 }
3898 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3899 
3900 /**
3901  * ftrace_set_global_notrace - set a function to not trace with global tracers
3902  * @buf: the string that holds the function notrace text.
3903  * @len: the length of the string.
3904  * @reset: non-zero to reset all filters before applying this filter.
3905  *
3906  * Notrace Filters denote which functions should not be enabled when tracing
3907  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3908  * for tracing.
3909  */
3910 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3911 {
3912 	ftrace_set_regex(&global_ops, buf, len, reset, 0);
3913 }
3914 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3915 
3916 /*
3917  * command line interface to allow users to set filters on boot up.
3918  */
3919 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
3920 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3921 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3922 
3923 /* Used by the function selftest to skip the test if a filter is set */
3924 bool ftrace_filter_param __initdata;
3925 
3926 static int __init set_ftrace_notrace(char *str)
3927 {
3928 	ftrace_filter_param = true;
3929 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3930 	return 1;
3931 }
3932 __setup("ftrace_notrace=", set_ftrace_notrace);
3933 
3934 static int __init set_ftrace_filter(char *str)
3935 {
3936 	ftrace_filter_param = true;
3937 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3938 	return 1;
3939 }
3940 __setup("ftrace_filter=", set_ftrace_filter);
3941 
3942 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3943 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3944 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3945 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
3946 
3947 static int __init set_graph_function(char *str)
3948 {
3949 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3950 	return 1;
3951 }
3952 __setup("ftrace_graph_filter=", set_graph_function);
3953 
3954 static int __init set_graph_notrace_function(char *str)
3955 {
3956 	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
3957 	return 1;
3958 }
3959 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
3960 
3961 static void __init set_ftrace_early_graph(char *buf, int enable)
3962 {
3963 	int ret;
3964 	char *func;
3965 	unsigned long *table = ftrace_graph_funcs;
3966 	int *count = &ftrace_graph_count;
3967 
3968 	if (!enable) {
3969 		table = ftrace_graph_notrace_funcs;
3970 		count = &ftrace_graph_notrace_count;
3971 	}
3972 
3973 	while (buf) {
3974 		func = strsep(&buf, ",");
3975 		/* we allow only one expression at a time */
3976 		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
3977 		if (ret)
3978 			printk(KERN_DEBUG "ftrace: function %s not "
3979 					  "traceable\n", func);
3980 	}
3981 }
3982 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3983 
3984 void __init
3985 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3986 {
3987 	char *func;
3988 
3989 	ftrace_ops_init(ops);
3990 
3991 	while (buf) {
3992 		func = strsep(&buf, ",");
3993 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
3994 	}
3995 }
3996 
3997 static void __init set_ftrace_early_filters(void)
3998 {
3999 	if (ftrace_filter_buf[0])
4000 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4001 	if (ftrace_notrace_buf[0])
4002 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4003 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4004 	if (ftrace_graph_buf[0])
4005 		set_ftrace_early_graph(ftrace_graph_buf, 1);
4006 	if (ftrace_graph_notrace_buf[0])
4007 		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4008 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4009 }
4010 
4011 int ftrace_regex_release(struct inode *inode, struct file *file)
4012 {
4013 	struct seq_file *m = (struct seq_file *)file->private_data;
4014 	struct ftrace_iterator *iter;
4015 	struct ftrace_hash **orig_hash;
4016 	struct trace_parser *parser;
4017 	int filter_hash;
4018 	int ret;
4019 
4020 	if (file->f_mode & FMODE_READ) {
4021 		iter = m->private;
4022 		seq_release(inode, file);
4023 	} else
4024 		iter = file->private_data;
4025 
4026 	parser = &iter->parser;
4027 	if (trace_parser_loaded(parser)) {
4028 		parser->buffer[parser->idx] = 0;
4029 		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4030 	}
4031 
4032 	trace_parser_put(parser);
4033 
4034 	mutex_lock(&iter->ops->func_hash->regex_lock);
4035 
4036 	if (file->f_mode & FMODE_WRITE) {
4037 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4038 
4039 		if (filter_hash)
4040 			orig_hash = &iter->ops->func_hash->filter_hash;
4041 		else
4042 			orig_hash = &iter->ops->func_hash->notrace_hash;
4043 
4044 		mutex_lock(&ftrace_lock);
4045 		ret = ftrace_hash_move(iter->ops, filter_hash,
4046 				       orig_hash, iter->hash);
4047 		if (!ret)
4048 			ftrace_ops_update_code(iter->ops);
4049 
4050 		mutex_unlock(&ftrace_lock);
4051 	}
4052 
4053 	mutex_unlock(&iter->ops->func_hash->regex_lock);
4054 	free_ftrace_hash(iter->hash);
4055 	kfree(iter);
4056 
4057 	return 0;
4058 }
4059 
4060 static const struct file_operations ftrace_avail_fops = {
4061 	.open = ftrace_avail_open,
4062 	.read = seq_read,
4063 	.llseek = seq_lseek,
4064 	.release = seq_release_private,
4065 };
4066 
4067 static const struct file_operations ftrace_enabled_fops = {
4068 	.open = ftrace_enabled_open,
4069 	.read = seq_read,
4070 	.llseek = seq_lseek,
4071 	.release = seq_release_private,
4072 };
4073 
4074 static const struct file_operations ftrace_filter_fops = {
4075 	.open = ftrace_filter_open,
4076 	.read = seq_read,
4077 	.write = ftrace_filter_write,
4078 	.llseek = tracing_lseek,
4079 	.release = ftrace_regex_release,
4080 };
4081 
4082 static const struct file_operations ftrace_notrace_fops = {
4083 	.open = ftrace_notrace_open,
4084 	.read = seq_read,
4085 	.write = ftrace_notrace_write,
4086 	.llseek = tracing_lseek,
4087 	.release = ftrace_regex_release,
4088 };
4089 
4090 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4091 
4092 static DEFINE_MUTEX(graph_lock);
4093 
4094 int ftrace_graph_count;
4095 int ftrace_graph_notrace_count;
4096 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4097 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4098 
4099 struct ftrace_graph_data {
4100 	unsigned long *table;
4101 	size_t size;
4102 	int *count;
4103 	const struct seq_operations *seq_ops;
4104 };
4105 
4106 static void *
4107 __g_next(struct seq_file *m, loff_t *pos)
4108 {
4109 	struct ftrace_graph_data *fgd = m->private;
4110 
4111 	if (*pos >= *fgd->count)
4112 		return NULL;
4113 	return &fgd->table[*pos];
4114 }
4115 
4116 static void *
4117 g_next(struct seq_file *m, void *v, loff_t *pos)
4118 {
4119 	(*pos)++;
4120 	return __g_next(m, pos);
4121 }
4122 
4123 static void *g_start(struct seq_file *m, loff_t *pos)
4124 {
4125 	struct ftrace_graph_data *fgd = m->private;
4126 
4127 	mutex_lock(&graph_lock);
4128 
4129 	/* Nothing set; tell g_show to print that all functions are enabled */
4130 	if (!*fgd->count && !*pos)
4131 		return (void *)1;
4132 
4133 	return __g_next(m, pos);
4134 }
4135 
4136 static void g_stop(struct seq_file *m, void *p)
4137 {
4138 	mutex_unlock(&graph_lock);
4139 }
4140 
4141 static int g_show(struct seq_file *m, void *v)
4142 {
4143 	unsigned long *ptr = v;
4144 
4145 	if (!ptr)
4146 		return 0;
4147 
4148 	if (ptr == (unsigned long *)1) {
4149 		struct ftrace_graph_data *fgd = m->private;
4150 
4151 		if (fgd->table == ftrace_graph_funcs)
4152 			seq_puts(m, "#### all functions enabled ####\n");
4153 		else
4154 			seq_puts(m, "#### no functions disabled ####\n");
4155 		return 0;
4156 	}
4157 
4158 	seq_printf(m, "%ps\n", (void *)*ptr);
4159 
4160 	return 0;
4161 }
4162 
4163 static const struct seq_operations ftrace_graph_seq_ops = {
4164 	.start = g_start,
4165 	.next = g_next,
4166 	.stop = g_stop,
4167 	.show = g_show,
4168 };
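/*
 * A sketch of how the seq_ops above look from user space, assuming the
 * usual debugfs mount point:
 *
 *   # cat /sys/kernel/debug/tracing/set_graph_function
 *   #### all functions enabled ####
 *
 * g_start() takes graph_lock and hands back either the (void *)1
 * sentinel for an empty table or a pointer into fgd->table; g_show()
 * then resolves each stored ip via the %ps printk format.
 */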
4169 
4170 static int
4171 __ftrace_graph_open(struct inode *inode, struct file *file,
4172 		    struct ftrace_graph_data *fgd)
4173 {
4174 	int ret = 0;
4175 
4176 	mutex_lock(&graph_lock);
4177 	if ((file->f_mode & FMODE_WRITE) &&
4178 	    (file->f_flags & O_TRUNC)) {
4179 		*fgd->count = 0;
4180 		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
4181 	}
4182 	mutex_unlock(&graph_lock);
4183 
4184 	if (file->f_mode & FMODE_READ) {
4185 		ret = seq_open(file, fgd->seq_ops);
4186 		if (!ret) {
4187 			struct seq_file *m = file->private_data;
4188 			m->private = fgd;
4189 		}
4190 	} else
4191 		file->private_data = fgd;
4192 
4193 	return ret;
4194 }
4195 
4196 static int
4197 ftrace_graph_open(struct inode *inode, struct file *file)
4198 {
4199 	struct ftrace_graph_data *fgd;
4200 
4201 	if (unlikely(ftrace_disabled))
4202 		return -ENODEV;
4203 
4204 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4205 	if (fgd == NULL)
4206 		return -ENOMEM;
4207 
4208 	fgd->table = ftrace_graph_funcs;
4209 	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4210 	fgd->count = &ftrace_graph_count;
4211 	fgd->seq_ops = &ftrace_graph_seq_ops;
4212 
4213 	return __ftrace_graph_open(inode, file, fgd);
4214 }
4215 
4216 static int
4217 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4218 {
4219 	struct ftrace_graph_data *fgd;
4220 
4221 	if (unlikely(ftrace_disabled))
4222 		return -ENODEV;
4223 
4224 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4225 	if (fgd == NULL)
4226 		return -ENOMEM;
4227 
4228 	fgd->table = ftrace_graph_notrace_funcs;
4229 	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4230 	fgd->count = &ftrace_graph_notrace_count;
4231 	fgd->seq_ops = &ftrace_graph_seq_ops;
4232 
4233 	return __ftrace_graph_open(inode, file, fgd);
4234 }
4235 
4236 static int
4237 ftrace_graph_release(struct inode *inode, struct file *file)
4238 {
4239 	if (file->f_mode & FMODE_READ) {
4240 		struct seq_file *m = file->private_data;
4241 
4242 		kfree(m->private);
4243 		seq_release(inode, file);
4244 	} else {
4245 		kfree(file->private_data);
4246 	}
4247 
4248 	return 0;
4249 }
4250 
4251 static int
4252 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
4253 {
4254 	struct dyn_ftrace *rec;
4255 	struct ftrace_page *pg;
4256 	int search_len;
4257 	int fail = 1;
4258 	int type, not;
4259 	char *search;
4260 	bool exists;
4261 	int i;
4262 
4263 	/* decode regex */
4264 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
4265 	if (!not && *idx >= size)
4266 		return -EBUSY;
4267 
4268 	search_len = strlen(search);
4269 
4270 	mutex_lock(&ftrace_lock);
4271 
4272 	if (unlikely(ftrace_disabled)) {
4273 		mutex_unlock(&ftrace_lock);
4274 		return -ENODEV;
4275 	}
4276 
4277 	do_for_each_ftrace_rec(pg, rec) {
4278 
4279 		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4280 			/* if it is in the array */
4281 			exists = false;
4282 			for (i = 0; i < *idx; i++) {
4283 				if (array[i] == rec->ip) {
4284 					exists = true;
4285 					break;
4286 				}
4287 			}
4288 
4289 			if (!not) {
4290 				fail = 0;
4291 				if (!exists) {
4292 					array[(*idx)++] = rec->ip;
4293 					if (*idx >= size)
4294 						goto out;
4295 				}
4296 			} else {
4297 				if (exists) {
4298 					array[i] = array[--(*idx)];
4299 					array[*idx] = 0;
4300 					fail = 0;
4301 				}
4302 			}
4303 		}
4304 	} while_for_each_ftrace_rec();
4305 out:
4306 	mutex_unlock(&ftrace_lock);
4307 
4308 	if (fail)
4309 		return -EINVAL;
4310 
4311 	return 0;
4312 }
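/*
 * Illustrative run-time usage of ftrace_set_func() via the debugfs files
 * defined below (paths assume the standard mount):
 *
 *   # echo do_IRQ > /sys/kernel/debug/tracing/set_graph_function
 *   # echo '!do_IRQ' > /sys/kernel/debug/tracing/set_graph_function
 *
 * A leading '!' makes filter_parse_regex() set "not", which removes a
 * previously added entry from the array instead of appending to it.
 */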
4313 
4314 static ssize_t
4315 ftrace_graph_write(struct file *file, const char __user *ubuf,
4316 		   size_t cnt, loff_t *ppos)
4317 {
4318 	struct trace_parser parser;
4319 	ssize_t read, ret = 0;
4320 	struct ftrace_graph_data *fgd = file->private_data;
4321 
4322 	if (!cnt)
4323 		return 0;
4324 
4325 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4326 		return -ENOMEM;
4327 
4328 	read = trace_get_user(&parser, ubuf, cnt, ppos);
4329 
4330 	if (read >= 0 && trace_parser_loaded(&parser)) {
4331 		parser.buffer[parser.idx] = 0;
4332 
4333 		mutex_lock(&graph_lock);
4334 
4335 		/* we allow only one expression at a time */
4336 		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4337 				      parser.buffer);
4338 
4339 		mutex_unlock(&graph_lock);
4340 	}
4341 
4342 	if (!ret)
4343 		ret = read;
4344 
4345 	trace_parser_put(&parser);
4346 
4347 	return ret;
4348 }
4349 
4350 static const struct file_operations ftrace_graph_fops = {
4351 	.open		= ftrace_graph_open,
4352 	.read		= seq_read,
4353 	.write		= ftrace_graph_write,
4354 	.llseek		= tracing_lseek,
4355 	.release	= ftrace_graph_release,
4356 };
4357 
4358 static const struct file_operations ftrace_graph_notrace_fops = {
4359 	.open		= ftrace_graph_notrace_open,
4360 	.read		= seq_read,
4361 	.write		= ftrace_graph_write,
4362 	.llseek		= tracing_lseek,
4363 	.release	= ftrace_graph_release,
4364 };
4365 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4366 
4367 void ftrace_create_filter_files(struct ftrace_ops *ops,
4368 				struct dentry *parent)
4369 {
4370 
4371 	trace_create_file("set_ftrace_filter", 0644, parent,
4372 			  ops, &ftrace_filter_fops);
4373 
4374 	trace_create_file("set_ftrace_notrace", 0644, parent,
4375 			  ops, &ftrace_notrace_fops);
4376 }
4377 
4378 /*
4379  * The name "destroy_filter_files" is really a misnomer. Although
4380  * it may actually delete the files in the future, for now it is
4381  * only intended to make sure the ops passed in are disabled
4382  * and that when this function returns, the caller is free to
4383  * free the ops.
4384  *
4385  * The "destroy" name is only to match the "create" name that this
4386  * should be paired with.
4387  */
4388 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4389 {
4390 	mutex_lock(&ftrace_lock);
4391 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
4392 		ftrace_shutdown(ops, 0);
4393 	ops->flags |= FTRACE_OPS_FL_DELETED;
4394 	mutex_unlock(&ftrace_lock);
4395 }
4396 
4397 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
4398 {
4399 
4400 	trace_create_file("available_filter_functions", 0444,
4401 			d_tracer, NULL, &ftrace_avail_fops);
4402 
4403 	trace_create_file("enabled_functions", 0444,
4404 			d_tracer, NULL, &ftrace_enabled_fops);
4405 
4406 	ftrace_create_filter_files(&global_ops, d_tracer);
4407 
4408 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4409 	trace_create_file("set_graph_function", 0444, d_tracer,
4410 				    NULL,
4411 				    &ftrace_graph_fops);
4412 	trace_create_file("set_graph_notrace", 0444, d_tracer,
4413 				    NULL,
4414 				    &ftrace_graph_notrace_fops);
4415 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4416 
4417 	return 0;
4418 }
4419 
4420 static int ftrace_cmp_ips(const void *a, const void *b)
4421 {
4422 	const unsigned long *ipa = a;
4423 	const unsigned long *ipb = b;
4424 
4425 	if (*ipa > *ipb)
4426 		return 1;
4427 	if (*ipa < *ipb)
4428 		return -1;
4429 	return 0;
4430 }
4431 
4432 static void ftrace_swap_ips(void *a, void *b, int size)
4433 {
4434 	unsigned long *ipa = a;
4435 	unsigned long *ipb = b;
4436 	unsigned long t;
4437 
4438 	t = *ipa;
4439 	*ipa = *ipb;
4440 	*ipb = t;
4441 }
4442 
4443 static int ftrace_process_locs(struct module *mod,
4444 			       unsigned long *start,
4445 			       unsigned long *end)
4446 {
4447 	struct ftrace_page *start_pg;
4448 	struct ftrace_page *pg;
4449 	struct dyn_ftrace *rec;
4450 	unsigned long count;
4451 	unsigned long *p;
4452 	unsigned long addr;
4453 	unsigned long flags = 0; /* Shut up gcc */
4454 	int ret = -ENOMEM;
4455 
4456 	count = end - start;
4457 
4458 	if (!count)
4459 		return 0;
4460 
4461 	sort(start, count, sizeof(*start),
4462 	     ftrace_cmp_ips, ftrace_swap_ips);
4463 
4464 	start_pg = ftrace_allocate_pages(count);
4465 	if (!start_pg)
4466 		return -ENOMEM;
4467 
4468 	mutex_lock(&ftrace_lock);
4469 
4470 	/*
4471 	 * The core kernel and each module need their own pages, as
4472 	 * modules will free them when they are removed.
4473 	 * Force a new page to be allocated for modules.
4474 	 */
4475 	if (!mod) {
4476 		WARN_ON(ftrace_pages || ftrace_pages_start);
4477 		/* First initialization */
4478 		ftrace_pages = ftrace_pages_start = start_pg;
4479 	} else {
4480 		if (!ftrace_pages)
4481 			goto out;
4482 
4483 		if (WARN_ON(ftrace_pages->next)) {
4484 			/* Hmm, we have free pages? */
4485 			while (ftrace_pages->next)
4486 				ftrace_pages = ftrace_pages->next;
4487 		}
4488 
4489 		ftrace_pages->next = start_pg;
4490 	}
4491 
4492 	p = start;
4493 	pg = start_pg;
4494 	while (p < end) {
4495 		addr = ftrace_call_adjust(*p++);
4496 		/*
4497 		 * Some architecture linkers will pad between
4498 		 * the different mcount_loc sections of different
4499 		 * object files to satisfy alignments.
4500 		 * Skip any NULL pointers.
4501 		 */
4502 		if (!addr)
4503 			continue;
4504 
4505 		if (pg->index == pg->size) {
4506 			/* We should have allocated enough */
4507 			if (WARN_ON(!pg->next))
4508 				break;
4509 			pg = pg->next;
4510 		}
4511 
4512 		rec = &pg->records[pg->index++];
4513 		rec->ip = addr;
4514 	}
4515 
4516 	/* We should have used all pages */
4517 	WARN_ON(pg->next);
4518 
4519 	/* Assign the last page to ftrace_pages */
4520 	ftrace_pages = pg;
4521 
4522 	/*
4523 	 * We only need to disable interrupts on start up
4524 	 * because we are modifying code that an interrupt
4525 	 * may execute, and the modification is not atomic.
4526 	 * But for modules, nothing runs the code we modify
4527 	 * until we are finished with it, and there's no
4528 	 * reason to cause large interrupt latencies while we do it.
4529 	 */
4530 	if (!mod)
4531 		local_irq_save(flags);
4532 	ftrace_update_code(mod, start_pg);
4533 	if (!mod)
4534 		local_irq_restore(flags);
4535 	ret = 0;
4536  out:
4537 	mutex_unlock(&ftrace_lock);
4538 
4539 	return ret;
4540 }
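/*
 * Background note: the start/end pointers given to ftrace_process_locs()
 * delimit an array of unsigned long call-site addresses recorded at
 * build time for every mcount/fentry call. For the core kernel these
 * are the __start_mcount_loc/__stop_mcount_loc section bounds used in
 * ftrace_init() below; for modules they come from mod->ftrace_callsites.
 */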
4541 
4542 #ifdef CONFIG_MODULES
4543 
4544 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4545 
4546 void ftrace_release_mod(struct module *mod)
4547 {
4548 	struct dyn_ftrace *rec;
4549 	struct ftrace_page **last_pg;
4550 	struct ftrace_page *pg;
4551 	int order;
4552 
4553 	mutex_lock(&ftrace_lock);
4554 
4555 	if (ftrace_disabled)
4556 		goto out_unlock;
4557 
4558 	/*
4559 	 * Each module has its own ftrace_pages, remove
4560 	 * them from the list.
4561 	 */
4562 	last_pg = &ftrace_pages_start;
4563 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4564 		rec = &pg->records[0];
4565 		if (within_module_core(rec->ip, mod)) {
4566 			/*
4567 			 * As core pages are first, the first
4568 			 * page should never be a module page.
4569 			 */
4570 			if (WARN_ON(pg == ftrace_pages_start))
4571 				goto out_unlock;
4572 
4573 			/* Check if we are deleting the last page */
4574 			if (pg == ftrace_pages)
4575 				ftrace_pages = next_to_ftrace_page(last_pg);
4576 
4577 			*last_pg = pg->next;
4578 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4579 			free_pages((unsigned long)pg->records, order);
4580 			kfree(pg);
4581 		} else
4582 			last_pg = &pg->next;
4583 	}
4584  out_unlock:
4585 	mutex_unlock(&ftrace_lock);
4586 }
4587 
4588 static void ftrace_init_module(struct module *mod,
4589 			       unsigned long *start, unsigned long *end)
4590 {
4591 	if (ftrace_disabled || start == end)
4592 		return;
4593 	ftrace_process_locs(mod, start, end);
4594 }
4595 
4596 void ftrace_module_init(struct module *mod)
4597 {
4598 	ftrace_init_module(mod, mod->ftrace_callsites,
4599 			   mod->ftrace_callsites +
4600 			   mod->num_ftrace_callsites);
4601 }
4602 
4603 static int ftrace_module_notify_exit(struct notifier_block *self,
4604 				     unsigned long val, void *data)
4605 {
4606 	struct module *mod = data;
4607 
4608 	if (val == MODULE_STATE_GOING)
4609 		ftrace_release_mod(mod);
4610 
4611 	return 0;
4612 }
4613 #else
4614 static int ftrace_module_notify_exit(struct notifier_block *self,
4615 				     unsigned long val, void *data)
4616 {
4617 	return 0;
4618 }
4619 #endif /* CONFIG_MODULES */
4620 
4621 struct notifier_block ftrace_module_exit_nb = {
4622 	.notifier_call = ftrace_module_notify_exit,
4623 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
4624 };
4625 
4626 void __init ftrace_init(void)
4627 {
4628 	extern unsigned long __start_mcount_loc[];
4629 	extern unsigned long __stop_mcount_loc[];
4630 	unsigned long count, flags;
4631 	int ret;
4632 
4633 	local_irq_save(flags);
4634 	ret = ftrace_dyn_arch_init();
4635 	local_irq_restore(flags);
4636 	if (ret)
4637 		goto failed;
4638 
4639 	count = __stop_mcount_loc - __start_mcount_loc;
4640 	if (!count) {
4641 		pr_info("ftrace: No functions to be traced?\n");
4642 		goto failed;
4643 	}
4644 
4645 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
4646 		count, count / ENTRIES_PER_PAGE + 1);
4647 
4648 	last_ftrace_enabled = ftrace_enabled = 1;
4649 
4650 	ret = ftrace_process_locs(NULL,
4651 				  __start_mcount_loc,
4652 				  __stop_mcount_loc);
4653 
4654 	ret = register_module_notifier(&ftrace_module_exit_nb);
4655 	if (ret)
4656 		pr_warning("Failed to register ftrace module exit notifier\n");
4657 
4658 	set_ftrace_early_filters();
4659 
4660 	return;
4661  failed:
4662 	ftrace_disabled = 1;
4663 }
4664 
4665 #else
4666 
4667 static struct ftrace_ops global_ops = {
4668 	.func			= ftrace_stub,
4669 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4670 };
4671 
4672 static int __init ftrace_nodyn_init(void)
4673 {
4674 	ftrace_enabled = 1;
4675 	return 0;
4676 }
4677 core_initcall(ftrace_nodyn_init);
4678 
4679 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4680 static inline void ftrace_startup_enable(int command) { }
4681 /* Keep as macros so we do not need to define the commands */
4682 # define ftrace_startup(ops, command)					\
4683 	({								\
4684 		int ___ret = __register_ftrace_function(ops);		\
4685 		if (!___ret)						\
4686 			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
4687 		___ret;							\
4688 	})
4689 # define ftrace_shutdown(ops, command)					\
4690 	({								\
4691 		int ___ret = __unregister_ftrace_function(ops);		\
4692 		if (!___ret)						\
4693 			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
4694 		___ret;							\
4695 	})
4696 
4697 # define ftrace_startup_sysctl()	do { } while (0)
4698 # define ftrace_shutdown_sysctl()	do { } while (0)
4699 
4700 static inline int
4701 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4702 {
4703 	return 1;
4704 }
4705 
4706 #endif /* CONFIG_DYNAMIC_FTRACE */
4707 
4708 __init void ftrace_init_global_array_ops(struct trace_array *tr)
4709 {
4710 	tr->ops = &global_ops;
4711 	tr->ops->private = tr;
4712 }
4713 
4714 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
4715 {
4716 	/* If we filter on pids, update to use the pid function */
4717 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4718 		if (WARN_ON(tr->ops->func != ftrace_stub))
4719 			printk("ftrace ops had %pS for function\n",
4720 			       tr->ops->func);
4721 		/* Only the top level instance does pid tracing */
4722 		if (!list_empty(&ftrace_pids)) {
4723 			set_ftrace_pid_function(func);
4724 			func = ftrace_pid_func;
4725 		}
4726 	}
4727 	tr->ops->func = func;
4728 	tr->ops->private = tr;
4729 }
4730 
4731 void ftrace_reset_array_ops(struct trace_array *tr)
4732 {
4733 	tr->ops->func = ftrace_stub;
4734 }
4735 
4736 static void
4737 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4738 			struct ftrace_ops *op, struct pt_regs *regs)
4739 {
4740 	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4741 		return;
4742 
4743 	/*
4744 	 * Some of the ops may be dynamically allocated;
4745 	 * they must be freed after a synchronize_sched().
4746 	 */
4747 	preempt_disable_notrace();
4748 	trace_recursion_set(TRACE_CONTROL_BIT);
4749 
4750 	/*
4751 	 * Control functions (perf) use RCU. Only trace if
4752 	 * RCU is currently watching.
4753 	 */
4754 	if (!rcu_is_watching())
4755 		goto out;
4756 
4757 	do_for_each_ftrace_op(op, ftrace_control_list) {
4758 		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4759 		    !ftrace_function_local_disabled(op) &&
4760 		    ftrace_ops_test(op, ip, regs))
4761 			op->func(ip, parent_ip, op, regs);
4762 	} while_for_each_ftrace_op(op);
4763  out:
4764 	trace_recursion_clear(TRACE_CONTROL_BIT);
4765 	preempt_enable_notrace();
4766 }
4767 
4768 static struct ftrace_ops control_ops = {
4769 	.func	= ftrace_ops_control_func,
4770 	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4771 	INIT_OPS_HASH(control_ops)
4772 };
4773 
4774 static inline void
4775 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4776 		       struct ftrace_ops *ignored, struct pt_regs *regs)
4777 {
4778 	struct ftrace_ops *op;
4779 	int bit;
4780 
4781 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4782 	if (bit < 0)
4783 		return;
4784 
4785 	/*
4786 	 * Some of the ops may be dynamically allocated;
4787 	 * they must be freed after a synchronize_sched().
4788 	 */
4789 	preempt_disable_notrace();
4790 	do_for_each_ftrace_op(op, ftrace_ops_list) {
4791 		if (ftrace_ops_test(op, ip, regs)) {
4792 			if (FTRACE_WARN_ON(!op->func)) {
4793 				pr_warn("op=%p %pS\n", op, op);
4794 				goto out;
4795 			}
4796 			op->func(ip, parent_ip, op, regs);
4797 		}
4798 	} while_for_each_ftrace_op(op);
4799 out:
4800 	preempt_enable_notrace();
4801 	trace_clear_recursion(bit);
4802 }
4803 
4804 /*
4805  * Some archs only support passing ip and parent_ip. Even though
4806  * the list function ignores the op parameter, we do not want any
4807  * C side effects, where a function is called without the caller
4808  * sending a third parameter.
4809  * Archs are expected to support both regs and ftrace_ops at the
4810  * same time; if they support ftrace_ops, it is assumed they support regs.
4811  * If callbacks want to use regs, they must either check for regs
4812  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4813  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4814  * An architecture can pass partial regs with ftrace_ops and still
4815  * set ARCH_SUPPORTS_FTRACE_OPS.
4816  */
4817 #if ARCH_SUPPORTS_FTRACE_OPS
4818 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4819 				 struct ftrace_ops *op, struct pt_regs *regs)
4820 {
4821 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4822 }
4823 #else
4824 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4825 {
4826 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4827 }
4828 #endif
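/*
 * A minimal sketch of a callback honoring the rule above ("my_func" is
 * hypothetical, not part of this file): since regs may be NULL when the
 * arch saves no registers, check before use unless the callback can rely
 * on CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 *
 *	static void notrace my_func(unsigned long ip, unsigned long parent_ip,
 *				    struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		if (regs) {
 *			// safe to inspect the saved registers here
 *		}
 *	}
 */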
4829 
4830 static void clear_ftrace_swapper(void)
4831 {
4832 	struct task_struct *p;
4833 	int cpu;
4834 
4835 	get_online_cpus();
4836 	for_each_online_cpu(cpu) {
4837 		p = idle_task(cpu);
4838 		clear_tsk_trace_trace(p);
4839 	}
4840 	put_online_cpus();
4841 }
4842 
4843 static void set_ftrace_swapper(void)
4844 {
4845 	struct task_struct *p;
4846 	int cpu;
4847 
4848 	get_online_cpus();
4849 	for_each_online_cpu(cpu) {
4850 		p = idle_task(cpu);
4851 		set_tsk_trace_trace(p);
4852 	}
4853 	put_online_cpus();
4854 }
4855 
4856 static void clear_ftrace_pid(struct pid *pid)
4857 {
4858 	struct task_struct *p;
4859 
4860 	rcu_read_lock();
4861 	do_each_pid_task(pid, PIDTYPE_PID, p) {
4862 		clear_tsk_trace_trace(p);
4863 	} while_each_pid_task(pid, PIDTYPE_PID, p);
4864 	rcu_read_unlock();
4865 
4866 	put_pid(pid);
4867 }
4868 
4869 static void set_ftrace_pid(struct pid *pid)
4870 {
4871 	struct task_struct *p;
4872 
4873 	rcu_read_lock();
4874 	do_each_pid_task(pid, PIDTYPE_PID, p) {
4875 		set_tsk_trace_trace(p);
4876 	} while_each_pid_task(pid, PIDTYPE_PID, p);
4877 	rcu_read_unlock();
4878 }
4879 
4880 static void clear_ftrace_pid_task(struct pid *pid)
4881 {
4882 	if (pid == ftrace_swapper_pid)
4883 		clear_ftrace_swapper();
4884 	else
4885 		clear_ftrace_pid(pid);
4886 }
4887 
4888 static void set_ftrace_pid_task(struct pid *pid)
4889 {
4890 	if (pid == ftrace_swapper_pid)
4891 		set_ftrace_swapper();
4892 	else
4893 		set_ftrace_pid(pid);
4894 }
4895 
4896 static int ftrace_pid_add(int p)
4897 {
4898 	struct pid *pid;
4899 	struct ftrace_pid *fpid;
4900 	int ret = -EINVAL;
4901 
4902 	mutex_lock(&ftrace_lock);
4903 
4904 	if (!p)
4905 		pid = ftrace_swapper_pid;
4906 	else
4907 		pid = find_get_pid(p);
4908 
4909 	if (!pid)
4910 		goto out;
4911 
4912 	ret = 0;
4913 
4914 	list_for_each_entry(fpid, &ftrace_pids, list)
4915 		if (fpid->pid == pid)
4916 			goto out_put;
4917 
4918 	ret = -ENOMEM;
4919 
4920 	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4921 	if (!fpid)
4922 		goto out_put;
4923 
4924 	list_add(&fpid->list, &ftrace_pids);
4925 	fpid->pid = pid;
4926 
4927 	set_ftrace_pid_task(pid);
4928 
4929 	ftrace_update_pid_func();
4930 	ftrace_startup_enable(0);
4931 
4932 	mutex_unlock(&ftrace_lock);
4933 	return 0;
4934 
4935 out_put:
4936 	if (pid != ftrace_swapper_pid)
4937 		put_pid(pid);
4938 
4939 out:
4940 	mutex_unlock(&ftrace_lock);
4941 	return ret;
4942 }
4943 
4944 static void ftrace_pid_reset(void)
4945 {
4946 	struct ftrace_pid *fpid, *safe;
4947 
4948 	mutex_lock(&ftrace_lock);
4949 	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4950 		struct pid *pid = fpid->pid;
4951 
4952 		clear_ftrace_pid_task(pid);
4953 
4954 		list_del(&fpid->list);
4955 		kfree(fpid);
4956 	}
4957 
4958 	ftrace_update_pid_func();
4959 	ftrace_startup_enable(0);
4960 
4961 	mutex_unlock(&ftrace_lock);
4962 }
4963 
4964 static void *fpid_start(struct seq_file *m, loff_t *pos)
4965 {
4966 	mutex_lock(&ftrace_lock);
4967 
4968 	if (list_empty(&ftrace_pids) && (!*pos))
4969 		return (void *) 1;
4970 
4971 	return seq_list_start(&ftrace_pids, *pos);
4972 }
4973 
4974 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4975 {
4976 	if (v == (void *)1)
4977 		return NULL;
4978 
4979 	return seq_list_next(v, &ftrace_pids, pos);
4980 }
4981 
4982 static void fpid_stop(struct seq_file *m, void *p)
4983 {
4984 	mutex_unlock(&ftrace_lock);
4985 }
4986 
4987 static int fpid_show(struct seq_file *m, void *v)
4988 {
4989 	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4990 
4991 	if (v == (void *)1) {
4992 		seq_puts(m, "no pid\n");
4993 		return 0;
4994 	}
4995 
4996 	if (fpid->pid == ftrace_swapper_pid)
4997 		seq_puts(m, "swapper tasks\n");
4998 	else
4999 		seq_printf(m, "%u\n", pid_vnr(fpid->pid));
5000 
5001 	return 0;
5002 }
5003 
5004 static const struct seq_operations ftrace_pid_sops = {
5005 	.start = fpid_start,
5006 	.next = fpid_next,
5007 	.stop = fpid_stop,
5008 	.show = fpid_show,
5009 };
5010 
5011 static int
5012 ftrace_pid_open(struct inode *inode, struct file *file)
5013 {
5014 	int ret = 0;
5015 
5016 	if ((file->f_mode & FMODE_WRITE) &&
5017 	    (file->f_flags & O_TRUNC))
5018 		ftrace_pid_reset();
5019 
5020 	if (file->f_mode & FMODE_READ)
5021 		ret = seq_open(file, &ftrace_pid_sops);
5022 
5023 	return ret;
5024 }
5025 
5026 static ssize_t
5027 ftrace_pid_write(struct file *filp, const char __user *ubuf,
5028 		   size_t cnt, loff_t *ppos)
5029 {
5030 	char buf[64], *tmp;
5031 	long val;
5032 	int ret;
5033 
5034 	if (cnt >= sizeof(buf))
5035 		return -EINVAL;
5036 
5037 	if (copy_from_user(buf, ubuf, cnt))
5038 		return -EFAULT;
5039 
5040 	buf[cnt] = 0;
5041 
5042 	/*
5043 	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5044 	 * to clear the filter quietly.
5045 	 */
5046 	tmp = strstrip(buf);
5047 	if (strlen(tmp) == 0)
5048 		return 1;
5049 
5050 	ret = kstrtol(tmp, 10, &val);
5051 	if (ret < 0)
5052 		return ret;
5053 
5054 	ret = ftrace_pid_add(val);
5055 
5056 	return ret ? ret : cnt;
5057 }
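/*
 * Illustrative usage of the write handler above (standard debugfs path
 * assumed):
 *
 *   # echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *   # echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid
 *   # echo > /sys/kernel/debug/tracing/set_ftrace_pid
 *
 * A pid of 0 selects the swapper (idle) tasks via ftrace_swapper_pid,
 * and an empty write clears the filter quietly, matching the comment in
 * ftrace_pid_write().
 */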
5058 
5059 static int
5060 ftrace_pid_release(struct inode *inode, struct file *file)
5061 {
5062 	if (file->f_mode & FMODE_READ)
5063 		seq_release(inode, file);
5064 
5065 	return 0;
5066 }
5067 
5068 static const struct file_operations ftrace_pid_fops = {
5069 	.open		= ftrace_pid_open,
5070 	.write		= ftrace_pid_write,
5071 	.read		= seq_read,
5072 	.llseek		= tracing_lseek,
5073 	.release	= ftrace_pid_release,
5074 };
5075 
5076 static __init int ftrace_init_debugfs(void)
5077 {
5078 	struct dentry *d_tracer;
5079 
5080 	d_tracer = tracing_init_dentry();
5081 	if (!d_tracer)
5082 		return 0;
5083 
5084 	ftrace_init_dyn_debugfs(d_tracer);
5085 
5086 	trace_create_file("set_ftrace_pid", 0644, d_tracer,
5087 			    NULL, &ftrace_pid_fops);
5088 
5089 	ftrace_profile_debugfs(d_tracer);
5090 
5091 	return 0;
5092 }
5093 fs_initcall(ftrace_init_debugfs);
5094 
5095 /**
5096  * ftrace_kill - kill ftrace
5097  *
5098  * This function should be used by panic code. It stops ftrace
5099  * but in a not so nice way: it sets ftrace_disabled and clears the
5100  * function tracer without taking any locks, so it is safe in atomic context.
5101  */
5102 void ftrace_kill(void)
5103 {
5104 	ftrace_disabled = 1;
5105 	ftrace_enabled = 0;
5106 	clear_ftrace_function();
5107 }
5108 
5109 /**
5110  * ftrace_is_dead - Test if ftrace is dead or not.
5111  */
5112 int ftrace_is_dead(void)
5113 {
5114 	return ftrace_disabled;
5115 }
5116 
5117 /**
5118  * register_ftrace_function - register a function for profiling
5119  * @ops: ops structure that holds the function for profiling.
5120  *
5121  * Register a function to be called by all functions in the
5122  * kernel.
5123  *
5124  * Note: @ops->func and all the functions it calls must be labeled
5125  *       with "notrace", otherwise it will go into a
5126  *       recursive loop.
5127  */
5128 int register_ftrace_function(struct ftrace_ops *ops)
5129 {
5130 	int ret = -1;
5131 
5132 	ftrace_ops_init(ops);
5133 
5134 	mutex_lock(&ftrace_lock);
5135 
5136 	ret = ftrace_startup(ops, 0);
5137 
5138 	mutex_unlock(&ftrace_lock);
5139 
5140 	return ret;
5141 }
5142 EXPORT_SYMBOL_GPL(register_ftrace_function);
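/*
 * A minimal sketch of a caller (names are hypothetical, not from this
 * file). Per the note above, the callback and everything it calls must
 * be notrace:
 *
 *	static void notrace my_tracer(unsigned long ip, unsigned long parent_ip,
 *				      struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// ip is the traced function, parent_ip its caller
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_tracer,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */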
5143 
5144 /**
5145  * unregister_ftrace_function - unregister a function for profiling.
5146  * @ops: ops structure that holds the function to unregister
5147  *
5148  * Unregister a function that was added to be called by ftrace profiling.
5149  */
5150 int unregister_ftrace_function(struct ftrace_ops *ops)
5151 {
5152 	int ret;
5153 
5154 	mutex_lock(&ftrace_lock);
5155 	ret = ftrace_shutdown(ops, 0);
5156 	mutex_unlock(&ftrace_lock);
5157 
5158 	return ret;
5159 }
5160 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5161 
5162 int
5163 ftrace_enable_sysctl(struct ctl_table *table, int write,
5164 		     void __user *buffer, size_t *lenp,
5165 		     loff_t *ppos)
5166 {
5167 	int ret = -ENODEV;
5168 
5169 	mutex_lock(&ftrace_lock);
5170 
5171 	if (unlikely(ftrace_disabled))
5172 		goto out;
5173 
5174 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
5175 
5176 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5177 		goto out;
5178 
5179 	last_ftrace_enabled = !!ftrace_enabled;
5180 
5181 	if (ftrace_enabled) {
5182 
5183 		ftrace_startup_sysctl();
5184 
5185 		/* we are starting ftrace again */
5186 		if (ftrace_ops_list != &ftrace_list_end)
5187 			update_ftrace_function();
5188 
5189 	} else {
5190 		/* stopping ftrace calls (just send to ftrace_stub) */
5191 		ftrace_trace_function = ftrace_stub;
5192 
5193 		ftrace_shutdown_sysctl();
5194 	}
5195 
5196  out:
5197 	mutex_unlock(&ftrace_lock);
5198 	return ret;
5199 }
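/*
 * Illustrative usage: this handler sits behind the kernel.ftrace_enabled
 * sysctl, so function tracing can be toggled at run time with e.g.:
 *
 *   # sysctl kernel.ftrace_enabled=0
 *   # echo 1 > /proc/sys/kernel/ftrace_enabled
 */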
5200 
5201 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5202 
5203 static struct ftrace_ops graph_ops = {
5204 	.func			= ftrace_stub,
5205 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
5206 				   FTRACE_OPS_FL_INITIALIZED |
5207 				   FTRACE_OPS_FL_STUB,
5208 #ifdef FTRACE_GRAPH_TRAMP_ADDR
5209 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
5210 #endif
5211 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5212 };
5213 
5214 static int ftrace_graph_active;
5215 
5216 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5217 {
5218 	return 0;
5219 }
5220 
5221 /* The callbacks that hook a function */
5222 trace_func_graph_ret_t ftrace_graph_return =
5223 			(trace_func_graph_ret_t)ftrace_stub;
5224 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
5225 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
5226 
5227 /* Try to assign a return stack to each task, FTRACE_RETSTACK_ALLOC_SIZE at a time. */
5228 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5229 {
5230 	int i;
5231 	int ret = 0;
5232 	unsigned long flags;
5233 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5234 	struct task_struct *g, *t;
5235 
5236 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5237 		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5238 					* sizeof(struct ftrace_ret_stack),
5239 					GFP_KERNEL);
5240 		if (!ret_stack_list[i]) {
5241 			start = 0;
5242 			end = i;
5243 			ret = -ENOMEM;
5244 			goto free;
5245 		}
5246 	}
5247 
5248 	read_lock_irqsave(&tasklist_lock, flags);
5249 	do_each_thread(g, t) {
5250 		if (start == end) {
5251 			ret = -EAGAIN;
5252 			goto unlock;
5253 		}
5254 
5255 		if (t->ret_stack == NULL) {
5256 			atomic_set(&t->tracing_graph_pause, 0);
5257 			atomic_set(&t->trace_overrun, 0);
5258 			t->curr_ret_stack = -1;
5259 			/* Make sure the tasks see the -1 first: */
5260 			smp_wmb();
5261 			t->ret_stack = ret_stack_list[start++];
5262 		}
5263 	} while_each_thread(g, t);
5264 
5265 unlock:
5266 	read_unlock_irqrestore(&tasklist_lock, flags);
5267 free:
5268 	for (i = start; i < end; i++)
5269 		kfree(ret_stack_list[i]);
5270 	return ret;
5271 }
5272 
5273 static void
5274 ftrace_graph_probe_sched_switch(void *ignore,
5275 			struct task_struct *prev, struct task_struct *next)
5276 {
5277 	unsigned long long timestamp;
5278 	int index;
5279 
5280 	/*
5281 	 * Does the user want to count the time a function was asleep?
5282 	 * If so, do not update the time stamps.
5283 	 */
5284 	if (trace_flags & TRACE_ITER_SLEEP_TIME)
5285 		return;
5286 
5287 	timestamp = trace_clock_local();
5288 
5289 	prev->ftrace_timestamp = timestamp;
5290 
5291 	/* only process tasks that we timestamped */
5292 	if (!next->ftrace_timestamp)
5293 		return;
5294 
5295 	/*
5296 	 * Update all the counters in next to make up for the
5297 	 * time next was sleeping.
5298 	 */
5299 	timestamp -= next->ftrace_timestamp;
5300 
5301 	for (index = next->curr_ret_stack; index >= 0; index--)
5302 		next->ret_stack[index].calltime += timestamp;
5303 }
5304 
5305 /* Allocate a return stack for each task */
5306 static int start_graph_tracing(void)
5307 {
5308 	struct ftrace_ret_stack **ret_stack_list;
5309 	int ret, cpu;
5310 
5311 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5312 				sizeof(struct ftrace_ret_stack *),
5313 				GFP_KERNEL);
5314 
5315 	if (!ret_stack_list)
5316 		return -ENOMEM;
5317 
5318 	/* The boot CPU's init_task->ret_stack will never be freed */
5319 	for_each_online_cpu(cpu) {
5320 		if (!idle_task(cpu)->ret_stack)
5321 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5322 	}
5323 
5324 	do {
5325 		ret = alloc_retstack_tasklist(ret_stack_list);
5326 	} while (ret == -EAGAIN);
5327 
5328 	if (!ret) {
5329 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5330 		if (ret)
5331 			pr_info("ftrace_graph: Couldn't activate tracepoint"
5332 				" probe to kernel_sched_switch\n");
5333 	}
5334 
5335 	kfree(ret_stack_list);
5336 	return ret;
5337 }
5338 
5339 /*
5340  * Hibernation protection.
5341  * The state of the current task is too unstable during
5342  * suspend/restore to disk. We want to protect against that.
5343  */
5344 static int
5345 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5346 							void *unused)
5347 {
5348 	switch (state) {
5349 	case PM_HIBERNATION_PREPARE:
5350 		pause_graph_tracing();
5351 		break;
5352 
5353 	case PM_POST_HIBERNATION:
5354 		unpause_graph_tracing();
5355 		break;
5356 	}
5357 	return NOTIFY_DONE;
5358 }
5359 
5360 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5361 {
5362 	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5363 		return 0;
5364 	return __ftrace_graph_entry(trace);
5365 }
5366 
5367 /*
5368  * The function graph tracer should only trace the functions defined
5369  * by set_ftrace_filter and set_ftrace_notrace. If another function
5370  * tracer ops is registered, the graph tracer must test each
5371  * function against the global ops rather than tracing any function
5372  * that any ftrace_ops has registered.
5373  */
5374 static void update_function_graph_func(void)
5375 {
5376 	struct ftrace_ops *op;
5377 	bool do_test = false;
5378 
5379 	/*
5380 	 * The graph and global ops share the same set of functions
5381 	 * to test. If any other ops is on the list, then
5382 	 * graph tracing needs to test whether it is a function
5383 	 * it should call.
5384 	 */
5385 	do_for_each_ftrace_op(op, ftrace_ops_list) {
5386 		if (op != &global_ops && op != &graph_ops &&
5387 		    op != &ftrace_list_end) {
5388 			do_test = true;
5389 			/* in double loop, break out with goto */
5390 			goto out;
5391 		}
5392 	} while_for_each_ftrace_op(op);
5393  out:
5394 	if (do_test)
5395 		ftrace_graph_entry = ftrace_graph_entry_test;
5396 	else
5397 		ftrace_graph_entry = __ftrace_graph_entry;
5398 }
5399 
5400 static struct notifier_block ftrace_suspend_notifier = {
5401 	.notifier_call = ftrace_suspend_notifier_call,
5402 };
5403 
5404 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5405 			trace_func_graph_ent_t entryfunc)
5406 {
5407 	int ret = 0;
5408 
5409 	mutex_lock(&ftrace_lock);
5410 
5411 	/* we currently allow only one tracer registered at a time */
5412 	if (ftrace_graph_active) {
5413 		ret = -EBUSY;
5414 		goto out;
5415 	}
5416 
5417 	register_pm_notifier(&ftrace_suspend_notifier);
5418 
5419 	ftrace_graph_active++;
5420 	ret = start_graph_tracing();
5421 	if (ret) {
5422 		ftrace_graph_active--;
5423 		goto out;
5424 	}
5425 
5426 	ftrace_graph_return = retfunc;
5427 
5428 	/*
5429 	 * Update the indirect function to the entryfunc, and the
5430 	 * function that gets called to the entry_test first. Then
5431 	 * call the update fgraph entry function to determine if
5432 	 * the entryfunc should be called directly or not.
5433 	 */
5434 	__ftrace_graph_entry = entryfunc;
5435 	ftrace_graph_entry = ftrace_graph_entry_test;
5436 	update_function_graph_func();
5437 
5438 	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
5439 
5440 out:
5441 	mutex_unlock(&ftrace_lock);
5442 	return ret;
5443 }
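/*
 * A minimal sketch of a graph-tracer client (hypothetical names). The
 * entry callback returns nonzero to trace the function; both callbacks
 * are invoked from function context and must be written accordingly:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// trace everything the filters allow
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 */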
5444 
5445 void unregister_ftrace_graph(void)
5446 {
5447 	mutex_lock(&ftrace_lock);
5448 
5449 	if (unlikely(!ftrace_graph_active))
5450 		goto out;
5451 
5452 	ftrace_graph_active--;
5453 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5454 	ftrace_graph_entry = ftrace_graph_entry_stub;
5455 	__ftrace_graph_entry = ftrace_graph_entry_stub;
5456 	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
5457 	unregister_pm_notifier(&ftrace_suspend_notifier);
5458 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5459 
5460  out:
5461 	mutex_unlock(&ftrace_lock);
5462 }
5463 
5464 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5465 
5466 static void
5467 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5468 {
5469 	atomic_set(&t->tracing_graph_pause, 0);
5470 	atomic_set(&t->trace_overrun, 0);
5471 	t->ftrace_timestamp = 0;
5472 	/* make curr_ret_stack visible before we add the ret_stack */
5473 	smp_wmb();
5474 	t->ret_stack = ret_stack;
5475 }
5476 
5477 /*
5478  * Allocate a return stack for the idle task. May be the first
5479  * time through, or it may be done by CPU hotplug online.
5480  */
5481 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5482 {
5483 	t->curr_ret_stack = -1;
5484 	/*
5485 	 * The idle task has no parent; it either has its own
5486 	 * stack or no stack at all.
5487 	 */
5488 	if (t->ret_stack)
5489 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5490 
5491 	if (ftrace_graph_active) {
5492 		struct ftrace_ret_stack *ret_stack;
5493 
5494 		ret_stack = per_cpu(idle_ret_stack, cpu);
5495 		if (!ret_stack) {
5496 			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5497 					    * sizeof(struct ftrace_ret_stack),
5498 					    GFP_KERNEL);
5499 			if (!ret_stack)
5500 				return;
5501 			per_cpu(idle_ret_stack, cpu) = ret_stack;
5502 		}
5503 		graph_init_task(t, ret_stack);
5504 	}
5505 }
5506 
5507 /* Allocate a return stack for newly created task */
5508 void ftrace_graph_init_task(struct task_struct *t)
5509 {
5510 	/* Make sure we do not use the parent ret_stack */
5511 	t->ret_stack = NULL;
5512 	t->curr_ret_stack = -1;
5513 
5514 	if (ftrace_graph_active) {
5515 		struct ftrace_ret_stack *ret_stack;
5516 
5517 		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5518 				* sizeof(struct ftrace_ret_stack),
5519 				GFP_KERNEL);
5520 		if (!ret_stack)
5521 			return;
5522 		graph_init_task(t, ret_stack);
5523 	}
5524 }
5525 
5526 void ftrace_graph_exit_task(struct task_struct *t)
5527 {
5528 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
5529 
5530 	t->ret_stack = NULL;
5531 	/* NULL must become visible to IRQs before we free it: */
5532 	barrier();
5533 
5534 	kfree(ret_stack);
5535 }
5536 #endif
5537