xref: /linux/kernel/trace/ftrace.c (revision e0bf6c5ca2d3281f231c5f0c9bf145e9513644de)
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35 
36 #include <trace/events/sched.h>
37 
38 #include <asm/setup.h>
39 
40 #include "trace_output.h"
41 #include "trace_stat.h"
42 
43 #define FTRACE_WARN_ON(cond)			\
44 	({					\
45 		int ___r = cond;		\
46 		if (WARN_ON(___r))		\
47 			ftrace_kill();		\
48 		___r;				\
49 	})
50 
51 #define FTRACE_WARN_ON_ONCE(cond)		\
52 	({					\
53 		int ___r = cond;		\
54 		if (WARN_ON_ONCE(___r))		\
55 			ftrace_kill();		\
56 		___r;				\
57 	})
58 
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
64 
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
66 
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_OPS_HASH(opsname)	\
69 	.func_hash		= &opsname.local_hash,			\
70 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71 #define ASSIGN_OPS_HASH(opsname, val) \
72 	.func_hash		= val, \
73 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
74 #else
75 #define INIT_OPS_HASH(opsname)
76 #define ASSIGN_OPS_HASH(opsname, val)
77 #endif
78 
79 static struct ftrace_ops ftrace_list_end __read_mostly = {
80 	.func		= ftrace_stub,
81 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
82 	INIT_OPS_HASH(ftrace_list_end)
83 };
84 
85 /* ftrace_enabled is a method to turn ftrace on or off */
86 int ftrace_enabled __read_mostly;
87 static int last_ftrace_enabled;
88 
89 /* Current function tracing op */
90 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
91 /* What to set function_trace_op to */
92 static struct ftrace_ops *set_function_trace_op;
93 
94 /* List for set_ftrace_pid's pids. */
95 LIST_HEAD(ftrace_pids);
96 struct ftrace_pid {
97 	struct list_head list;
98 	struct pid *pid;
99 };
100 
101 /*
102  * ftrace_disabled is set when an anomaly is discovered.
103  * ftrace_disabled is much stronger than ftrace_enabled.
104  */
105 static int ftrace_disabled __read_mostly;
106 
107 static DEFINE_MUTEX(ftrace_lock);
108 
109 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
110 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
111 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
112 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113 static struct ftrace_ops global_ops;
114 static struct ftrace_ops control_ops;
115 
116 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
117 				   struct ftrace_ops *op, struct pt_regs *regs);
118 
119 #if ARCH_SUPPORTS_FTRACE_OPS
120 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
121 				 struct ftrace_ops *op, struct pt_regs *regs);
122 #else
123 /* See comment below, where ftrace_ops_list_func is defined */
124 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
125 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
126 #endif
127 
128 /*
129  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
130  * can use rcu_dereference_raw_notrace() is that elements removed from this list
131  * are simply leaked, so there is no need to interact with a grace-period
132  * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
133  * concurrent insertions into the ftrace_global_list.
134  *
135  * Silly Alpha and silly pointer-speculation compiler optimizations!
136  */
137 #define do_for_each_ftrace_op(op, list)			\
138 	op = rcu_dereference_raw_notrace(list);			\
139 	do
140 
141 /*
142  * Optimized for just a single item in the list (as that is the normal case).
143  */
144 #define while_for_each_ftrace_op(op)				\
145 	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
146 	       unlikely((op) != &ftrace_list_end))
147 
148 static inline void ftrace_ops_init(struct ftrace_ops *ops)
149 {
150 #ifdef CONFIG_DYNAMIC_FTRACE
151 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
152 		mutex_init(&ops->local_hash.regex_lock);
153 		ops->func_hash = &ops->local_hash;
154 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
155 	}
156 #endif
157 }
158 
159 /**
160  * ftrace_nr_registered_ops - return number of ops registered
161  *
162  * Returns the number of ftrace_ops registered and tracing functions
163  */
164 int ftrace_nr_registered_ops(void)
165 {
166 	struct ftrace_ops *ops;
167 	int cnt = 0;
168 
169 	mutex_lock(&ftrace_lock);
170 
171 	for (ops = ftrace_ops_list;
172 	     ops != &ftrace_list_end; ops = ops->next)
173 		cnt++;
174 
175 	mutex_unlock(&ftrace_lock);
176 
177 	return cnt;
178 }
179 
180 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
181 			    struct ftrace_ops *op, struct pt_regs *regs)
182 {
183 	if (!test_tsk_trace_trace(current))
184 		return;
185 
186 	ftrace_pid_function(ip, parent_ip, op, regs);
187 }
188 
189 static void set_ftrace_pid_function(ftrace_func_t func)
190 {
191 	/* do not set ftrace_pid_function to itself! */
192 	if (func != ftrace_pid_func)
193 		ftrace_pid_function = func;
194 }
195 
196 /**
197  * clear_ftrace_function - reset the ftrace function
198  *
199  * This NULLs the ftrace function and in essence stops
200  * tracing.  There may be lag
201  */
202 void clear_ftrace_function(void)
203 {
204 	ftrace_trace_function = ftrace_stub;
205 	ftrace_pid_function = ftrace_stub;
206 }
207 
208 static void control_ops_disable_all(struct ftrace_ops *ops)
209 {
210 	int cpu;
211 
212 	for_each_possible_cpu(cpu)
213 		*per_cpu_ptr(ops->disabled, cpu) = 1;
214 }
215 
216 static int control_ops_alloc(struct ftrace_ops *ops)
217 {
218 	int __percpu *disabled;
219 
220 	disabled = alloc_percpu(int);
221 	if (!disabled)
222 		return -ENOMEM;
223 
224 	ops->disabled = disabled;
225 	control_ops_disable_all(ops);
226 	return 0;
227 }
228 
229 static void ftrace_sync(struct work_struct *work)
230 {
231 	/*
232 	 * This function is just a stub to implement a hard force
233 	 * of synchronize_sched(). This requires synchronizing
234 	 * tasks even in userspace and idle.
235 	 *
236 	 * Yes, function tracing is rude.
237 	 */
238 }
239 
240 static void ftrace_sync_ipi(void *data)
241 {
242 	/* Probably not needed, but do it anyway */
243 	smp_rmb();
244 }
245 
246 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
247 static void update_function_graph_func(void);
248 #else
249 static inline void update_function_graph_func(void) { }
250 #endif
251 
252 static void update_ftrace_function(void)
253 {
254 	ftrace_func_t func;
255 
256 	/*
257 	 * Prepare the ftrace_ops that the arch callback will use.
258 	 * If there's only one ftrace_ops registered, the ftrace_ops_list
259 	 * will point to the ops we want.
260 	 */
261 	set_function_trace_op = ftrace_ops_list;
262 
263 	/* If there's no ftrace_ops registered, just call the stub function */
264 	if (ftrace_ops_list == &ftrace_list_end) {
265 		func = ftrace_stub;
266 
267 	/*
268 	 * If we are at the end of the list and this ops is
269 	 * recursion safe and not dynamic and the arch supports passing ops,
270 	 * then have the mcount trampoline call the function directly.
271 	 */
272 	} else if (ftrace_ops_list->next == &ftrace_list_end) {
273 		func = ftrace_ops_get_func(ftrace_ops_list);
274 
275 	} else {
276 		/* Just use the default ftrace_ops */
277 		set_function_trace_op = &ftrace_list_end;
278 		func = ftrace_ops_list_func;
279 	}
280 
281 	update_function_graph_func();
282 
283 	/* If there's no change, then do nothing more here */
284 	if (ftrace_trace_function == func)
285 		return;
286 
287 	/*
288 	 * If we are using the list function, it doesn't care
289 	 * about the function_trace_ops.
290 	 */
291 	if (func == ftrace_ops_list_func) {
292 		ftrace_trace_function = func;
293 		/*
294 		 * Don't even bother setting function_trace_ops,
295 		 * it would be racy to do so anyway.
296 		 */
297 		return;
298 	}
299 
300 #ifndef CONFIG_DYNAMIC_FTRACE
301 	/*
302 	 * For static tracing, we need to be a bit more careful.
303 	 * The function change takes affect immediately. Thus,
304 	 * we need to coorditate the setting of the function_trace_ops
305 	 * with the setting of the ftrace_trace_function.
306 	 *
307 	 * Set the function to the list ops, which will call the
308 	 * function we want, albeit indirectly, but it handles the
309 	 * ftrace_ops and doesn't depend on function_trace_op.
310 	 */
311 	ftrace_trace_function = ftrace_ops_list_func;
312 	/*
313 	 * Make sure all CPUs see this. Yes this is slow, but static
314 	 * tracing is slow and nasty to have enabled.
315 	 */
316 	schedule_on_each_cpu(ftrace_sync);
317 	/* Now all cpus are using the list ops. */
318 	function_trace_op = set_function_trace_op;
319 	/* Make sure the function_trace_op is visible on all CPUs */
320 	smp_wmb();
321 	/* Nasty way to force a rmb on all cpus */
322 	smp_call_function(ftrace_sync_ipi, NULL, 1);
323 	/* OK, we are all set to update the ftrace_trace_function now! */
324 #endif /* !CONFIG_DYNAMIC_FTRACE */
325 
326 	ftrace_trace_function = func;
327 }
328 
329 int using_ftrace_ops_list_func(void)
330 {
331 	return ftrace_trace_function == ftrace_ops_list_func;
332 }
333 
334 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
335 {
336 	ops->next = *list;
337 	/*
338 	 * We are entering ops into the list but another
339 	 * CPU might be walking that list. We need to make sure
340 	 * the ops->next pointer is valid before another CPU sees
341 	 * the ops pointer included into the list.
342 	 */
343 	rcu_assign_pointer(*list, ops);
344 }
345 
346 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
347 {
348 	struct ftrace_ops **p;
349 
350 	/*
351 	 * If we are removing the last function, then simply point
352 	 * to the ftrace_stub.
353 	 */
354 	if (*list == ops && ops->next == &ftrace_list_end) {
355 		*list = &ftrace_list_end;
356 		return 0;
357 	}
358 
359 	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
360 		if (*p == ops)
361 			break;
362 
363 	if (*p != ops)
364 		return -1;
365 
366 	*p = (*p)->next;
367 	return 0;
368 }
369 
370 static void add_ftrace_list_ops(struct ftrace_ops **list,
371 				struct ftrace_ops *main_ops,
372 				struct ftrace_ops *ops)
373 {
374 	int first = *list == &ftrace_list_end;
375 	add_ftrace_ops(list, ops);
376 	if (first)
377 		add_ftrace_ops(&ftrace_ops_list, main_ops);
378 }
379 
380 static int remove_ftrace_list_ops(struct ftrace_ops **list,
381 				  struct ftrace_ops *main_ops,
382 				  struct ftrace_ops *ops)
383 {
384 	int ret = remove_ftrace_ops(list, ops);
385 	if (!ret && *list == &ftrace_list_end)
386 		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
387 	return ret;
388 }
389 
390 static void ftrace_update_trampoline(struct ftrace_ops *ops);
391 
392 static int __register_ftrace_function(struct ftrace_ops *ops)
393 {
394 	if (ops->flags & FTRACE_OPS_FL_DELETED)
395 		return -EINVAL;
396 
397 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
398 		return -EBUSY;
399 
400 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
401 	/*
402 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
403 	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
404 	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
405 	 */
406 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
407 	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
408 		return -EINVAL;
409 
410 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
411 		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
412 #endif
413 
414 	if (!core_kernel_data((unsigned long)ops))
415 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
416 
417 	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
418 		if (control_ops_alloc(ops))
419 			return -ENOMEM;
420 		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
421 		/* The control_ops needs the trampoline update */
422 		ops = &control_ops;
423 	} else
424 		add_ftrace_ops(&ftrace_ops_list, ops);
425 
426 	ftrace_update_trampoline(ops);
427 
428 	if (ftrace_enabled)
429 		update_ftrace_function();
430 
431 	return 0;
432 }
433 
434 static int __unregister_ftrace_function(struct ftrace_ops *ops)
435 {
436 	int ret;
437 
438 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
439 		return -EBUSY;
440 
441 	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
442 		ret = remove_ftrace_list_ops(&ftrace_control_list,
443 					     &control_ops, ops);
444 	} else
445 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
446 
447 	if (ret < 0)
448 		return ret;
449 
450 	if (ftrace_enabled)
451 		update_ftrace_function();
452 
453 	return 0;
454 }
455 
456 static void ftrace_update_pid_func(void)
457 {
458 	/* Only do something if we are tracing something */
459 	if (ftrace_trace_function == ftrace_stub)
460 		return;
461 
462 	update_ftrace_function();
463 }
464 
465 #ifdef CONFIG_FUNCTION_PROFILER
466 struct ftrace_profile {
467 	struct hlist_node		node;
468 	unsigned long			ip;
469 	unsigned long			counter;
470 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
471 	unsigned long long		time;
472 	unsigned long long		time_squared;
473 #endif
474 };
475 
476 struct ftrace_profile_page {
477 	struct ftrace_profile_page	*next;
478 	unsigned long			index;
479 	struct ftrace_profile		records[];
480 };
481 
482 struct ftrace_profile_stat {
483 	atomic_t			disabled;
484 	struct hlist_head		*hash;
485 	struct ftrace_profile_page	*pages;
486 	struct ftrace_profile_page	*start;
487 	struct tracer_stat		stat;
488 };
489 
490 #define PROFILE_RECORDS_SIZE						\
491 	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
492 
493 #define PROFILES_PER_PAGE					\
494 	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
495 
496 static int ftrace_profile_enabled __read_mostly;
497 
498 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
499 static DEFINE_MUTEX(ftrace_profile_lock);
500 
501 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
502 
503 #define FTRACE_PROFILE_HASH_BITS 10
504 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
505 
506 static void *
507 function_stat_next(void *v, int idx)
508 {
509 	struct ftrace_profile *rec = v;
510 	struct ftrace_profile_page *pg;
511 
512 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
513 
514  again:
515 	if (idx != 0)
516 		rec++;
517 
518 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
519 		pg = pg->next;
520 		if (!pg)
521 			return NULL;
522 		rec = &pg->records[0];
523 		if (!rec->counter)
524 			goto again;
525 	}
526 
527 	return rec;
528 }
529 
530 static void *function_stat_start(struct tracer_stat *trace)
531 {
532 	struct ftrace_profile_stat *stat =
533 		container_of(trace, struct ftrace_profile_stat, stat);
534 
535 	if (!stat || !stat->start)
536 		return NULL;
537 
538 	return function_stat_next(&stat->start->records[0], 0);
539 }
540 
541 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
542 /* function graph compares on total time */
543 static int function_stat_cmp(void *p1, void *p2)
544 {
545 	struct ftrace_profile *a = p1;
546 	struct ftrace_profile *b = p2;
547 
548 	if (a->time < b->time)
549 		return -1;
550 	if (a->time > b->time)
551 		return 1;
552 	else
553 		return 0;
554 }
555 #else
556 /* not function graph compares against hits */
557 static int function_stat_cmp(void *p1, void *p2)
558 {
559 	struct ftrace_profile *a = p1;
560 	struct ftrace_profile *b = p2;
561 
562 	if (a->counter < b->counter)
563 		return -1;
564 	if (a->counter > b->counter)
565 		return 1;
566 	else
567 		return 0;
568 }
569 #endif
570 
571 static int function_stat_headers(struct seq_file *m)
572 {
573 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
574 	seq_puts(m, "  Function                               "
575 		 "Hit    Time            Avg             s^2\n"
576 		    "  --------                               "
577 		 "---    ----            ---             ---\n");
578 #else
579 	seq_puts(m, "  Function                               Hit\n"
580 		    "  --------                               ---\n");
581 #endif
582 	return 0;
583 }
584 
585 static int function_stat_show(struct seq_file *m, void *v)
586 {
587 	struct ftrace_profile *rec = v;
588 	char str[KSYM_SYMBOL_LEN];
589 	int ret = 0;
590 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
591 	static struct trace_seq s;
592 	unsigned long long avg;
593 	unsigned long long stddev;
594 #endif
595 	mutex_lock(&ftrace_profile_lock);
596 
597 	/* we raced with function_profile_reset() */
598 	if (unlikely(rec->counter == 0)) {
599 		ret = -EBUSY;
600 		goto out;
601 	}
602 
603 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
604 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
605 
606 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
607 	seq_puts(m, "    ");
608 	avg = rec->time;
609 	do_div(avg, rec->counter);
610 
611 	/* Sample standard deviation (s^2) */
612 	if (rec->counter <= 1)
613 		stddev = 0;
614 	else {
615 		/*
616 		 * Apply Welford's method:
617 		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
618 		 */
619 		stddev = rec->counter * rec->time_squared -
620 			 rec->time * rec->time;
621 
622 		/*
623 		 * Divide only 1000 for ns^2 -> us^2 conversion.
624 		 * trace_print_graph_duration will divide 1000 again.
625 		 */
626 		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
627 	}
628 
629 	trace_seq_init(&s);
630 	trace_print_graph_duration(rec->time, &s);
631 	trace_seq_puts(&s, "    ");
632 	trace_print_graph_duration(avg, &s);
633 	trace_seq_puts(&s, "    ");
634 	trace_print_graph_duration(stddev, &s);
635 	trace_print_seq(m, &s);
636 #endif
637 	seq_putc(m, '\n');
638 out:
639 	mutex_unlock(&ftrace_profile_lock);
640 
641 	return ret;
642 }
643 
644 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
645 {
646 	struct ftrace_profile_page *pg;
647 
648 	pg = stat->pages = stat->start;
649 
650 	while (pg) {
651 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
652 		pg->index = 0;
653 		pg = pg->next;
654 	}
655 
656 	memset(stat->hash, 0,
657 	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
658 }
659 
660 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
661 {
662 	struct ftrace_profile_page *pg;
663 	int functions;
664 	int pages;
665 	int i;
666 
667 	/* If we already allocated, do nothing */
668 	if (stat->pages)
669 		return 0;
670 
671 	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
672 	if (!stat->pages)
673 		return -ENOMEM;
674 
675 #ifdef CONFIG_DYNAMIC_FTRACE
676 	functions = ftrace_update_tot_cnt;
677 #else
678 	/*
679 	 * We do not know the number of functions that exist because
680 	 * dynamic tracing is what counts them. With past experience
681 	 * we have around 20K functions. That should be more than enough.
682 	 * It is highly unlikely we will execute every function in
683 	 * the kernel.
684 	 */
685 	functions = 20000;
686 #endif
687 
688 	pg = stat->start = stat->pages;
689 
690 	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
691 
692 	for (i = 1; i < pages; i++) {
693 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
694 		if (!pg->next)
695 			goto out_free;
696 		pg = pg->next;
697 	}
698 
699 	return 0;
700 
701  out_free:
702 	pg = stat->start;
703 	while (pg) {
704 		unsigned long tmp = (unsigned long)pg;
705 
706 		pg = pg->next;
707 		free_page(tmp);
708 	}
709 
710 	stat->pages = NULL;
711 	stat->start = NULL;
712 
713 	return -ENOMEM;
714 }
715 
716 static int ftrace_profile_init_cpu(int cpu)
717 {
718 	struct ftrace_profile_stat *stat;
719 	int size;
720 
721 	stat = &per_cpu(ftrace_profile_stats, cpu);
722 
723 	if (stat->hash) {
724 		/* If the profile is already created, simply reset it */
725 		ftrace_profile_reset(stat);
726 		return 0;
727 	}
728 
729 	/*
730 	 * We are profiling all functions, but usually only a few thousand
731 	 * functions are hit. We'll make a hash of 1024 items.
732 	 */
733 	size = FTRACE_PROFILE_HASH_SIZE;
734 
735 	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
736 
737 	if (!stat->hash)
738 		return -ENOMEM;
739 
740 	/* Preallocate the function profiling pages */
741 	if (ftrace_profile_pages_init(stat) < 0) {
742 		kfree(stat->hash);
743 		stat->hash = NULL;
744 		return -ENOMEM;
745 	}
746 
747 	return 0;
748 }
749 
750 static int ftrace_profile_init(void)
751 {
752 	int cpu;
753 	int ret = 0;
754 
755 	for_each_possible_cpu(cpu) {
756 		ret = ftrace_profile_init_cpu(cpu);
757 		if (ret)
758 			break;
759 	}
760 
761 	return ret;
762 }
763 
764 /* interrupts must be disabled */
765 static struct ftrace_profile *
766 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
767 {
768 	struct ftrace_profile *rec;
769 	struct hlist_head *hhd;
770 	unsigned long key;
771 
772 	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
773 	hhd = &stat->hash[key];
774 
775 	if (hlist_empty(hhd))
776 		return NULL;
777 
778 	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
779 		if (rec->ip == ip)
780 			return rec;
781 	}
782 
783 	return NULL;
784 }
785 
786 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
787 			       struct ftrace_profile *rec)
788 {
789 	unsigned long key;
790 
791 	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
792 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
793 }
794 
795 /*
796  * The memory is already allocated, this simply finds a new record to use.
797  */
798 static struct ftrace_profile *
799 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
800 {
801 	struct ftrace_profile *rec = NULL;
802 
803 	/* prevent recursion (from NMIs) */
804 	if (atomic_inc_return(&stat->disabled) != 1)
805 		goto out;
806 
807 	/*
808 	 * Try to find the function again since an NMI
809 	 * could have added it
810 	 */
811 	rec = ftrace_find_profiled_func(stat, ip);
812 	if (rec)
813 		goto out;
814 
815 	if (stat->pages->index == PROFILES_PER_PAGE) {
816 		if (!stat->pages->next)
817 			goto out;
818 		stat->pages = stat->pages->next;
819 	}
820 
821 	rec = &stat->pages->records[stat->pages->index++];
822 	rec->ip = ip;
823 	ftrace_add_profile(stat, rec);
824 
825  out:
826 	atomic_dec(&stat->disabled);
827 
828 	return rec;
829 }
830 
831 static void
832 function_profile_call(unsigned long ip, unsigned long parent_ip,
833 		      struct ftrace_ops *ops, struct pt_regs *regs)
834 {
835 	struct ftrace_profile_stat *stat;
836 	struct ftrace_profile *rec;
837 	unsigned long flags;
838 
839 	if (!ftrace_profile_enabled)
840 		return;
841 
842 	local_irq_save(flags);
843 
844 	stat = this_cpu_ptr(&ftrace_profile_stats);
845 	if (!stat->hash || !ftrace_profile_enabled)
846 		goto out;
847 
848 	rec = ftrace_find_profiled_func(stat, ip);
849 	if (!rec) {
850 		rec = ftrace_profile_alloc(stat, ip);
851 		if (!rec)
852 			goto out;
853 	}
854 
855 	rec->counter++;
856  out:
857 	local_irq_restore(flags);
858 }
859 
860 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
861 static int profile_graph_entry(struct ftrace_graph_ent *trace)
862 {
863 	function_profile_call(trace->func, 0, NULL, NULL);
864 	return 1;
865 }
866 
867 static void profile_graph_return(struct ftrace_graph_ret *trace)
868 {
869 	struct ftrace_profile_stat *stat;
870 	unsigned long long calltime;
871 	struct ftrace_profile *rec;
872 	unsigned long flags;
873 
874 	local_irq_save(flags);
875 	stat = this_cpu_ptr(&ftrace_profile_stats);
876 	if (!stat->hash || !ftrace_profile_enabled)
877 		goto out;
878 
879 	/* If the calltime was zero'd ignore it */
880 	if (!trace->calltime)
881 		goto out;
882 
883 	calltime = trace->rettime - trace->calltime;
884 
885 	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
886 		int index;
887 
888 		index = trace->depth;
889 
890 		/* Append this call time to the parent time to subtract */
891 		if (index)
892 			current->ret_stack[index - 1].subtime += calltime;
893 
894 		if (current->ret_stack[index].subtime < calltime)
895 			calltime -= current->ret_stack[index].subtime;
896 		else
897 			calltime = 0;
898 	}
899 
900 	rec = ftrace_find_profiled_func(stat, trace->func);
901 	if (rec) {
902 		rec->time += calltime;
903 		rec->time_squared += calltime * calltime;
904 	}
905 
906  out:
907 	local_irq_restore(flags);
908 }
909 
910 static int register_ftrace_profiler(void)
911 {
912 	return register_ftrace_graph(&profile_graph_return,
913 				     &profile_graph_entry);
914 }
915 
916 static void unregister_ftrace_profiler(void)
917 {
918 	unregister_ftrace_graph();
919 }
920 #else
921 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
922 	.func		= function_profile_call,
923 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
924 	INIT_OPS_HASH(ftrace_profile_ops)
925 };
926 
927 static int register_ftrace_profiler(void)
928 {
929 	return register_ftrace_function(&ftrace_profile_ops);
930 }
931 
932 static void unregister_ftrace_profiler(void)
933 {
934 	unregister_ftrace_function(&ftrace_profile_ops);
935 }
936 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
937 
938 static ssize_t
939 ftrace_profile_write(struct file *filp, const char __user *ubuf,
940 		     size_t cnt, loff_t *ppos)
941 {
942 	unsigned long val;
943 	int ret;
944 
945 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
946 	if (ret)
947 		return ret;
948 
949 	val = !!val;
950 
951 	mutex_lock(&ftrace_profile_lock);
952 	if (ftrace_profile_enabled ^ val) {
953 		if (val) {
954 			ret = ftrace_profile_init();
955 			if (ret < 0) {
956 				cnt = ret;
957 				goto out;
958 			}
959 
960 			ret = register_ftrace_profiler();
961 			if (ret < 0) {
962 				cnt = ret;
963 				goto out;
964 			}
965 			ftrace_profile_enabled = 1;
966 		} else {
967 			ftrace_profile_enabled = 0;
968 			/*
969 			 * unregister_ftrace_profiler calls stop_machine
970 			 * so this acts like an synchronize_sched.
971 			 */
972 			unregister_ftrace_profiler();
973 		}
974 	}
975  out:
976 	mutex_unlock(&ftrace_profile_lock);
977 
978 	*ppos += cnt;
979 
980 	return cnt;
981 }
982 
983 static ssize_t
984 ftrace_profile_read(struct file *filp, char __user *ubuf,
985 		     size_t cnt, loff_t *ppos)
986 {
987 	char buf[64];		/* big enough to hold a number */
988 	int r;
989 
990 	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
991 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
992 }
993 
994 static const struct file_operations ftrace_profile_fops = {
995 	.open		= tracing_open_generic,
996 	.read		= ftrace_profile_read,
997 	.write		= ftrace_profile_write,
998 	.llseek		= default_llseek,
999 };
1000 
1001 /* used to initialize the real stat files */
1002 static struct tracer_stat function_stats __initdata = {
1003 	.name		= "functions",
1004 	.stat_start	= function_stat_start,
1005 	.stat_next	= function_stat_next,
1006 	.stat_cmp	= function_stat_cmp,
1007 	.stat_headers	= function_stat_headers,
1008 	.stat_show	= function_stat_show
1009 };
1010 
1011 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1012 {
1013 	struct ftrace_profile_stat *stat;
1014 	struct dentry *entry;
1015 	char *name;
1016 	int ret;
1017 	int cpu;
1018 
1019 	for_each_possible_cpu(cpu) {
1020 		stat = &per_cpu(ftrace_profile_stats, cpu);
1021 
1022 		/* allocate enough for function name + cpu number */
1023 		name = kmalloc(32, GFP_KERNEL);
1024 		if (!name) {
1025 			/*
1026 			 * The files created are permanent, if something happens
1027 			 * we still do not free memory.
1028 			 */
1029 			WARN(1,
1030 			     "Could not allocate stat file for cpu %d\n",
1031 			     cpu);
1032 			return;
1033 		}
1034 		stat->stat = function_stats;
1035 		snprintf(name, 32, "function%d", cpu);
1036 		stat->stat.name = name;
1037 		ret = register_stat_tracer(&stat->stat);
1038 		if (ret) {
1039 			WARN(1,
1040 			     "Could not register function stat for cpu %d\n",
1041 			     cpu);
1042 			kfree(name);
1043 			return;
1044 		}
1045 	}
1046 
1047 	entry = debugfs_create_file("function_profile_enabled", 0644,
1048 				    d_tracer, NULL, &ftrace_profile_fops);
1049 	if (!entry)
1050 		pr_warning("Could not create debugfs "
1051 			   "'function_profile_enabled' entry\n");
1052 }
1053 
1054 #else /* CONFIG_FUNCTION_PROFILER */
1055 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1056 {
1057 }
1058 #endif /* CONFIG_FUNCTION_PROFILER */
1059 
1060 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1061 
1062 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1063 static int ftrace_graph_active;
1064 #else
1065 # define ftrace_graph_active 0
1066 #endif
1067 
1068 #ifdef CONFIG_DYNAMIC_FTRACE
1069 
1070 static struct ftrace_ops *removed_ops;
1071 
1072 /*
1073  * Set when doing a global update, like enabling all recs or disabling them.
1074  * It is not set when just updating a single ftrace_ops.
1075  */
1076 static bool update_all_ops;
1077 
1078 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1079 # error Dynamic ftrace depends on MCOUNT_RECORD
1080 #endif
1081 
1082 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1083 
1084 struct ftrace_func_probe {
1085 	struct hlist_node	node;
1086 	struct ftrace_probe_ops	*ops;
1087 	unsigned long		flags;
1088 	unsigned long		ip;
1089 	void			*data;
1090 	struct list_head	free_list;
1091 };
1092 
1093 struct ftrace_func_entry {
1094 	struct hlist_node hlist;
1095 	unsigned long ip;
1096 };
1097 
1098 struct ftrace_hash {
1099 	unsigned long		size_bits;
1100 	struct hlist_head	*buckets;
1101 	unsigned long		count;
1102 	struct rcu_head		rcu;
1103 };
1104 
1105 /*
1106  * We make these constant because no one should touch them,
1107  * but they are used as the default "empty hash", to avoid allocating
1108  * it all the time. These are in a read only section such that if
1109  * anyone does try to modify it, it will cause an exception.
1110  */
1111 static const struct hlist_head empty_buckets[1];
1112 static const struct ftrace_hash empty_hash = {
1113 	.buckets = (struct hlist_head *)empty_buckets,
1114 };
1115 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
1116 
1117 static struct ftrace_ops global_ops = {
1118 	.func				= ftrace_stub,
1119 	.local_hash.notrace_hash	= EMPTY_HASH,
1120 	.local_hash.filter_hash		= EMPTY_HASH,
1121 	INIT_OPS_HASH(global_ops)
1122 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
1123 					  FTRACE_OPS_FL_INITIALIZED,
1124 };
1125 
1126 /*
1127  * This is used by __kernel_text_address() to return true if the
1128  * address is on a dynamically allocated trampoline that would
1129  * not return true for either core_kernel_text() or
1130  * is_module_text_address().
1131  */
1132 bool is_ftrace_trampoline(unsigned long addr)
1133 {
1134 	struct ftrace_ops *op;
1135 	bool ret = false;
1136 
1137 	/*
1138 	 * Some of the ops may be dynamically allocated,
1139 	 * they are freed after a synchronize_sched().
1140 	 */
1141 	preempt_disable_notrace();
1142 
1143 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1144 		/*
1145 		 * This is to check for dynamically allocated trampolines.
1146 		 * Trampolines that are in kernel text will have
1147 		 * core_kernel_text() return true.
1148 		 */
1149 		if (op->trampoline && op->trampoline_size)
1150 			if (addr >= op->trampoline &&
1151 			    addr < op->trampoline + op->trampoline_size) {
1152 				ret = true;
1153 				goto out;
1154 			}
1155 	} while_for_each_ftrace_op(op);
1156 
1157  out:
1158 	preempt_enable_notrace();
1159 
1160 	return ret;
1161 }
1162 
1163 struct ftrace_page {
1164 	struct ftrace_page	*next;
1165 	struct dyn_ftrace	*records;
1166 	int			index;
1167 	int			size;
1168 };
1169 
1170 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1171 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1172 
1173 /* estimate from running different kernels */
1174 #define NR_TO_INIT		10000
1175 
1176 static struct ftrace_page	*ftrace_pages_start;
1177 static struct ftrace_page	*ftrace_pages;
1178 
1179 static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
1180 {
1181 	return !hash || !hash->count;
1182 }
1183 
1184 static struct ftrace_func_entry *
1185 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1186 {
1187 	unsigned long key;
1188 	struct ftrace_func_entry *entry;
1189 	struct hlist_head *hhd;
1190 
1191 	if (ftrace_hash_empty(hash))
1192 		return NULL;
1193 
1194 	if (hash->size_bits > 0)
1195 		key = hash_long(ip, hash->size_bits);
1196 	else
1197 		key = 0;
1198 
1199 	hhd = &hash->buckets[key];
1200 
1201 	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1202 		if (entry->ip == ip)
1203 			return entry;
1204 	}
1205 	return NULL;
1206 }
1207 
1208 static void __add_hash_entry(struct ftrace_hash *hash,
1209 			     struct ftrace_func_entry *entry)
1210 {
1211 	struct hlist_head *hhd;
1212 	unsigned long key;
1213 
1214 	if (hash->size_bits)
1215 		key = hash_long(entry->ip, hash->size_bits);
1216 	else
1217 		key = 0;
1218 
1219 	hhd = &hash->buckets[key];
1220 	hlist_add_head(&entry->hlist, hhd);
1221 	hash->count++;
1222 }
1223 
1224 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1225 {
1226 	struct ftrace_func_entry *entry;
1227 
1228 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1229 	if (!entry)
1230 		return -ENOMEM;
1231 
1232 	entry->ip = ip;
1233 	__add_hash_entry(hash, entry);
1234 
1235 	return 0;
1236 }
1237 
1238 static void
1239 free_hash_entry(struct ftrace_hash *hash,
1240 		  struct ftrace_func_entry *entry)
1241 {
1242 	hlist_del(&entry->hlist);
1243 	kfree(entry);
1244 	hash->count--;
1245 }
1246 
1247 static void
1248 remove_hash_entry(struct ftrace_hash *hash,
1249 		  struct ftrace_func_entry *entry)
1250 {
1251 	hlist_del(&entry->hlist);
1252 	hash->count--;
1253 }
1254 
1255 static void ftrace_hash_clear(struct ftrace_hash *hash)
1256 {
1257 	struct hlist_head *hhd;
1258 	struct hlist_node *tn;
1259 	struct ftrace_func_entry *entry;
1260 	int size = 1 << hash->size_bits;
1261 	int i;
1262 
1263 	if (!hash->count)
1264 		return;
1265 
1266 	for (i = 0; i < size; i++) {
1267 		hhd = &hash->buckets[i];
1268 		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1269 			free_hash_entry(hash, entry);
1270 	}
1271 	FTRACE_WARN_ON(hash->count);
1272 }
1273 
1274 static void free_ftrace_hash(struct ftrace_hash *hash)
1275 {
1276 	if (!hash || hash == EMPTY_HASH)
1277 		return;
1278 	ftrace_hash_clear(hash);
1279 	kfree(hash->buckets);
1280 	kfree(hash);
1281 }
1282 
1283 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1284 {
1285 	struct ftrace_hash *hash;
1286 
1287 	hash = container_of(rcu, struct ftrace_hash, rcu);
1288 	free_ftrace_hash(hash);
1289 }
1290 
1291 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1292 {
1293 	if (!hash || hash == EMPTY_HASH)
1294 		return;
1295 	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1296 }
1297 
1298 void ftrace_free_filter(struct ftrace_ops *ops)
1299 {
1300 	ftrace_ops_init(ops);
1301 	free_ftrace_hash(ops->func_hash->filter_hash);
1302 	free_ftrace_hash(ops->func_hash->notrace_hash);
1303 }
1304 
1305 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1306 {
1307 	struct ftrace_hash *hash;
1308 	int size;
1309 
1310 	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1311 	if (!hash)
1312 		return NULL;
1313 
1314 	size = 1 << size_bits;
1315 	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1316 
1317 	if (!hash->buckets) {
1318 		kfree(hash);
1319 		return NULL;
1320 	}
1321 
1322 	hash->size_bits = size_bits;
1323 
1324 	return hash;
1325 }
1326 
1327 static struct ftrace_hash *
1328 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1329 {
1330 	struct ftrace_func_entry *entry;
1331 	struct ftrace_hash *new_hash;
1332 	int size;
1333 	int ret;
1334 	int i;
1335 
1336 	new_hash = alloc_ftrace_hash(size_bits);
1337 	if (!new_hash)
1338 		return NULL;
1339 
1340 	/* Empty hash? */
1341 	if (ftrace_hash_empty(hash))
1342 		return new_hash;
1343 
1344 	size = 1 << hash->size_bits;
1345 	for (i = 0; i < size; i++) {
1346 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1347 			ret = add_hash_entry(new_hash, entry->ip);
1348 			if (ret < 0)
1349 				goto free_hash;
1350 		}
1351 	}
1352 
1353 	FTRACE_WARN_ON(new_hash->count != hash->count);
1354 
1355 	return new_hash;
1356 
1357  free_hash:
1358 	free_ftrace_hash(new_hash);
1359 	return NULL;
1360 }
1361 
1362 static void
1363 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1364 static void
1365 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1366 
1367 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1368 				       struct ftrace_hash *new_hash);
1369 
1370 static int
1371 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1372 		 struct ftrace_hash **dst, struct ftrace_hash *src)
1373 {
1374 	struct ftrace_func_entry *entry;
1375 	struct hlist_node *tn;
1376 	struct hlist_head *hhd;
1377 	struct ftrace_hash *new_hash;
1378 	int size = src->count;
1379 	int bits = 0;
1380 	int ret;
1381 	int i;
1382 
1383 	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
1384 	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1385 		return -EINVAL;
1386 
1387 	/*
1388 	 * If the new source is empty, just free dst and assign it
1389 	 * the empty_hash.
1390 	 */
1391 	if (!src->count) {
1392 		new_hash = EMPTY_HASH;
1393 		goto update;
1394 	}
1395 
1396 	/*
1397 	 * Make the hash size about 1/2 the # found
1398 	 */
1399 	for (size /= 2; size; size >>= 1)
1400 		bits++;
1401 
1402 	/* Don't allocate too much */
1403 	if (bits > FTRACE_HASH_MAX_BITS)
1404 		bits = FTRACE_HASH_MAX_BITS;
1405 
1406 	new_hash = alloc_ftrace_hash(bits);
1407 	if (!new_hash)
1408 		return -ENOMEM;
1409 
1410 	size = 1 << src->size_bits;
1411 	for (i = 0; i < size; i++) {
1412 		hhd = &src->buckets[i];
1413 		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1414 			remove_hash_entry(src, entry);
1415 			__add_hash_entry(new_hash, entry);
1416 		}
1417 	}
1418 
1419 update:
1420 	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1421 	if (enable) {
1422 		/* IPMODIFY should be updated only when filter_hash updating */
1423 		ret = ftrace_hash_ipmodify_update(ops, new_hash);
1424 		if (ret < 0) {
1425 			free_ftrace_hash(new_hash);
1426 			return ret;
1427 		}
1428 	}
1429 
1430 	/*
1431 	 * Remove the current set, update the hash and add
1432 	 * them back.
1433 	 */
1434 	ftrace_hash_rec_disable_modify(ops, enable);
1435 
1436 	rcu_assign_pointer(*dst, new_hash);
1437 
1438 	ftrace_hash_rec_enable_modify(ops, enable);
1439 
1440 	return 0;
1441 }
1442 
1443 static bool hash_contains_ip(unsigned long ip,
1444 			     struct ftrace_ops_hash *hash)
1445 {
1446 	/*
1447 	 * The function record is a match if it exists in the filter
1448 	 * hash and not in the notrace hash. Note, an emty hash is
1449 	 * considered a match for the filter hash, but an empty
1450 	 * notrace hash is considered not in the notrace hash.
1451 	 */
1452 	return (ftrace_hash_empty(hash->filter_hash) ||
1453 		ftrace_lookup_ip(hash->filter_hash, ip)) &&
1454 		(ftrace_hash_empty(hash->notrace_hash) ||
1455 		 !ftrace_lookup_ip(hash->notrace_hash, ip));
1456 }
1457 
1458 /*
1459  * Test the hashes for this ops to see if we want to call
1460  * the ops->func or not.
1461  *
1462  * It's a match if the ip is in the ops->filter_hash or
1463  * the filter_hash does not exist or is empty,
1464  *  AND
1465  * the ip is not in the ops->notrace_hash.
1466  *
1467  * This needs to be called with preemption disabled as
1468  * the hashes are freed with call_rcu_sched().
1469  */
1470 static int
1471 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1472 {
1473 	struct ftrace_ops_hash hash;
1474 	int ret;
1475 
1476 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1477 	/*
1478 	 * There's a small race when adding ops that the ftrace handler
1479 	 * that wants regs, may be called without them. We can not
1480 	 * allow that handler to be called if regs is NULL.
1481 	 */
1482 	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1483 		return 0;
1484 #endif
1485 
1486 	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
1487 	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
1488 
1489 	if (hash_contains_ip(ip, &hash))
1490 		ret = 1;
1491 	else
1492 		ret = 0;
1493 
1494 	return ret;
1495 }
1496 
1497 /*
1498  * This is a double for. Do not use 'break' to break out of the loop,
1499  * you must use a goto.
1500  */
1501 #define do_for_each_ftrace_rec(pg, rec)					\
1502 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
1503 		int _____i;						\
1504 		for (_____i = 0; _____i < pg->index; _____i++) {	\
1505 			rec = &pg->records[_____i];
1506 
1507 #define while_for_each_ftrace_rec()		\
1508 		}				\
1509 	}
1510 
1511 
1512 static int ftrace_cmp_recs(const void *a, const void *b)
1513 {
1514 	const struct dyn_ftrace *key = a;
1515 	const struct dyn_ftrace *rec = b;
1516 
1517 	if (key->flags < rec->ip)
1518 		return -1;
1519 	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1520 		return 1;
1521 	return 0;
1522 }
1523 
1524 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1525 {
1526 	struct ftrace_page *pg;
1527 	struct dyn_ftrace *rec;
1528 	struct dyn_ftrace key;
1529 
1530 	key.ip = start;
1531 	key.flags = end;	/* overload flags, as it is unsigned long */
1532 
1533 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1534 		if (end < pg->records[0].ip ||
1535 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1536 			continue;
1537 		rec = bsearch(&key, pg->records, pg->index,
1538 			      sizeof(struct dyn_ftrace),
1539 			      ftrace_cmp_recs);
1540 		if (rec)
1541 			return rec->ip;
1542 	}
1543 
1544 	return 0;
1545 }
1546 
1547 /**
1548  * ftrace_location - return true if the ip giving is a traced location
1549  * @ip: the instruction pointer to check
1550  *
1551  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1552  * That is, the instruction that is either a NOP or call to
1553  * the function tracer. It checks the ftrace internal tables to
1554  * determine if the address belongs or not.
1555  */
1556 unsigned long ftrace_location(unsigned long ip)
1557 {
1558 	return ftrace_location_range(ip, ip);
1559 }
1560 
1561 /**
1562  * ftrace_text_reserved - return true if range contains an ftrace location
1563  * @start: start of range to search
1564  * @end: end of range to search (inclusive). @end points to the last byte to check.
1565  *
1566  * Returns 1 if @start and @end contains a ftrace location.
1567  * That is, the instruction that is either a NOP or call to
1568  * the function tracer. It checks the ftrace internal tables to
1569  * determine if the address belongs or not.
1570  */
1571 int ftrace_text_reserved(const void *start, const void *end)
1572 {
1573 	unsigned long ret;
1574 
1575 	ret = ftrace_location_range((unsigned long)start,
1576 				    (unsigned long)end);
1577 
1578 	return (int)!!ret;
1579 }
1580 
1581 /* Test if ops registered to this rec needs regs */
1582 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1583 {
1584 	struct ftrace_ops *ops;
1585 	bool keep_regs = false;
1586 
1587 	for (ops = ftrace_ops_list;
1588 	     ops != &ftrace_list_end; ops = ops->next) {
1589 		/* pass rec in as regs to have non-NULL val */
1590 		if (ftrace_ops_test(ops, rec->ip, rec)) {
1591 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1592 				keep_regs = true;
1593 				break;
1594 			}
1595 		}
1596 	}
1597 
1598 	return  keep_regs;
1599 }
1600 
1601 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1602 				     int filter_hash,
1603 				     bool inc)
1604 {
1605 	struct ftrace_hash *hash;
1606 	struct ftrace_hash *other_hash;
1607 	struct ftrace_page *pg;
1608 	struct dyn_ftrace *rec;
1609 	int count = 0;
1610 	int all = 0;
1611 
1612 	/* Only update if the ops has been registered */
1613 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1614 		return;
1615 
1616 	/*
1617 	 * In the filter_hash case:
1618 	 *   If the count is zero, we update all records.
1619 	 *   Otherwise we just update the items in the hash.
1620 	 *
1621 	 * In the notrace_hash case:
1622 	 *   We enable the update in the hash.
1623 	 *   As disabling notrace means enabling the tracing,
1624 	 *   and enabling notrace means disabling, the inc variable
1625 	 *   gets inversed.
1626 	 */
1627 	if (filter_hash) {
1628 		hash = ops->func_hash->filter_hash;
1629 		other_hash = ops->func_hash->notrace_hash;
1630 		if (ftrace_hash_empty(hash))
1631 			all = 1;
1632 	} else {
1633 		inc = !inc;
1634 		hash = ops->func_hash->notrace_hash;
1635 		other_hash = ops->func_hash->filter_hash;
1636 		/*
1637 		 * If the notrace hash has no items,
1638 		 * then there's nothing to do.
1639 		 */
1640 		if (ftrace_hash_empty(hash))
1641 			return;
1642 	}
1643 
1644 	do_for_each_ftrace_rec(pg, rec) {
1645 		int in_other_hash = 0;
1646 		int in_hash = 0;
1647 		int match = 0;
1648 
1649 		if (all) {
1650 			/*
1651 			 * Only the filter_hash affects all records.
1652 			 * Update if the record is not in the notrace hash.
1653 			 */
1654 			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1655 				match = 1;
1656 		} else {
1657 			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1658 			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1659 
1660 			/*
1661 			 * If filter_hash is set, we want to match all functions
1662 			 * that are in the hash but not in the other hash.
1663 			 *
1664 			 * If filter_hash is not set, then we are decrementing.
1665 			 * That means we match anything that is in the hash
1666 			 * and also in the other_hash. That is, we need to turn
1667 			 * off functions in the other hash because they are disabled
1668 			 * by this hash.
1669 			 */
1670 			if (filter_hash && in_hash && !in_other_hash)
1671 				match = 1;
1672 			else if (!filter_hash && in_hash &&
1673 				 (in_other_hash || ftrace_hash_empty(other_hash)))
1674 				match = 1;
1675 		}
1676 		if (!match)
1677 			continue;
1678 
1679 		if (inc) {
1680 			rec->flags++;
1681 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1682 				return;
1683 
1684 			/*
1685 			 * If there's only a single callback registered to a
1686 			 * function, and the ops has a trampoline registered
1687 			 * for it, then we can call it directly.
1688 			 */
1689 			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1690 				rec->flags |= FTRACE_FL_TRAMP;
1691 			else
1692 				/*
1693 				 * If we are adding another function callback
1694 				 * to this function, and the previous had a
1695 				 * custom trampoline in use, then we need to go
1696 				 * back to the default trampoline.
1697 				 */
1698 				rec->flags &= ~FTRACE_FL_TRAMP;
1699 
1700 			/*
1701 			 * If any ops wants regs saved for this function
1702 			 * then all ops will get saved regs.
1703 			 */
1704 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1705 				rec->flags |= FTRACE_FL_REGS;
1706 		} else {
1707 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1708 				return;
1709 			rec->flags--;
1710 
1711 			/*
1712 			 * If the rec had REGS enabled and the ops that is
1713 			 * being removed had REGS set, then see if there is
1714 			 * still any ops for this record that wants regs.
1715 			 * If not, we can stop recording them.
1716 			 */
1717 			if (ftrace_rec_count(rec) > 0 &&
1718 			    rec->flags & FTRACE_FL_REGS &&
1719 			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1720 				if (!test_rec_ops_needs_regs(rec))
1721 					rec->flags &= ~FTRACE_FL_REGS;
1722 			}
1723 
1724 			/*
1725 			 * If the rec had TRAMP enabled, then it needs to
1726 			 * be cleared. As TRAMP can only be enabled iff
1727 			 * there is only a single ops attached to it.
1728 			 * In otherwords, always disable it on decrementing.
1729 			 * In the future, we may set it if rec count is
1730 			 * decremented to one, and the ops that is left
1731 			 * has a trampoline.
1732 			 */
1733 			rec->flags &= ~FTRACE_FL_TRAMP;
1734 
1735 			/*
1736 			 * flags will be cleared in ftrace_check_record()
1737 			 * if rec count is zero.
1738 			 */
1739 		}
1740 		count++;
1741 		/* Shortcut, if we handled all records, we are done. */
1742 		if (!all && count == hash->count)
1743 			return;
1744 	} while_for_each_ftrace_rec();
1745 }
1746 
1747 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1748 				    int filter_hash)
1749 {
1750 	__ftrace_hash_rec_update(ops, filter_hash, 0);
1751 }
1752 
1753 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1754 				   int filter_hash)
1755 {
1756 	__ftrace_hash_rec_update(ops, filter_hash, 1);
1757 }
1758 
1759 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1760 					  int filter_hash, int inc)
1761 {
1762 	struct ftrace_ops *op;
1763 
1764 	__ftrace_hash_rec_update(ops, filter_hash, inc);
1765 
1766 	if (ops->func_hash != &global_ops.local_hash)
1767 		return;
1768 
1769 	/*
1770 	 * If the ops shares the global_ops hash, then we need to update
1771 	 * all ops that are enabled and use this hash.
1772 	 */
1773 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1774 		/* Already done */
1775 		if (op == ops)
1776 			continue;
1777 		if (op->func_hash == &global_ops.local_hash)
1778 			__ftrace_hash_rec_update(op, filter_hash, inc);
1779 	} while_for_each_ftrace_op(op);
1780 }
1781 
1782 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1783 					   int filter_hash)
1784 {
1785 	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1786 }
1787 
1788 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1789 					  int filter_hash)
1790 {
1791 	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1792 }
1793 
1794 /*
1795  * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1796  * or no-needed to update, -EBUSY if it detects a conflict of the flag
1797  * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1798  * Note that old_hash and new_hash has below meanings
1799  *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1800  *  - If the hash is EMPTY_HASH, it hits nothing
1801  *  - Anything else hits the recs which match the hash entries.
1802  */
1803 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1804 					 struct ftrace_hash *old_hash,
1805 					 struct ftrace_hash *new_hash)
1806 {
1807 	struct ftrace_page *pg;
1808 	struct dyn_ftrace *rec, *end = NULL;
1809 	int in_old, in_new;
1810 
1811 	/* Only update if the ops has been registered */
1812 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1813 		return 0;
1814 
1815 	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1816 		return 0;
1817 
1818 	/*
1819 	 * Since the IPMODIFY is a very address sensitive action, we do not
1820 	 * allow ftrace_ops to set all functions to new hash.
1821 	 */
1822 	if (!new_hash || !old_hash)
1823 		return -EINVAL;
1824 
1825 	/* Update rec->flags */
1826 	do_for_each_ftrace_rec(pg, rec) {
1827 		/* We need to update only differences of filter_hash */
1828 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1829 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1830 		if (in_old == in_new)
1831 			continue;
1832 
1833 		if (in_new) {
1834 			/* New entries must ensure no others are using it */
1835 			if (rec->flags & FTRACE_FL_IPMODIFY)
1836 				goto rollback;
1837 			rec->flags |= FTRACE_FL_IPMODIFY;
1838 		} else /* Removed entry */
1839 			rec->flags &= ~FTRACE_FL_IPMODIFY;
1840 	} while_for_each_ftrace_rec();
1841 
1842 	return 0;
1843 
1844 rollback:
1845 	end = rec;
1846 
1847 	/* Roll back what we did above */
1848 	do_for_each_ftrace_rec(pg, rec) {
1849 		if (rec == end)
1850 			goto err_out;
1851 
1852 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1853 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1854 		if (in_old == in_new)
1855 			continue;
1856 
1857 		if (in_new)
1858 			rec->flags &= ~FTRACE_FL_IPMODIFY;
1859 		else
1860 			rec->flags |= FTRACE_FL_IPMODIFY;
1861 	} while_for_each_ftrace_rec();
1862 
1863 err_out:
1864 	return -EBUSY;
1865 }
1866 
1867 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1868 {
1869 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
1870 
1871 	if (ftrace_hash_empty(hash))
1872 		hash = NULL;
1873 
1874 	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1875 }
1876 
1877 /* Disabling always succeeds */
1878 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1879 {
1880 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
1881 
1882 	if (ftrace_hash_empty(hash))
1883 		hash = NULL;
1884 
1885 	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1886 }
1887 
1888 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1889 				       struct ftrace_hash *new_hash)
1890 {
1891 	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1892 
1893 	if (ftrace_hash_empty(old_hash))
1894 		old_hash = NULL;
1895 
1896 	if (ftrace_hash_empty(new_hash))
1897 		new_hash = NULL;
1898 
1899 	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1900 }
1901 
1902 static void print_ip_ins(const char *fmt, unsigned char *p)
1903 {
1904 	int i;
1905 
1906 	printk(KERN_CONT "%s", fmt);
1907 
1908 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1909 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1910 }
1911 
1912 static struct ftrace_ops *
1913 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1914 
1915 /**
1916  * ftrace_bug - report and shutdown function tracer
1917  * @failed: The failed type (EFAULT, EINVAL, EPERM)
1918  * @rec: The record that failed
1919  *
1920  * The arch code that enables or disables the function tracing
1921  * can call ftrace_bug() when it has detected a problem in
1922  * modifying the code. @failed should be one of either:
1923  * EFAULT - if the problem happens on reading the @ip address
1924  * EINVAL - if what is read at @ip is not what was expected
1925  * EPERM - if the problem happens on writting to the @ip address
1926  */
1927 void ftrace_bug(int failed, struct dyn_ftrace *rec)
1928 {
1929 	unsigned long ip = rec ? rec->ip : 0;
1930 
1931 	switch (failed) {
1932 	case -EFAULT:
1933 		FTRACE_WARN_ON_ONCE(1);
1934 		pr_info("ftrace faulted on modifying ");
1935 		print_ip_sym(ip);
1936 		break;
1937 	case -EINVAL:
1938 		FTRACE_WARN_ON_ONCE(1);
1939 		pr_info("ftrace failed to modify ");
1940 		print_ip_sym(ip);
1941 		print_ip_ins(" actual: ", (unsigned char *)ip);
1942 		pr_cont("\n");
1943 		break;
1944 	case -EPERM:
1945 		FTRACE_WARN_ON_ONCE(1);
1946 		pr_info("ftrace faulted on writing ");
1947 		print_ip_sym(ip);
1948 		break;
1949 	default:
1950 		FTRACE_WARN_ON_ONCE(1);
1951 		pr_info("ftrace faulted on unknown error ");
1952 		print_ip_sym(ip);
1953 	}
1954 	if (rec) {
1955 		struct ftrace_ops *ops = NULL;
1956 
1957 		pr_info("ftrace record flags: %lx\n", rec->flags);
1958 		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
1959 			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
1960 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
1961 			ops = ftrace_find_tramp_ops_any(rec);
1962 			if (ops)
1963 				pr_cont("\ttramp: %pS",
1964 					(void *)ops->trampoline);
1965 			else
1966 				pr_cont("\ttramp: ERROR!");
1967 
1968 		}
1969 		ip = ftrace_get_addr_curr(rec);
1970 		pr_cont(" expected tramp: %lx\n", ip);
1971 	}
1972 }
1973 
1974 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1975 {
1976 	unsigned long flag = 0UL;
1977 
1978 	/*
1979 	 * If we are updating calls:
1980 	 *
1981 	 *   If the record has a ref count, then we need to enable it
1982 	 *   because someone is using it.
1983 	 *
1984 	 *   Otherwise we make sure its disabled.
1985 	 *
1986 	 * If we are disabling calls, then disable all records that
1987 	 * are enabled.
1988 	 */
1989 	if (enable && ftrace_rec_count(rec))
1990 		flag = FTRACE_FL_ENABLED;
1991 
1992 	/*
1993 	 * If enabling and the REGS flag does not match the REGS_EN, or
1994 	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
1995 	 * this record. Set flags to fail the compare against ENABLED.
1996 	 */
1997 	if (flag) {
1998 		if (!(rec->flags & FTRACE_FL_REGS) !=
1999 		    !(rec->flags & FTRACE_FL_REGS_EN))
2000 			flag |= FTRACE_FL_REGS;
2001 
2002 		if (!(rec->flags & FTRACE_FL_TRAMP) !=
2003 		    !(rec->flags & FTRACE_FL_TRAMP_EN))
2004 			flag |= FTRACE_FL_TRAMP;
2005 	}
2006 
2007 	/* If the state of this record hasn't changed, then do nothing */
2008 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2009 		return FTRACE_UPDATE_IGNORE;
2010 
2011 	if (flag) {
2012 		/* Save off if rec is being enabled (for return value) */
2013 		flag ^= rec->flags & FTRACE_FL_ENABLED;
2014 
2015 		if (update) {
2016 			rec->flags |= FTRACE_FL_ENABLED;
2017 			if (flag & FTRACE_FL_REGS) {
2018 				if (rec->flags & FTRACE_FL_REGS)
2019 					rec->flags |= FTRACE_FL_REGS_EN;
2020 				else
2021 					rec->flags &= ~FTRACE_FL_REGS_EN;
2022 			}
2023 			if (flag & FTRACE_FL_TRAMP) {
2024 				if (rec->flags & FTRACE_FL_TRAMP)
2025 					rec->flags |= FTRACE_FL_TRAMP_EN;
2026 				else
2027 					rec->flags &= ~FTRACE_FL_TRAMP_EN;
2028 			}
2029 		}
2030 
2031 		/*
2032 		 * If this record is being updated from a nop, then
2033 		 *   return UPDATE_MAKE_CALL.
2034 		 * Otherwise,
2035 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2036 		 *   from the save regs, to a non-save regs function or
2037 		 *   vice versa, or from a trampoline call.
2038 		 */
2039 		if (flag & FTRACE_FL_ENABLED)
2040 			return FTRACE_UPDATE_MAKE_CALL;
2041 
2042 		return FTRACE_UPDATE_MODIFY_CALL;
2043 	}
2044 
2045 	if (update) {
2046 		/* If there's no more users, clear all flags */
2047 		if (!ftrace_rec_count(rec))
2048 			rec->flags = 0;
2049 		else
2050 			/*
2051 			 * Just disable the record, but keep the ops TRAMP
2052 			 * and REGS states. The _EN flags must be disabled though.
2053 			 */
2054 			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2055 					FTRACE_FL_REGS_EN);
2056 	}
2057 
2058 	return FTRACE_UPDATE_MAKE_NOP;
2059 }
2060 
2061 /**
2062  * ftrace_update_record, set a record that now is tracing or not
2063  * @rec: the record to update
2064  * @enable: set to 1 if the record is tracing, zero to force disable
2065  *
2066  * The records that represent all functions that can be traced need
2067  * to be updated when tracing has been enabled.
2068  */
2069 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
2070 {
2071 	return ftrace_check_record(rec, enable, 1);
2072 }
2073 
2074 /**
2075  * ftrace_test_record, check if the record has been enabled or not
2076  * @rec: the record to test
2077  * @enable: set to 1 to check if enabled, 0 if it is disabled
2078  *
2079  * The arch code may need to test if a record is already set to
2080  * tracing to determine how to modify the function code that it
2081  * represents.
2082  */
2083 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
2084 {
2085 	return ftrace_check_record(rec, enable, 0);
2086 }
2087 
2088 static struct ftrace_ops *
2089 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2090 {
2091 	struct ftrace_ops *op;
2092 	unsigned long ip = rec->ip;
2093 
2094 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2095 
2096 		if (!op->trampoline)
2097 			continue;
2098 
2099 		if (hash_contains_ip(ip, op->func_hash))
2100 			return op;
2101 	} while_for_each_ftrace_op(op);
2102 
2103 	return NULL;
2104 }
2105 
2106 static struct ftrace_ops *
2107 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2108 {
2109 	struct ftrace_ops *op;
2110 	unsigned long ip = rec->ip;
2111 
2112 	/*
2113 	 * Need to check removed ops first.
2114 	 * If they are being removed, and this rec has a tramp,
2115 	 * and this rec is in the ops list, then it would be the
2116 	 * one with the tramp.
2117 	 */
2118 	if (removed_ops) {
2119 		if (hash_contains_ip(ip, &removed_ops->old_hash))
2120 			return removed_ops;
2121 	}
2122 
2123 	/*
2124 	 * Need to find the current trampoline for a rec.
2125 	 * Now, a trampoline is only attached to a rec if there
2126 	 * was a single 'ops' attached to it. But this can be called
2127 	 * when we are adding another op to the rec or removing the
2128 	 * current one. Thus, if the op is being added, we can
2129 	 * ignore it because it hasn't attached itself to the rec
2130 	 * yet.
2131 	 *
2132 	 * If an ops is being modified (hooking to different functions)
2133 	 * then we don't care about the new functions that are being
2134 	 * added, just the old ones (that are probably being removed).
2135 	 *
2136 	 * If we are adding an ops to a function that already is using
2137 	 * a trampoline, it needs to be removed (trampolines are only
2138 	 * for single ops connected), then an ops that is not being
2139 	 * modified also needs to be checked.
2140 	 */
2141 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2142 
2143 		if (!op->trampoline)
2144 			continue;
2145 
2146 		/*
2147 		 * If the ops is being added, it hasn't gotten to
2148 		 * the point to be removed from this tree yet.
2149 		 */
2150 		if (op->flags & FTRACE_OPS_FL_ADDING)
2151 			continue;
2152 
2153 
2154 		/*
2155 		 * If the ops is being modified and is in the old
2156 		 * hash, then it is probably being removed from this
2157 		 * function.
2158 		 */
2159 		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2160 		    hash_contains_ip(ip, &op->old_hash))
2161 			return op;
2162 		/*
2163 		 * If the ops is not being added or modified, and it's
2164 		 * in its normal filter hash, then this must be the one
2165 		 * we want!
2166 		 */
2167 		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2168 		    hash_contains_ip(ip, op->func_hash))
2169 			return op;
2170 
2171 	} while_for_each_ftrace_op(op);
2172 
2173 	return NULL;
2174 }
2175 
2176 static struct ftrace_ops *
2177 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2178 {
2179 	struct ftrace_ops *op;
2180 	unsigned long ip = rec->ip;
2181 
2182 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2183 		/* pass rec in as regs to have non-NULL val */
2184 		if (hash_contains_ip(ip, op->func_hash))
2185 			return op;
2186 	} while_for_each_ftrace_op(op);
2187 
2188 	return NULL;
2189 }
2190 
2191 /**
2192  * ftrace_get_addr_new - Get the call address to set to
2193  * @rec:  The ftrace record descriptor
2194  *
2195  * If the record has the FTRACE_FL_REGS set, that means that it
2196  * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2197  * is not not set, then it wants to convert to the normal callback.
2198  *
2199  * Returns the address of the trampoline to set to
2200  */
2201 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2202 {
2203 	struct ftrace_ops *ops;
2204 
2205 	/* Trampolines take precedence over regs */
2206 	if (rec->flags & FTRACE_FL_TRAMP) {
2207 		ops = ftrace_find_tramp_ops_new(rec);
2208 		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2209 			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2210 				(void *)rec->ip, (void *)rec->ip, rec->flags);
2211 			/* Ftrace is shutting down, return anything */
2212 			return (unsigned long)FTRACE_ADDR;
2213 		}
2214 		return ops->trampoline;
2215 	}
2216 
2217 	if (rec->flags & FTRACE_FL_REGS)
2218 		return (unsigned long)FTRACE_REGS_ADDR;
2219 	else
2220 		return (unsigned long)FTRACE_ADDR;
2221 }
2222 
2223 /**
2224  * ftrace_get_addr_curr - Get the call address that is already there
2225  * @rec:  The ftrace record descriptor
2226  *
2227  * The FTRACE_FL_REGS_EN is set when the record already points to
2228  * a function that saves all the regs. Basically the '_EN' version
2229  * represents the current state of the function.
2230  *
2231  * Returns the address of the trampoline that is currently being called
2232  */
2233 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2234 {
2235 	struct ftrace_ops *ops;
2236 
2237 	/* Trampolines take precedence over regs */
2238 	if (rec->flags & FTRACE_FL_TRAMP_EN) {
2239 		ops = ftrace_find_tramp_ops_curr(rec);
2240 		if (FTRACE_WARN_ON(!ops)) {
2241 			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
2242 				    (void *)rec->ip, (void *)rec->ip);
2243 			/* Ftrace is shutting down, return anything */
2244 			return (unsigned long)FTRACE_ADDR;
2245 		}
2246 		return ops->trampoline;
2247 	}
2248 
2249 	if (rec->flags & FTRACE_FL_REGS_EN)
2250 		return (unsigned long)FTRACE_REGS_ADDR;
2251 	else
2252 		return (unsigned long)FTRACE_ADDR;
2253 }
2254 
2255 static int
2256 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2257 {
2258 	unsigned long ftrace_old_addr;
2259 	unsigned long ftrace_addr;
2260 	int ret;
2261 
2262 	ftrace_addr = ftrace_get_addr_new(rec);
2263 
2264 	/* This needs to be done before we call ftrace_update_record */
2265 	ftrace_old_addr = ftrace_get_addr_curr(rec);
2266 
2267 	ret = ftrace_update_record(rec, enable);
2268 
2269 	switch (ret) {
2270 	case FTRACE_UPDATE_IGNORE:
2271 		return 0;
2272 
2273 	case FTRACE_UPDATE_MAKE_CALL:
2274 		return ftrace_make_call(rec, ftrace_addr);
2275 
2276 	case FTRACE_UPDATE_MAKE_NOP:
2277 		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2278 
2279 	case FTRACE_UPDATE_MODIFY_CALL:
2280 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2281 	}
2282 
2283 	return -1; /* unknow ftrace bug */
2284 }
2285 
2286 void __weak ftrace_replace_code(int enable)
2287 {
2288 	struct dyn_ftrace *rec;
2289 	struct ftrace_page *pg;
2290 	int failed;
2291 
2292 	if (unlikely(ftrace_disabled))
2293 		return;
2294 
2295 	do_for_each_ftrace_rec(pg, rec) {
2296 		failed = __ftrace_replace_code(rec, enable);
2297 		if (failed) {
2298 			ftrace_bug(failed, rec);
2299 			/* Stop processing */
2300 			return;
2301 		}
2302 	} while_for_each_ftrace_rec();
2303 }
2304 
2305 struct ftrace_rec_iter {
2306 	struct ftrace_page	*pg;
2307 	int			index;
2308 };
2309 
2310 /**
2311  * ftrace_rec_iter_start, start up iterating over traced functions
2312  *
2313  * Returns an iterator handle that is used to iterate over all
2314  * the records that represent address locations where functions
2315  * are traced.
2316  *
2317  * May return NULL if no records are available.
2318  */
2319 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2320 {
2321 	/*
2322 	 * We only use a single iterator.
2323 	 * Protected by the ftrace_lock mutex.
2324 	 */
2325 	static struct ftrace_rec_iter ftrace_rec_iter;
2326 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2327 
2328 	iter->pg = ftrace_pages_start;
2329 	iter->index = 0;
2330 
2331 	/* Could have empty pages */
2332 	while (iter->pg && !iter->pg->index)
2333 		iter->pg = iter->pg->next;
2334 
2335 	if (!iter->pg)
2336 		return NULL;
2337 
2338 	return iter;
2339 }
2340 
2341 /**
2342  * ftrace_rec_iter_next, get the next record to process.
2343  * @iter: The handle to the iterator.
2344  *
2345  * Returns the next iterator after the given iterator @iter.
2346  */
2347 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2348 {
2349 	iter->index++;
2350 
2351 	if (iter->index >= iter->pg->index) {
2352 		iter->pg = iter->pg->next;
2353 		iter->index = 0;
2354 
2355 		/* Could have empty pages */
2356 		while (iter->pg && !iter->pg->index)
2357 			iter->pg = iter->pg->next;
2358 	}
2359 
2360 	if (!iter->pg)
2361 		return NULL;
2362 
2363 	return iter;
2364 }
2365 
2366 /**
2367  * ftrace_rec_iter_record, get the record at the iterator location
2368  * @iter: The current iterator location
2369  *
2370  * Returns the record that the current @iter is at.
2371  */
2372 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2373 {
2374 	return &iter->pg->records[iter->index];
2375 }
2376 
2377 static int
2378 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2379 {
2380 	int ret;
2381 
2382 	if (unlikely(ftrace_disabled))
2383 		return 0;
2384 
2385 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2386 	if (ret) {
2387 		ftrace_bug(ret, rec);
2388 		return 0;
2389 	}
2390 	return 1;
2391 }
2392 
2393 /*
2394  * archs can override this function if they must do something
2395  * before the modifying code is performed.
2396  */
2397 int __weak ftrace_arch_code_modify_prepare(void)
2398 {
2399 	return 0;
2400 }
2401 
2402 /*
2403  * archs can override this function if they must do something
2404  * after the modifying code is performed.
2405  */
2406 int __weak ftrace_arch_code_modify_post_process(void)
2407 {
2408 	return 0;
2409 }
2410 
2411 void ftrace_modify_all_code(int command)
2412 {
2413 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2414 	int err = 0;
2415 
2416 	/*
2417 	 * If the ftrace_caller calls a ftrace_ops func directly,
2418 	 * we need to make sure that it only traces functions it
2419 	 * expects to trace. When doing the switch of functions,
2420 	 * we need to update to the ftrace_ops_list_func first
2421 	 * before the transition between old and new calls are set,
2422 	 * as the ftrace_ops_list_func will check the ops hashes
2423 	 * to make sure the ops are having the right functions
2424 	 * traced.
2425 	 */
2426 	if (update) {
2427 		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2428 		if (FTRACE_WARN_ON(err))
2429 			return;
2430 	}
2431 
2432 	if (command & FTRACE_UPDATE_CALLS)
2433 		ftrace_replace_code(1);
2434 	else if (command & FTRACE_DISABLE_CALLS)
2435 		ftrace_replace_code(0);
2436 
2437 	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2438 		function_trace_op = set_function_trace_op;
2439 		smp_wmb();
2440 		/* If irqs are disabled, we are in stop machine */
2441 		if (!irqs_disabled())
2442 			smp_call_function(ftrace_sync_ipi, NULL, 1);
2443 		err = ftrace_update_ftrace_func(ftrace_trace_function);
2444 		if (FTRACE_WARN_ON(err))
2445 			return;
2446 	}
2447 
2448 	if (command & FTRACE_START_FUNC_RET)
2449 		err = ftrace_enable_ftrace_graph_caller();
2450 	else if (command & FTRACE_STOP_FUNC_RET)
2451 		err = ftrace_disable_ftrace_graph_caller();
2452 	FTRACE_WARN_ON(err);
2453 }
2454 
2455 static int __ftrace_modify_code(void *data)
2456 {
2457 	int *command = data;
2458 
2459 	ftrace_modify_all_code(*command);
2460 
2461 	return 0;
2462 }
2463 
2464 /**
2465  * ftrace_run_stop_machine, go back to the stop machine method
2466  * @command: The command to tell ftrace what to do
2467  *
2468  * If an arch needs to fall back to the stop machine method, the
2469  * it can call this function.
2470  */
2471 void ftrace_run_stop_machine(int command)
2472 {
2473 	stop_machine(__ftrace_modify_code, &command, NULL);
2474 }
2475 
2476 /**
2477  * arch_ftrace_update_code, modify the code to trace or not trace
2478  * @command: The command that needs to be done
2479  *
2480  * Archs can override this function if it does not need to
2481  * run stop_machine() to modify code.
2482  */
2483 void __weak arch_ftrace_update_code(int command)
2484 {
2485 	ftrace_run_stop_machine(command);
2486 }
2487 
2488 static void ftrace_run_update_code(int command)
2489 {
2490 	int ret;
2491 
2492 	ret = ftrace_arch_code_modify_prepare();
2493 	FTRACE_WARN_ON(ret);
2494 	if (ret)
2495 		return;
2496 
2497 	/*
2498 	 * By default we use stop_machine() to modify the code.
2499 	 * But archs can do what ever they want as long as it
2500 	 * is safe. The stop_machine() is the safest, but also
2501 	 * produces the most overhead.
2502 	 */
2503 	arch_ftrace_update_code(command);
2504 
2505 	ret = ftrace_arch_code_modify_post_process();
2506 	FTRACE_WARN_ON(ret);
2507 }
2508 
2509 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2510 				   struct ftrace_ops_hash *old_hash)
2511 {
2512 	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2513 	ops->old_hash.filter_hash = old_hash->filter_hash;
2514 	ops->old_hash.notrace_hash = old_hash->notrace_hash;
2515 	ftrace_run_update_code(command);
2516 	ops->old_hash.filter_hash = NULL;
2517 	ops->old_hash.notrace_hash = NULL;
2518 	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2519 }
2520 
2521 static ftrace_func_t saved_ftrace_func;
2522 static int ftrace_start_up;
2523 
2524 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2525 {
2526 }
2527 
2528 static void control_ops_free(struct ftrace_ops *ops)
2529 {
2530 	free_percpu(ops->disabled);
2531 }
2532 
2533 static void ftrace_startup_enable(int command)
2534 {
2535 	if (saved_ftrace_func != ftrace_trace_function) {
2536 		saved_ftrace_func = ftrace_trace_function;
2537 		command |= FTRACE_UPDATE_TRACE_FUNC;
2538 	}
2539 
2540 	if (!command || !ftrace_enabled)
2541 		return;
2542 
2543 	ftrace_run_update_code(command);
2544 }
2545 
2546 static void ftrace_startup_all(int command)
2547 {
2548 	update_all_ops = true;
2549 	ftrace_startup_enable(command);
2550 	update_all_ops = false;
2551 }
2552 
2553 static int ftrace_startup(struct ftrace_ops *ops, int command)
2554 {
2555 	int ret;
2556 
2557 	if (unlikely(ftrace_disabled))
2558 		return -ENODEV;
2559 
2560 	ret = __register_ftrace_function(ops);
2561 	if (ret)
2562 		return ret;
2563 
2564 	ftrace_start_up++;
2565 	command |= FTRACE_UPDATE_CALLS;
2566 
2567 	/*
2568 	 * Note that ftrace probes uses this to start up
2569 	 * and modify functions it will probe. But we still
2570 	 * set the ADDING flag for modification, as probes
2571 	 * do not have trampolines. If they add them in the
2572 	 * future, then the probes will need to distinguish
2573 	 * between adding and updating probes.
2574 	 */
2575 	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2576 
2577 	ret = ftrace_hash_ipmodify_enable(ops);
2578 	if (ret < 0) {
2579 		/* Rollback registration process */
2580 		__unregister_ftrace_function(ops);
2581 		ftrace_start_up--;
2582 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2583 		return ret;
2584 	}
2585 
2586 	ftrace_hash_rec_enable(ops, 1);
2587 
2588 	ftrace_startup_enable(command);
2589 
2590 	ops->flags &= ~FTRACE_OPS_FL_ADDING;
2591 
2592 	return 0;
2593 }
2594 
2595 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2596 {
2597 	int ret;
2598 
2599 	if (unlikely(ftrace_disabled))
2600 		return -ENODEV;
2601 
2602 	ret = __unregister_ftrace_function(ops);
2603 	if (ret)
2604 		return ret;
2605 
2606 	ftrace_start_up--;
2607 	/*
2608 	 * Just warn in case of unbalance, no need to kill ftrace, it's not
2609 	 * critical but the ftrace_call callers may be never nopped again after
2610 	 * further ftrace uses.
2611 	 */
2612 	WARN_ON_ONCE(ftrace_start_up < 0);
2613 
2614 	/* Disabling ipmodify never fails */
2615 	ftrace_hash_ipmodify_disable(ops);
2616 	ftrace_hash_rec_disable(ops, 1);
2617 
2618 	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2619 
2620 	command |= FTRACE_UPDATE_CALLS;
2621 
2622 	if (saved_ftrace_func != ftrace_trace_function) {
2623 		saved_ftrace_func = ftrace_trace_function;
2624 		command |= FTRACE_UPDATE_TRACE_FUNC;
2625 	}
2626 
2627 	if (!command || !ftrace_enabled) {
2628 		/*
2629 		 * If these are control ops, they still need their
2630 		 * per_cpu field freed. Since, function tracing is
2631 		 * not currently active, we can just free them
2632 		 * without synchronizing all CPUs.
2633 		 */
2634 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
2635 			control_ops_free(ops);
2636 		return 0;
2637 	}
2638 
2639 	/*
2640 	 * If the ops uses a trampoline, then it needs to be
2641 	 * tested first on update.
2642 	 */
2643 	ops->flags |= FTRACE_OPS_FL_REMOVING;
2644 	removed_ops = ops;
2645 
2646 	/* The trampoline logic checks the old hashes */
2647 	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2648 	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2649 
2650 	ftrace_run_update_code(command);
2651 
2652 	/*
2653 	 * If there's no more ops registered with ftrace, run a
2654 	 * sanity check to make sure all rec flags are cleared.
2655 	 */
2656 	if (ftrace_ops_list == &ftrace_list_end) {
2657 		struct ftrace_page *pg;
2658 		struct dyn_ftrace *rec;
2659 
2660 		do_for_each_ftrace_rec(pg, rec) {
2661 			if (FTRACE_WARN_ON_ONCE(rec->flags))
2662 				pr_warn("  %pS flags:%lx\n",
2663 					(void *)rec->ip, rec->flags);
2664 		} while_for_each_ftrace_rec();
2665 	}
2666 
2667 	ops->old_hash.filter_hash = NULL;
2668 	ops->old_hash.notrace_hash = NULL;
2669 
2670 	removed_ops = NULL;
2671 	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2672 
2673 	/*
2674 	 * Dynamic ops may be freed, we must make sure that all
2675 	 * callers are done before leaving this function.
2676 	 * The same goes for freeing the per_cpu data of the control
2677 	 * ops.
2678 	 *
2679 	 * Again, normal synchronize_sched() is not good enough.
2680 	 * We need to do a hard force of sched synchronization.
2681 	 * This is because we use preempt_disable() to do RCU, but
2682 	 * the function tracers can be called where RCU is not watching
2683 	 * (like before user_exit()). We can not rely on the RCU
2684 	 * infrastructure to do the synchronization, thus we must do it
2685 	 * ourselves.
2686 	 */
2687 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2688 		schedule_on_each_cpu(ftrace_sync);
2689 
2690 		arch_ftrace_trampoline_free(ops);
2691 
2692 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
2693 			control_ops_free(ops);
2694 	}
2695 
2696 	return 0;
2697 }
2698 
2699 static void ftrace_startup_sysctl(void)
2700 {
2701 	int command;
2702 
2703 	if (unlikely(ftrace_disabled))
2704 		return;
2705 
2706 	/* Force update next time */
2707 	saved_ftrace_func = NULL;
2708 	/* ftrace_start_up is true if we want ftrace running */
2709 	if (ftrace_start_up) {
2710 		command = FTRACE_UPDATE_CALLS;
2711 		if (ftrace_graph_active)
2712 			command |= FTRACE_START_FUNC_RET;
2713 		ftrace_startup_enable(command);
2714 	}
2715 }
2716 
2717 static void ftrace_shutdown_sysctl(void)
2718 {
2719 	int command;
2720 
2721 	if (unlikely(ftrace_disabled))
2722 		return;
2723 
2724 	/* ftrace_start_up is true if ftrace is running */
2725 	if (ftrace_start_up) {
2726 		command = FTRACE_DISABLE_CALLS;
2727 		if (ftrace_graph_active)
2728 			command |= FTRACE_STOP_FUNC_RET;
2729 		ftrace_run_update_code(command);
2730 	}
2731 }
2732 
2733 static cycle_t		ftrace_update_time;
2734 unsigned long		ftrace_update_tot_cnt;
2735 
2736 static inline int ops_traces_mod(struct ftrace_ops *ops)
2737 {
2738 	/*
2739 	 * Filter_hash being empty will default to trace module.
2740 	 * But notrace hash requires a test of individual module functions.
2741 	 */
2742 	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2743 		ftrace_hash_empty(ops->func_hash->notrace_hash);
2744 }
2745 
2746 /*
2747  * Check if the current ops references the record.
2748  *
2749  * If the ops traces all functions, then it was already accounted for.
2750  * If the ops does not trace the current record function, skip it.
2751  * If the ops ignores the function via notrace filter, skip it.
2752  */
2753 static inline bool
2754 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2755 {
2756 	/* If ops isn't enabled, ignore it */
2757 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2758 		return 0;
2759 
2760 	/* If ops traces all mods, we already accounted for it */
2761 	if (ops_traces_mod(ops))
2762 		return 0;
2763 
2764 	/* The function must be in the filter */
2765 	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2766 	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2767 		return 0;
2768 
2769 	/* If in notrace hash, we ignore it too */
2770 	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2771 		return 0;
2772 
2773 	return 1;
2774 }
2775 
2776 static int referenced_filters(struct dyn_ftrace *rec)
2777 {
2778 	struct ftrace_ops *ops;
2779 	int cnt = 0;
2780 
2781 	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2782 		if (ops_references_rec(ops, rec))
2783 		    cnt++;
2784 	}
2785 
2786 	return cnt;
2787 }
2788 
2789 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2790 {
2791 	struct ftrace_page *pg;
2792 	struct dyn_ftrace *p;
2793 	cycle_t start, stop;
2794 	unsigned long update_cnt = 0;
2795 	unsigned long ref = 0;
2796 	bool test = false;
2797 	int i;
2798 
2799 	/*
2800 	 * When adding a module, we need to check if tracers are
2801 	 * currently enabled and if they are set to trace all functions.
2802 	 * If they are, we need to enable the module functions as well
2803 	 * as update the reference counts for those function records.
2804 	 */
2805 	if (mod) {
2806 		struct ftrace_ops *ops;
2807 
2808 		for (ops = ftrace_ops_list;
2809 		     ops != &ftrace_list_end; ops = ops->next) {
2810 			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2811 				if (ops_traces_mod(ops))
2812 					ref++;
2813 				else
2814 					test = true;
2815 			}
2816 		}
2817 	}
2818 
2819 	start = ftrace_now(raw_smp_processor_id());
2820 
2821 	for (pg = new_pgs; pg; pg = pg->next) {
2822 
2823 		for (i = 0; i < pg->index; i++) {
2824 			int cnt = ref;
2825 
2826 			/* If something went wrong, bail without enabling anything */
2827 			if (unlikely(ftrace_disabled))
2828 				return -1;
2829 
2830 			p = &pg->records[i];
2831 			if (test)
2832 				cnt += referenced_filters(p);
2833 			p->flags = cnt;
2834 
2835 			/*
2836 			 * Do the initial record conversion from mcount jump
2837 			 * to the NOP instructions.
2838 			 */
2839 			if (!ftrace_code_disable(mod, p))
2840 				break;
2841 
2842 			update_cnt++;
2843 
2844 			/*
2845 			 * If the tracing is enabled, go ahead and enable the record.
2846 			 *
2847 			 * The reason not to enable the record immediatelly is the
2848 			 * inherent check of ftrace_make_nop/ftrace_make_call for
2849 			 * correct previous instructions.  Making first the NOP
2850 			 * conversion puts the module to the correct state, thus
2851 			 * passing the ftrace_make_call check.
2852 			 */
2853 			if (ftrace_start_up && cnt) {
2854 				int failed = __ftrace_replace_code(p, 1);
2855 				if (failed)
2856 					ftrace_bug(failed, p);
2857 			}
2858 		}
2859 	}
2860 
2861 	stop = ftrace_now(raw_smp_processor_id());
2862 	ftrace_update_time = stop - start;
2863 	ftrace_update_tot_cnt += update_cnt;
2864 
2865 	return 0;
2866 }
2867 
2868 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2869 {
2870 	int order;
2871 	int cnt;
2872 
2873 	if (WARN_ON(!count))
2874 		return -EINVAL;
2875 
2876 	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2877 
2878 	/*
2879 	 * We want to fill as much as possible. No more than a page
2880 	 * may be empty.
2881 	 */
2882 	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2883 		order--;
2884 
2885  again:
2886 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2887 
2888 	if (!pg->records) {
2889 		/* if we can't allocate this size, try something smaller */
2890 		if (!order)
2891 			return -ENOMEM;
2892 		order >>= 1;
2893 		goto again;
2894 	}
2895 
2896 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2897 	pg->size = cnt;
2898 
2899 	if (cnt > count)
2900 		cnt = count;
2901 
2902 	return cnt;
2903 }
2904 
2905 static struct ftrace_page *
2906 ftrace_allocate_pages(unsigned long num_to_init)
2907 {
2908 	struct ftrace_page *start_pg;
2909 	struct ftrace_page *pg;
2910 	int order;
2911 	int cnt;
2912 
2913 	if (!num_to_init)
2914 		return 0;
2915 
2916 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2917 	if (!pg)
2918 		return NULL;
2919 
2920 	/*
2921 	 * Try to allocate as much as possible in one continues
2922 	 * location that fills in all of the space. We want to
2923 	 * waste as little space as possible.
2924 	 */
2925 	for (;;) {
2926 		cnt = ftrace_allocate_records(pg, num_to_init);
2927 		if (cnt < 0)
2928 			goto free_pages;
2929 
2930 		num_to_init -= cnt;
2931 		if (!num_to_init)
2932 			break;
2933 
2934 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2935 		if (!pg->next)
2936 			goto free_pages;
2937 
2938 		pg = pg->next;
2939 	}
2940 
2941 	return start_pg;
2942 
2943  free_pages:
2944 	pg = start_pg;
2945 	while (pg) {
2946 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2947 		free_pages((unsigned long)pg->records, order);
2948 		start_pg = pg->next;
2949 		kfree(pg);
2950 		pg = start_pg;
2951 	}
2952 	pr_info("ftrace: FAILED to allocate memory for functions\n");
2953 	return NULL;
2954 }
2955 
2956 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2957 
2958 struct ftrace_iterator {
2959 	loff_t				pos;
2960 	loff_t				func_pos;
2961 	struct ftrace_page		*pg;
2962 	struct dyn_ftrace		*func;
2963 	struct ftrace_func_probe	*probe;
2964 	struct trace_parser		parser;
2965 	struct ftrace_hash		*hash;
2966 	struct ftrace_ops		*ops;
2967 	int				hidx;
2968 	int				idx;
2969 	unsigned			flags;
2970 };
2971 
2972 static void *
2973 t_hash_next(struct seq_file *m, loff_t *pos)
2974 {
2975 	struct ftrace_iterator *iter = m->private;
2976 	struct hlist_node *hnd = NULL;
2977 	struct hlist_head *hhd;
2978 
2979 	(*pos)++;
2980 	iter->pos = *pos;
2981 
2982 	if (iter->probe)
2983 		hnd = &iter->probe->node;
2984  retry:
2985 	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2986 		return NULL;
2987 
2988 	hhd = &ftrace_func_hash[iter->hidx];
2989 
2990 	if (hlist_empty(hhd)) {
2991 		iter->hidx++;
2992 		hnd = NULL;
2993 		goto retry;
2994 	}
2995 
2996 	if (!hnd)
2997 		hnd = hhd->first;
2998 	else {
2999 		hnd = hnd->next;
3000 		if (!hnd) {
3001 			iter->hidx++;
3002 			goto retry;
3003 		}
3004 	}
3005 
3006 	if (WARN_ON_ONCE(!hnd))
3007 		return NULL;
3008 
3009 	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
3010 
3011 	return iter;
3012 }
3013 
3014 static void *t_hash_start(struct seq_file *m, loff_t *pos)
3015 {
3016 	struct ftrace_iterator *iter = m->private;
3017 	void *p = NULL;
3018 	loff_t l;
3019 
3020 	if (!(iter->flags & FTRACE_ITER_DO_HASH))
3021 		return NULL;
3022 
3023 	if (iter->func_pos > *pos)
3024 		return NULL;
3025 
3026 	iter->hidx = 0;
3027 	for (l = 0; l <= (*pos - iter->func_pos); ) {
3028 		p = t_hash_next(m, &l);
3029 		if (!p)
3030 			break;
3031 	}
3032 	if (!p)
3033 		return NULL;
3034 
3035 	/* Only set this if we have an item */
3036 	iter->flags |= FTRACE_ITER_HASH;
3037 
3038 	return iter;
3039 }
3040 
3041 static int
3042 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
3043 {
3044 	struct ftrace_func_probe *rec;
3045 
3046 	rec = iter->probe;
3047 	if (WARN_ON_ONCE(!rec))
3048 		return -EIO;
3049 
3050 	if (rec->ops->print)
3051 		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
3052 
3053 	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
3054 
3055 	if (rec->data)
3056 		seq_printf(m, ":%p", rec->data);
3057 	seq_putc(m, '\n');
3058 
3059 	return 0;
3060 }
3061 
3062 static void *
3063 t_next(struct seq_file *m, void *v, loff_t *pos)
3064 {
3065 	struct ftrace_iterator *iter = m->private;
3066 	struct ftrace_ops *ops = iter->ops;
3067 	struct dyn_ftrace *rec = NULL;
3068 
3069 	if (unlikely(ftrace_disabled))
3070 		return NULL;
3071 
3072 	if (iter->flags & FTRACE_ITER_HASH)
3073 		return t_hash_next(m, pos);
3074 
3075 	(*pos)++;
3076 	iter->pos = iter->func_pos = *pos;
3077 
3078 	if (iter->flags & FTRACE_ITER_PRINTALL)
3079 		return t_hash_start(m, pos);
3080 
3081  retry:
3082 	if (iter->idx >= iter->pg->index) {
3083 		if (iter->pg->next) {
3084 			iter->pg = iter->pg->next;
3085 			iter->idx = 0;
3086 			goto retry;
3087 		}
3088 	} else {
3089 		rec = &iter->pg->records[iter->idx++];
3090 		if (((iter->flags & FTRACE_ITER_FILTER) &&
3091 		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
3092 
3093 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
3094 		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
3095 
3096 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
3097 		     !(rec->flags & FTRACE_FL_ENABLED))) {
3098 
3099 			rec = NULL;
3100 			goto retry;
3101 		}
3102 	}
3103 
3104 	if (!rec)
3105 		return t_hash_start(m, pos);
3106 
3107 	iter->func = rec;
3108 
3109 	return iter;
3110 }
3111 
3112 static void reset_iter_read(struct ftrace_iterator *iter)
3113 {
3114 	iter->pos = 0;
3115 	iter->func_pos = 0;
3116 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
3117 }
3118 
3119 static void *t_start(struct seq_file *m, loff_t *pos)
3120 {
3121 	struct ftrace_iterator *iter = m->private;
3122 	struct ftrace_ops *ops = iter->ops;
3123 	void *p = NULL;
3124 	loff_t l;
3125 
3126 	mutex_lock(&ftrace_lock);
3127 
3128 	if (unlikely(ftrace_disabled))
3129 		return NULL;
3130 
3131 	/*
3132 	 * If an lseek was done, then reset and start from beginning.
3133 	 */
3134 	if (*pos < iter->pos)
3135 		reset_iter_read(iter);
3136 
3137 	/*
3138 	 * For set_ftrace_filter reading, if we have the filter
3139 	 * off, we can short cut and just print out that all
3140 	 * functions are enabled.
3141 	 */
3142 	if ((iter->flags & FTRACE_ITER_FILTER &&
3143 	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
3144 	    (iter->flags & FTRACE_ITER_NOTRACE &&
3145 	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
3146 		if (*pos > 0)
3147 			return t_hash_start(m, pos);
3148 		iter->flags |= FTRACE_ITER_PRINTALL;
3149 		/* reset in case of seek/pread */
3150 		iter->flags &= ~FTRACE_ITER_HASH;
3151 		return iter;
3152 	}
3153 
3154 	if (iter->flags & FTRACE_ITER_HASH)
3155 		return t_hash_start(m, pos);
3156 
3157 	/*
3158 	 * Unfortunately, we need to restart at ftrace_pages_start
3159 	 * every time we let go of the ftrace_mutex. This is because
3160 	 * those pointers can change without the lock.
3161 	 */
3162 	iter->pg = ftrace_pages_start;
3163 	iter->idx = 0;
3164 	for (l = 0; l <= *pos; ) {
3165 		p = t_next(m, p, &l);
3166 		if (!p)
3167 			break;
3168 	}
3169 
3170 	if (!p)
3171 		return t_hash_start(m, pos);
3172 
3173 	return iter;
3174 }
3175 
3176 static void t_stop(struct seq_file *m, void *p)
3177 {
3178 	mutex_unlock(&ftrace_lock);
3179 }
3180 
3181 void * __weak
3182 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3183 {
3184 	return NULL;
3185 }
3186 
3187 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3188 				struct dyn_ftrace *rec)
3189 {
3190 	void *ptr;
3191 
3192 	ptr = arch_ftrace_trampoline_func(ops, rec);
3193 	if (ptr)
3194 		seq_printf(m, " ->%pS", ptr);
3195 }
3196 
3197 static int t_show(struct seq_file *m, void *v)
3198 {
3199 	struct ftrace_iterator *iter = m->private;
3200 	struct dyn_ftrace *rec;
3201 
3202 	if (iter->flags & FTRACE_ITER_HASH)
3203 		return t_hash_show(m, iter);
3204 
3205 	if (iter->flags & FTRACE_ITER_PRINTALL) {
3206 		if (iter->flags & FTRACE_ITER_NOTRACE)
3207 			seq_puts(m, "#### no functions disabled ####\n");
3208 		else
3209 			seq_puts(m, "#### all functions enabled ####\n");
3210 		return 0;
3211 	}
3212 
3213 	rec = iter->func;
3214 
3215 	if (!rec)
3216 		return 0;
3217 
3218 	seq_printf(m, "%ps", (void *)rec->ip);
3219 	if (iter->flags & FTRACE_ITER_ENABLED) {
3220 		struct ftrace_ops *ops = NULL;
3221 
3222 		seq_printf(m, " (%ld)%s%s",
3223 			   ftrace_rec_count(rec),
3224 			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3225 			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
3226 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
3227 			ops = ftrace_find_tramp_ops_any(rec);
3228 			if (ops)
3229 				seq_printf(m, "\ttramp: %pS",
3230 					   (void *)ops->trampoline);
3231 			else
3232 				seq_puts(m, "\ttramp: ERROR!");
3233 
3234 		}
3235 		add_trampoline_func(m, ops, rec);
3236 	}
3237 
3238 	seq_putc(m, '\n');
3239 
3240 	return 0;
3241 }
3242 
3243 static const struct seq_operations show_ftrace_seq_ops = {
3244 	.start = t_start,
3245 	.next = t_next,
3246 	.stop = t_stop,
3247 	.show = t_show,
3248 };
3249 
3250 static int
3251 ftrace_avail_open(struct inode *inode, struct file *file)
3252 {
3253 	struct ftrace_iterator *iter;
3254 
3255 	if (unlikely(ftrace_disabled))
3256 		return -ENODEV;
3257 
3258 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3259 	if (iter) {
3260 		iter->pg = ftrace_pages_start;
3261 		iter->ops = &global_ops;
3262 	}
3263 
3264 	return iter ? 0 : -ENOMEM;
3265 }
3266 
3267 static int
3268 ftrace_enabled_open(struct inode *inode, struct file *file)
3269 {
3270 	struct ftrace_iterator *iter;
3271 
3272 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3273 	if (iter) {
3274 		iter->pg = ftrace_pages_start;
3275 		iter->flags = FTRACE_ITER_ENABLED;
3276 		iter->ops = &global_ops;
3277 	}
3278 
3279 	return iter ? 0 : -ENOMEM;
3280 }
3281 
3282 /**
3283  * ftrace_regex_open - initialize function tracer filter files
3284  * @ops: The ftrace_ops that hold the hash filters
3285  * @flag: The type of filter to process
3286  * @inode: The inode, usually passed in to your open routine
3287  * @file: The file, usually passed in to your open routine
3288  *
3289  * ftrace_regex_open() initializes the filter files for the
3290  * @ops. Depending on @flag it may process the filter hash or
3291  * the notrace hash of @ops. With this called from the open
3292  * routine, you can use ftrace_filter_write() for the write
3293  * routine if @flag has FTRACE_ITER_FILTER set, or
3294  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3295  * tracing_lseek() should be used as the lseek routine, and
3296  * release must call ftrace_regex_release().
3297  */
3298 int
3299 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3300 		  struct inode *inode, struct file *file)
3301 {
3302 	struct ftrace_iterator *iter;
3303 	struct ftrace_hash *hash;
3304 	int ret = 0;
3305 
3306 	ftrace_ops_init(ops);
3307 
3308 	if (unlikely(ftrace_disabled))
3309 		return -ENODEV;
3310 
3311 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3312 	if (!iter)
3313 		return -ENOMEM;
3314 
3315 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3316 		kfree(iter);
3317 		return -ENOMEM;
3318 	}
3319 
3320 	iter->ops = ops;
3321 	iter->flags = flag;
3322 
3323 	mutex_lock(&ops->func_hash->regex_lock);
3324 
3325 	if (flag & FTRACE_ITER_NOTRACE)
3326 		hash = ops->func_hash->notrace_hash;
3327 	else
3328 		hash = ops->func_hash->filter_hash;
3329 
3330 	if (file->f_mode & FMODE_WRITE) {
3331 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3332 
3333 		if (file->f_flags & O_TRUNC)
3334 			iter->hash = alloc_ftrace_hash(size_bits);
3335 		else
3336 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3337 
3338 		if (!iter->hash) {
3339 			trace_parser_put(&iter->parser);
3340 			kfree(iter);
3341 			ret = -ENOMEM;
3342 			goto out_unlock;
3343 		}
3344 	}
3345 
3346 	if (file->f_mode & FMODE_READ) {
3347 		iter->pg = ftrace_pages_start;
3348 
3349 		ret = seq_open(file, &show_ftrace_seq_ops);
3350 		if (!ret) {
3351 			struct seq_file *m = file->private_data;
3352 			m->private = iter;
3353 		} else {
3354 			/* Failed */
3355 			free_ftrace_hash(iter->hash);
3356 			trace_parser_put(&iter->parser);
3357 			kfree(iter);
3358 		}
3359 	} else
3360 		file->private_data = iter;
3361 
3362  out_unlock:
3363 	mutex_unlock(&ops->func_hash->regex_lock);
3364 
3365 	return ret;
3366 }
3367 
3368 static int
3369 ftrace_filter_open(struct inode *inode, struct file *file)
3370 {
3371 	struct ftrace_ops *ops = inode->i_private;
3372 
3373 	return ftrace_regex_open(ops,
3374 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3375 			inode, file);
3376 }
3377 
3378 static int
3379 ftrace_notrace_open(struct inode *inode, struct file *file)
3380 {
3381 	struct ftrace_ops *ops = inode->i_private;
3382 
3383 	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3384 				 inode, file);
3385 }
3386 
3387 static int ftrace_match(char *str, char *regex, int len, int type)
3388 {
3389 	int matched = 0;
3390 	int slen;
3391 
3392 	switch (type) {
3393 	case MATCH_FULL:
3394 		if (strcmp(str, regex) == 0)
3395 			matched = 1;
3396 		break;
3397 	case MATCH_FRONT_ONLY:
3398 		if (strncmp(str, regex, len) == 0)
3399 			matched = 1;
3400 		break;
3401 	case MATCH_MIDDLE_ONLY:
3402 		if (strstr(str, regex))
3403 			matched = 1;
3404 		break;
3405 	case MATCH_END_ONLY:
3406 		slen = strlen(str);
3407 		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
3408 			matched = 1;
3409 		break;
3410 	}
3411 
3412 	return matched;
3413 }
3414 
3415 static int
3416 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
3417 {
3418 	struct ftrace_func_entry *entry;
3419 	int ret = 0;
3420 
3421 	entry = ftrace_lookup_ip(hash, rec->ip);
3422 	if (not) {
3423 		/* Do nothing if it doesn't exist */
3424 		if (!entry)
3425 			return 0;
3426 
3427 		free_hash_entry(hash, entry);
3428 	} else {
3429 		/* Do nothing if it exists */
3430 		if (entry)
3431 			return 0;
3432 
3433 		ret = add_hash_entry(hash, rec->ip);
3434 	}
3435 	return ret;
3436 }
3437 
3438 static int
3439 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
3440 		    char *regex, int len, int type)
3441 {
3442 	char str[KSYM_SYMBOL_LEN];
3443 	char *modname;
3444 
3445 	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3446 
3447 	if (mod) {
3448 		/* module lookup requires matching the module */
3449 		if (!modname || strcmp(modname, mod))
3450 			return 0;
3451 
3452 		/* blank search means to match all funcs in the mod */
3453 		if (!len)
3454 			return 1;
3455 	}
3456 
3457 	return ftrace_match(str, regex, len, type);
3458 }
3459 
3460 static int
3461 match_records(struct ftrace_hash *hash, char *buff,
3462 	      int len, char *mod, int not)
3463 {
3464 	unsigned search_len = 0;
3465 	struct ftrace_page *pg;
3466 	struct dyn_ftrace *rec;
3467 	int type = MATCH_FULL;
3468 	char *search = buff;
3469 	int found = 0;
3470 	int ret;
3471 
3472 	if (len) {
3473 		type = filter_parse_regex(buff, len, &search, &not);
3474 		search_len = strlen(search);
3475 	}
3476 
3477 	mutex_lock(&ftrace_lock);
3478 
3479 	if (unlikely(ftrace_disabled))
3480 		goto out_unlock;
3481 
3482 	do_for_each_ftrace_rec(pg, rec) {
3483 		if (ftrace_match_record(rec, mod, search, search_len, type)) {
3484 			ret = enter_record(hash, rec, not);
3485 			if (ret < 0) {
3486 				found = ret;
3487 				goto out_unlock;
3488 			}
3489 			found = 1;
3490 		}
3491 	} while_for_each_ftrace_rec();
3492  out_unlock:
3493 	mutex_unlock(&ftrace_lock);
3494 
3495 	return found;
3496 }
3497 
3498 static int
3499 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3500 {
3501 	return match_records(hash, buff, len, NULL, 0);
3502 }
3503 
3504 static int
3505 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
3506 {
3507 	int not = 0;
3508 
3509 	/* blank or '*' mean the same */
3510 	if (strcmp(buff, "*") == 0)
3511 		buff[0] = 0;
3512 
3513 	/* handle the case of 'dont filter this module' */
3514 	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
3515 		buff[0] = 0;
3516 		not = 1;
3517 	}
3518 
3519 	return match_records(hash, buff, strlen(buff), mod, not);
3520 }
3521 
3522 /*
3523  * We register the module command as a template to show others how
3524  * to register the a command as well.
3525  */
3526 
3527 static int
3528 ftrace_mod_callback(struct ftrace_hash *hash,
3529 		    char *func, char *cmd, char *param, int enable)
3530 {
3531 	char *mod;
3532 	int ret = -EINVAL;
3533 
3534 	/*
3535 	 * cmd == 'mod' because we only registered this func
3536 	 * for the 'mod' ftrace_func_command.
3537 	 * But if you register one func with multiple commands,
3538 	 * you can tell which command was used by the cmd
3539 	 * parameter.
3540 	 */
3541 
3542 	/* we must have a module name */
3543 	if (!param)
3544 		return ret;
3545 
3546 	mod = strsep(&param, ":");
3547 	if (!strlen(mod))
3548 		return ret;
3549 
3550 	ret = ftrace_match_module_records(hash, func, mod);
3551 	if (!ret)
3552 		ret = -EINVAL;
3553 	if (ret < 0)
3554 		return ret;
3555 
3556 	return 0;
3557 }
3558 
3559 static struct ftrace_func_command ftrace_mod_cmd = {
3560 	.name			= "mod",
3561 	.func			= ftrace_mod_callback,
3562 };
3563 
3564 static int __init ftrace_mod_cmd_init(void)
3565 {
3566 	return register_ftrace_command(&ftrace_mod_cmd);
3567 }
3568 core_initcall(ftrace_mod_cmd_init);
3569 
3570 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3571 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
3572 {
3573 	struct ftrace_func_probe *entry;
3574 	struct hlist_head *hhd;
3575 	unsigned long key;
3576 
3577 	key = hash_long(ip, FTRACE_HASH_BITS);
3578 
3579 	hhd = &ftrace_func_hash[key];
3580 
3581 	if (hlist_empty(hhd))
3582 		return;
3583 
3584 	/*
3585 	 * Disable preemption for these calls to prevent a RCU grace
3586 	 * period. This syncs the hash iteration and freeing of items
3587 	 * on the hash. rcu_read_lock is too dangerous here.
3588 	 */
3589 	preempt_disable_notrace();
3590 	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3591 		if (entry->ip == ip)
3592 			entry->ops->func(ip, parent_ip, &entry->data);
3593 	}
3594 	preempt_enable_notrace();
3595 }
3596 
3597 static struct ftrace_ops trace_probe_ops __read_mostly =
3598 {
3599 	.func		= function_trace_probe_call,
3600 	.flags		= FTRACE_OPS_FL_INITIALIZED,
3601 	INIT_OPS_HASH(trace_probe_ops)
3602 };
3603 
3604 static int ftrace_probe_registered;
3605 
3606 static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
3607 {
3608 	int ret;
3609 	int i;
3610 
3611 	if (ftrace_probe_registered) {
3612 		/* still need to update the function call sites */
3613 		if (ftrace_enabled)
3614 			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3615 					       old_hash);
3616 		return;
3617 	}
3618 
3619 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3620 		struct hlist_head *hhd = &ftrace_func_hash[i];
3621 		if (hhd->first)
3622 			break;
3623 	}
3624 	/* Nothing registered? */
3625 	if (i == FTRACE_FUNC_HASHSIZE)
3626 		return;
3627 
3628 	ret = ftrace_startup(&trace_probe_ops, 0);
3629 
3630 	ftrace_probe_registered = 1;
3631 }
3632 
3633 static void __disable_ftrace_function_probe(void)
3634 {
3635 	int i;
3636 
3637 	if (!ftrace_probe_registered)
3638 		return;
3639 
3640 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3641 		struct hlist_head *hhd = &ftrace_func_hash[i];
3642 		if (hhd->first)
3643 			return;
3644 	}
3645 
3646 	/* no more funcs left */
3647 	ftrace_shutdown(&trace_probe_ops, 0);
3648 
3649 	ftrace_probe_registered = 0;
3650 }
3651 
3652 
3653 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3654 {
3655 	if (entry->ops->free)
3656 		entry->ops->free(entry->ops, entry->ip, &entry->data);
3657 	kfree(entry);
3658 }
3659 
3660 int
3661 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3662 			      void *data)
3663 {
3664 	struct ftrace_ops_hash old_hash_ops;
3665 	struct ftrace_func_probe *entry;
3666 	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3667 	struct ftrace_hash *old_hash = *orig_hash;
3668 	struct ftrace_hash *hash;
3669 	struct ftrace_page *pg;
3670 	struct dyn_ftrace *rec;
3671 	int type, len, not;
3672 	unsigned long key;
3673 	int count = 0;
3674 	char *search;
3675 	int ret;
3676 
3677 	type = filter_parse_regex(glob, strlen(glob), &search, &not);
3678 	len = strlen(search);
3679 
3680 	/* we do not support '!' for function probes */
3681 	if (WARN_ON(not))
3682 		return -EINVAL;
3683 
3684 	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3685 
3686 	old_hash_ops.filter_hash = old_hash;
3687 	/* Probes only have filters */
3688 	old_hash_ops.notrace_hash = NULL;
3689 
3690 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3691 	if (!hash) {
3692 		count = -ENOMEM;
3693 		goto out;
3694 	}
3695 
3696 	if (unlikely(ftrace_disabled)) {
3697 		count = -ENODEV;
3698 		goto out;
3699 	}
3700 
3701 	mutex_lock(&ftrace_lock);
3702 
3703 	do_for_each_ftrace_rec(pg, rec) {
3704 
3705 		if (!ftrace_match_record(rec, NULL, search, len, type))
3706 			continue;
3707 
3708 		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3709 		if (!entry) {
3710 			/* If we did not process any, then return error */
3711 			if (!count)
3712 				count = -ENOMEM;
3713 			goto out_unlock;
3714 		}
3715 
3716 		count++;
3717 
3718 		entry->data = data;
3719 
3720 		/*
3721 		 * The caller might want to do something special
3722 		 * for each function we find. We call the callback
3723 		 * to give the caller an opportunity to do so.
3724 		 */
3725 		if (ops->init) {
3726 			if (ops->init(ops, rec->ip, &entry->data) < 0) {
3727 				/* caller does not like this func */
3728 				kfree(entry);
3729 				continue;
3730 			}
3731 		}
3732 
3733 		ret = enter_record(hash, rec, 0);
3734 		if (ret < 0) {
3735 			kfree(entry);
3736 			count = ret;
3737 			goto out_unlock;
3738 		}
3739 
3740 		entry->ops = ops;
3741 		entry->ip = rec->ip;
3742 
3743 		key = hash_long(entry->ip, FTRACE_HASH_BITS);
3744 		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3745 
3746 	} while_for_each_ftrace_rec();
3747 
3748 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3749 
3750 	__enable_ftrace_function_probe(&old_hash_ops);
3751 
3752 	if (!ret)
3753 		free_ftrace_hash_rcu(old_hash);
3754 	else
3755 		count = ret;
3756 
3757  out_unlock:
3758 	mutex_unlock(&ftrace_lock);
3759  out:
3760 	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3761 	free_ftrace_hash(hash);
3762 
3763 	return count;
3764 }
3765 
3766 enum {
3767 	PROBE_TEST_FUNC		= 1,
3768 	PROBE_TEST_DATA		= 2
3769 };
3770 
3771 static void
3772 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3773 				  void *data, int flags)
3774 {
3775 	struct ftrace_func_entry *rec_entry;
3776 	struct ftrace_func_probe *entry;
3777 	struct ftrace_func_probe *p;
3778 	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3779 	struct ftrace_hash *old_hash = *orig_hash;
3780 	struct list_head free_list;
3781 	struct ftrace_hash *hash;
3782 	struct hlist_node *tmp;
3783 	char str[KSYM_SYMBOL_LEN];
3784 	int type = MATCH_FULL;
3785 	int i, len = 0;
3786 	char *search;
3787 	int ret;
3788 
3789 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3790 		glob = NULL;
3791 	else if (glob) {
3792 		int not;
3793 
3794 		type = filter_parse_regex(glob, strlen(glob), &search, &not);
3795 		len = strlen(search);
3796 
3797 		/* we do not support '!' for function probes */
3798 		if (WARN_ON(not))
3799 			return;
3800 	}
3801 
3802 	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3803 
3804 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3805 	if (!hash)
3806 		/* Hmm, should report this somehow */
3807 		goto out_unlock;
3808 
3809 	INIT_LIST_HEAD(&free_list);
3810 
3811 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3812 		struct hlist_head *hhd = &ftrace_func_hash[i];
3813 
3814 		hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3815 
3816 			/* break up if statements for readability */
3817 			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3818 				continue;
3819 
3820 			if ((flags & PROBE_TEST_DATA) && entry->data != data)
3821 				continue;
3822 
3823 			/* do this last, since it is the most expensive */
3824 			if (glob) {
3825 				kallsyms_lookup(entry->ip, NULL, NULL,
3826 						NULL, str);
3827 				if (!ftrace_match(str, glob, len, type))
3828 					continue;
3829 			}
3830 
3831 			rec_entry = ftrace_lookup_ip(hash, entry->ip);
3832 			/* It is possible more than one entry had this ip */
3833 			if (rec_entry)
3834 				free_hash_entry(hash, rec_entry);
3835 
3836 			hlist_del_rcu(&entry->node);
3837 			list_add(&entry->free_list, &free_list);
3838 		}
3839 	}
3840 	mutex_lock(&ftrace_lock);
3841 	__disable_ftrace_function_probe();
3842 	/*
3843 	 * Remove after the disable is called. Otherwise, if the last
3844 	 * probe is removed, a null hash means *all enabled*.
3845 	 */
3846 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3847 	synchronize_sched();
3848 	if (!ret)
3849 		free_ftrace_hash_rcu(old_hash);
3850 
3851 	list_for_each_entry_safe(entry, p, &free_list, free_list) {
3852 		list_del(&entry->free_list);
3853 		ftrace_free_entry(entry);
3854 	}
3855 	mutex_unlock(&ftrace_lock);
3856 
3857  out_unlock:
3858 	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3859 	free_ftrace_hash(hash);
3860 }
3861 
3862 void
3863 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3864 				void *data)
3865 {
3866 	__unregister_ftrace_function_probe(glob, ops, data,
3867 					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
3868 }
3869 
3870 void
3871 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3872 {
3873 	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3874 }
3875 
3876 void unregister_ftrace_function_probe_all(char *glob)
3877 {
3878 	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3879 }
3880 
3881 static LIST_HEAD(ftrace_commands);
3882 static DEFINE_MUTEX(ftrace_cmd_mutex);
3883 
3884 /*
3885  * Currently we only register ftrace commands from __init, so mark this
3886  * __init too.
3887  */
3888 __init int register_ftrace_command(struct ftrace_func_command *cmd)
3889 {
3890 	struct ftrace_func_command *p;
3891 	int ret = 0;
3892 
3893 	mutex_lock(&ftrace_cmd_mutex);
3894 	list_for_each_entry(p, &ftrace_commands, list) {
3895 		if (strcmp(cmd->name, p->name) == 0) {
3896 			ret = -EBUSY;
3897 			goto out_unlock;
3898 		}
3899 	}
3900 	list_add(&cmd->list, &ftrace_commands);
3901  out_unlock:
3902 	mutex_unlock(&ftrace_cmd_mutex);
3903 
3904 	return ret;
3905 }
3906 
3907 /*
3908  * Currently we only unregister ftrace commands from __init, so mark
3909  * this __init too.
3910  */
3911 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3912 {
3913 	struct ftrace_func_command *p, *n;
3914 	int ret = -ENODEV;
3915 
3916 	mutex_lock(&ftrace_cmd_mutex);
3917 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3918 		if (strcmp(cmd->name, p->name) == 0) {
3919 			ret = 0;
3920 			list_del_init(&p->list);
3921 			goto out_unlock;
3922 		}
3923 	}
3924  out_unlock:
3925 	mutex_unlock(&ftrace_cmd_mutex);
3926 
3927 	return ret;
3928 }
3929 
3930 static int ftrace_process_regex(struct ftrace_hash *hash,
3931 				char *buff, int len, int enable)
3932 {
3933 	char *func, *command, *next = buff;
3934 	struct ftrace_func_command *p;
3935 	int ret = -EINVAL;
3936 
3937 	func = strsep(&next, ":");
3938 
3939 	if (!next) {
3940 		ret = ftrace_match_records(hash, func, len);
3941 		if (!ret)
3942 			ret = -EINVAL;
3943 		if (ret < 0)
3944 			return ret;
3945 		return 0;
3946 	}
3947 
3948 	/* command found */
3949 
3950 	command = strsep(&next, ":");
3951 
3952 	mutex_lock(&ftrace_cmd_mutex);
3953 	list_for_each_entry(p, &ftrace_commands, list) {
3954 		if (strcmp(p->name, command) == 0) {
3955 			ret = p->func(hash, func, command, next, enable);
3956 			goto out_unlock;
3957 		}
3958 	}
3959  out_unlock:
3960 	mutex_unlock(&ftrace_cmd_mutex);
3961 
3962 	return ret;
3963 }
3964 
3965 static ssize_t
3966 ftrace_regex_write(struct file *file, const char __user *ubuf,
3967 		   size_t cnt, loff_t *ppos, int enable)
3968 {
3969 	struct ftrace_iterator *iter;
3970 	struct trace_parser *parser;
3971 	ssize_t ret, read;
3972 
3973 	if (!cnt)
3974 		return 0;
3975 
3976 	if (file->f_mode & FMODE_READ) {
3977 		struct seq_file *m = file->private_data;
3978 		iter = m->private;
3979 	} else
3980 		iter = file->private_data;
3981 
3982 	if (unlikely(ftrace_disabled))
3983 		return -ENODEV;
3984 
3985 	/* iter->hash is a local copy, so we don't need regex_lock */
3986 
3987 	parser = &iter->parser;
3988 	read = trace_get_user(parser, ubuf, cnt, ppos);
3989 
3990 	if (read >= 0 && trace_parser_loaded(parser) &&
3991 	    !trace_parser_cont(parser)) {
3992 		ret = ftrace_process_regex(iter->hash, parser->buffer,
3993 					   parser->idx, enable);
3994 		trace_parser_clear(parser);
3995 		if (ret < 0)
3996 			goto out;
3997 	}
3998 
3999 	ret = read;
4000  out:
4001 	return ret;
4002 }
4003 
4004 ssize_t
4005 ftrace_filter_write(struct file *file, const char __user *ubuf,
4006 		    size_t cnt, loff_t *ppos)
4007 {
4008 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4009 }
4010 
4011 ssize_t
4012 ftrace_notrace_write(struct file *file, const char __user *ubuf,
4013 		     size_t cnt, loff_t *ppos)
4014 {
4015 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4016 }
4017 
4018 static int
4019 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4020 {
4021 	struct ftrace_func_entry *entry;
4022 
4023 	if (!ftrace_location(ip))
4024 		return -EINVAL;
4025 
4026 	if (remove) {
4027 		entry = ftrace_lookup_ip(hash, ip);
4028 		if (!entry)
4029 			return -ENOENT;
4030 		free_hash_entry(hash, entry);
4031 		return 0;
4032 	}
4033 
4034 	return add_hash_entry(hash, ip);
4035 }
4036 
4037 static void ftrace_ops_update_code(struct ftrace_ops *ops,
4038 				   struct ftrace_ops_hash *old_hash)
4039 {
4040 	struct ftrace_ops *op;
4041 
4042 	if (!ftrace_enabled)
4043 		return;
4044 
4045 	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4046 		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4047 		return;
4048 	}
4049 
4050 	/*
4051 	 * If this is the shared global_ops filter, then we need to
4052 	 * check if there is another ops that shares it, is enabled.
4053 	 * If so, we still need to run the modify code.
4054 	 */
4055 	if (ops->func_hash != &global_ops.local_hash)
4056 		return;
4057 
4058 	do_for_each_ftrace_op(op, ftrace_ops_list) {
4059 		if (op->func_hash == &global_ops.local_hash &&
4060 		    op->flags & FTRACE_OPS_FL_ENABLED) {
4061 			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4062 			/* Only need to do this once */
4063 			return;
4064 		}
4065 	} while_for_each_ftrace_op(op);
4066 }
4067 
4068 static int
4069 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4070 		unsigned long ip, int remove, int reset, int enable)
4071 {
4072 	struct ftrace_hash **orig_hash;
4073 	struct ftrace_ops_hash old_hash_ops;
4074 	struct ftrace_hash *old_hash;
4075 	struct ftrace_hash *hash;
4076 	int ret;
4077 
4078 	if (unlikely(ftrace_disabled))
4079 		return -ENODEV;
4080 
4081 	mutex_lock(&ops->func_hash->regex_lock);
4082 
4083 	if (enable)
4084 		orig_hash = &ops->func_hash->filter_hash;
4085 	else
4086 		orig_hash = &ops->func_hash->notrace_hash;
4087 
4088 	if (reset)
4089 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4090 	else
4091 		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4092 
4093 	if (!hash) {
4094 		ret = -ENOMEM;
4095 		goto out_regex_unlock;
4096 	}
4097 
4098 	if (buf && !ftrace_match_records(hash, buf, len)) {
4099 		ret = -EINVAL;
4100 		goto out_regex_unlock;
4101 	}
4102 	if (ip) {
4103 		ret = ftrace_match_addr(hash, ip, remove);
4104 		if (ret < 0)
4105 			goto out_regex_unlock;
4106 	}
4107 
4108 	mutex_lock(&ftrace_lock);
4109 	old_hash = *orig_hash;
4110 	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4111 	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4112 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4113 	if (!ret) {
4114 		ftrace_ops_update_code(ops, &old_hash_ops);
4115 		free_ftrace_hash_rcu(old_hash);
4116 	}
4117 	mutex_unlock(&ftrace_lock);
4118 
4119  out_regex_unlock:
4120 	mutex_unlock(&ops->func_hash->regex_lock);
4121 
4122 	free_ftrace_hash(hash);
4123 	return ret;
4124 }
4125 
4126 static int
4127 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4128 		int reset, int enable)
4129 {
4130 	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
4131 }
4132 
4133 /**
4134  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4135  * @ops - the ops to set the filter with
4136  * @ip - the address to add to or remove from the filter.
4137  * @remove - non zero to remove the ip from the filter
4138  * @reset - non zero to reset all filters before applying this filter.
4139  *
4140  * Filters denote which functions should be enabled when tracing is enabled
4141  * If @ip is NULL, it failes to update filter.
4142  */
4143 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4144 			 int remove, int reset)
4145 {
4146 	ftrace_ops_init(ops);
4147 	return ftrace_set_addr(ops, ip, remove, reset, 1);
4148 }
4149 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
4150 
4151 static int
4152 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4153 		 int reset, int enable)
4154 {
4155 	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4156 }
4157 
4158 /**
4159  * ftrace_set_filter - set a function to filter on in ftrace
4160  * @ops - the ops to set the filter with
4161  * @buf - the string that holds the function filter text.
4162  * @len - the length of the string.
4163  * @reset - non zero to reset all filters before applying this filter.
4164  *
4165  * Filters denote which functions should be enabled when tracing is enabled.
4166  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4167  */
4168 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
4169 		       int len, int reset)
4170 {
4171 	ftrace_ops_init(ops);
4172 	return ftrace_set_regex(ops, buf, len, reset, 1);
4173 }
4174 EXPORT_SYMBOL_GPL(ftrace_set_filter);
4175 
4176 /**
4177  * ftrace_set_notrace - set a function to not trace in ftrace
4178  * @ops - the ops to set the notrace filter with
4179  * @buf - the string that holds the function notrace text.
4180  * @len - the length of the string.
4181  * @reset - non zero to reset all filters before applying this filter.
4182  *
4183  * Notrace Filters denote which functions should not be enabled when tracing
4184  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4185  * for tracing.
4186  */
4187 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
4188 			int len, int reset)
4189 {
4190 	ftrace_ops_init(ops);
4191 	return ftrace_set_regex(ops, buf, len, reset, 0);
4192 }
4193 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
4194 /**
4195  * ftrace_set_global_filter - set a function to filter on with global tracers
4196  * @buf - the string that holds the function filter text.
4197  * @len - the length of the string.
4198  * @reset - non zero to reset all filters before applying this filter.
4199  *
4200  * Filters denote which functions should be enabled when tracing is enabled.
4201  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4202  */
4203 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4204 {
4205 	ftrace_set_regex(&global_ops, buf, len, reset, 1);
4206 }
4207 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4208 
4209 /**
4210  * ftrace_set_global_notrace - set a function to not trace with global tracers
4211  * @buf - the string that holds the function notrace text.
4212  * @len - the length of the string.
4213  * @reset - non zero to reset all filters before applying this filter.
4214  *
4215  * Notrace Filters denote which functions should not be enabled when tracing
4216  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4217  * for tracing.
4218  */
4219 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4220 {
4221 	ftrace_set_regex(&global_ops, buf, len, reset, 0);
4222 }
4223 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
4224 
4225 /*
4226  * command line interface to allow users to set filters on boot up.
4227  */
4228 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
4229 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4230 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4231 
4232 /* Used by function selftest to not test if filter is set */
4233 bool ftrace_filter_param __initdata;
4234 
4235 static int __init set_ftrace_notrace(char *str)
4236 {
4237 	ftrace_filter_param = true;
4238 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
4239 	return 1;
4240 }
4241 __setup("ftrace_notrace=", set_ftrace_notrace);
4242 
4243 static int __init set_ftrace_filter(char *str)
4244 {
4245 	ftrace_filter_param = true;
4246 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
4247 	return 1;
4248 }
4249 __setup("ftrace_filter=", set_ftrace_filter);
4250 
4251 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4252 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4253 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4254 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
4255 
4256 static unsigned long save_global_trampoline;
4257 static unsigned long save_global_flags;
4258 
4259 static int __init set_graph_function(char *str)
4260 {
4261 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4262 	return 1;
4263 }
4264 __setup("ftrace_graph_filter=", set_graph_function);
4265 
4266 static int __init set_graph_notrace_function(char *str)
4267 {
4268 	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4269 	return 1;
4270 }
4271 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
4272 
4273 static void __init set_ftrace_early_graph(char *buf, int enable)
4274 {
4275 	int ret;
4276 	char *func;
4277 	unsigned long *table = ftrace_graph_funcs;
4278 	int *count = &ftrace_graph_count;
4279 
4280 	if (!enable) {
4281 		table = ftrace_graph_notrace_funcs;
4282 		count = &ftrace_graph_notrace_count;
4283 	}
4284 
4285 	while (buf) {
4286 		func = strsep(&buf, ",");
4287 		/* we allow only one expression at a time */
4288 		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
4289 		if (ret)
4290 			printk(KERN_DEBUG "ftrace: function %s not "
4291 					  "traceable\n", func);
4292 	}
4293 }
4294 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4295 
4296 void __init
4297 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4298 {
4299 	char *func;
4300 
4301 	ftrace_ops_init(ops);
4302 
4303 	while (buf) {
4304 		func = strsep(&buf, ",");
4305 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
4306 	}
4307 }
4308 
4309 static void __init set_ftrace_early_filters(void)
4310 {
4311 	if (ftrace_filter_buf[0])
4312 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4313 	if (ftrace_notrace_buf[0])
4314 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4315 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4316 	if (ftrace_graph_buf[0])
4317 		set_ftrace_early_graph(ftrace_graph_buf, 1);
4318 	if (ftrace_graph_notrace_buf[0])
4319 		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4320 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4321 }
4322 
4323 int ftrace_regex_release(struct inode *inode, struct file *file)
4324 {
4325 	struct seq_file *m = (struct seq_file *)file->private_data;
4326 	struct ftrace_ops_hash old_hash_ops;
4327 	struct ftrace_iterator *iter;
4328 	struct ftrace_hash **orig_hash;
4329 	struct ftrace_hash *old_hash;
4330 	struct trace_parser *parser;
4331 	int filter_hash;
4332 	int ret;
4333 
4334 	if (file->f_mode & FMODE_READ) {
4335 		iter = m->private;
4336 		seq_release(inode, file);
4337 	} else
4338 		iter = file->private_data;
4339 
4340 	parser = &iter->parser;
4341 	if (trace_parser_loaded(parser)) {
4342 		parser->buffer[parser->idx] = 0;
4343 		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4344 	}
4345 
4346 	trace_parser_put(parser);
4347 
4348 	mutex_lock(&iter->ops->func_hash->regex_lock);
4349 
4350 	if (file->f_mode & FMODE_WRITE) {
4351 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4352 
4353 		if (filter_hash)
4354 			orig_hash = &iter->ops->func_hash->filter_hash;
4355 		else
4356 			orig_hash = &iter->ops->func_hash->notrace_hash;
4357 
4358 		mutex_lock(&ftrace_lock);
4359 		old_hash = *orig_hash;
4360 		old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
4361 		old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
4362 		ret = ftrace_hash_move(iter->ops, filter_hash,
4363 				       orig_hash, iter->hash);
4364 		if (!ret) {
4365 			ftrace_ops_update_code(iter->ops, &old_hash_ops);
4366 			free_ftrace_hash_rcu(old_hash);
4367 		}
4368 		mutex_unlock(&ftrace_lock);
4369 	}
4370 
4371 	mutex_unlock(&iter->ops->func_hash->regex_lock);
4372 	free_ftrace_hash(iter->hash);
4373 	kfree(iter);
4374 
4375 	return 0;
4376 }
4377 
4378 static const struct file_operations ftrace_avail_fops = {
4379 	.open = ftrace_avail_open,
4380 	.read = seq_read,
4381 	.llseek = seq_lseek,
4382 	.release = seq_release_private,
4383 };
4384 
4385 static const struct file_operations ftrace_enabled_fops = {
4386 	.open = ftrace_enabled_open,
4387 	.read = seq_read,
4388 	.llseek = seq_lseek,
4389 	.release = seq_release_private,
4390 };
4391 
4392 static const struct file_operations ftrace_filter_fops = {
4393 	.open = ftrace_filter_open,
4394 	.read = seq_read,
4395 	.write = ftrace_filter_write,
4396 	.llseek = tracing_lseek,
4397 	.release = ftrace_regex_release,
4398 };
4399 
4400 static const struct file_operations ftrace_notrace_fops = {
4401 	.open = ftrace_notrace_open,
4402 	.read = seq_read,
4403 	.write = ftrace_notrace_write,
4404 	.llseek = tracing_lseek,
4405 	.release = ftrace_regex_release,
4406 };
4407 
4408 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4409 
4410 static DEFINE_MUTEX(graph_lock);
4411 
4412 int ftrace_graph_count;
4413 int ftrace_graph_notrace_count;
4414 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4415 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4416 
4417 struct ftrace_graph_data {
4418 	unsigned long *table;
4419 	size_t size;
4420 	int *count;
4421 	const struct seq_operations *seq_ops;
4422 };
4423 
4424 static void *
4425 __g_next(struct seq_file *m, loff_t *pos)
4426 {
4427 	struct ftrace_graph_data *fgd = m->private;
4428 
4429 	if (*pos >= *fgd->count)
4430 		return NULL;
4431 	return &fgd->table[*pos];
4432 }
4433 
4434 static void *
4435 g_next(struct seq_file *m, void *v, loff_t *pos)
4436 {
4437 	(*pos)++;
4438 	return __g_next(m, pos);
4439 }
4440 
4441 static void *g_start(struct seq_file *m, loff_t *pos)
4442 {
4443 	struct ftrace_graph_data *fgd = m->private;
4444 
4445 	mutex_lock(&graph_lock);
4446 
4447 	/* Nothing, tell g_show to print all functions are enabled */
4448 	if (!*fgd->count && !*pos)
4449 		return (void *)1;
4450 
4451 	return __g_next(m, pos);
4452 }
4453 
4454 static void g_stop(struct seq_file *m, void *p)
4455 {
4456 	mutex_unlock(&graph_lock);
4457 }
4458 
4459 static int g_show(struct seq_file *m, void *v)
4460 {
4461 	unsigned long *ptr = v;
4462 
4463 	if (!ptr)
4464 		return 0;
4465 
4466 	if (ptr == (unsigned long *)1) {
4467 		struct ftrace_graph_data *fgd = m->private;
4468 
4469 		if (fgd->table == ftrace_graph_funcs)
4470 			seq_puts(m, "#### all functions enabled ####\n");
4471 		else
4472 			seq_puts(m, "#### no functions disabled ####\n");
4473 		return 0;
4474 	}
4475 
4476 	seq_printf(m, "%ps\n", (void *)*ptr);
4477 
4478 	return 0;
4479 }
4480 
4481 static const struct seq_operations ftrace_graph_seq_ops = {
4482 	.start = g_start,
4483 	.next = g_next,
4484 	.stop = g_stop,
4485 	.show = g_show,
4486 };
4487 
4488 static int
4489 __ftrace_graph_open(struct inode *inode, struct file *file,
4490 		    struct ftrace_graph_data *fgd)
4491 {
4492 	int ret = 0;
4493 
4494 	mutex_lock(&graph_lock);
4495 	if ((file->f_mode & FMODE_WRITE) &&
4496 	    (file->f_flags & O_TRUNC)) {
4497 		*fgd->count = 0;
4498 		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
4499 	}
4500 	mutex_unlock(&graph_lock);
4501 
4502 	if (file->f_mode & FMODE_READ) {
4503 		ret = seq_open(file, fgd->seq_ops);
4504 		if (!ret) {
4505 			struct seq_file *m = file->private_data;
4506 			m->private = fgd;
4507 		}
4508 	} else
4509 		file->private_data = fgd;
4510 
4511 	return ret;
4512 }
4513 
4514 static int
4515 ftrace_graph_open(struct inode *inode, struct file *file)
4516 {
4517 	struct ftrace_graph_data *fgd;
4518 
4519 	if (unlikely(ftrace_disabled))
4520 		return -ENODEV;
4521 
4522 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4523 	if (fgd == NULL)
4524 		return -ENOMEM;
4525 
4526 	fgd->table = ftrace_graph_funcs;
4527 	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4528 	fgd->count = &ftrace_graph_count;
4529 	fgd->seq_ops = &ftrace_graph_seq_ops;
4530 
4531 	return __ftrace_graph_open(inode, file, fgd);
4532 }
4533 
4534 static int
4535 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4536 {
4537 	struct ftrace_graph_data *fgd;
4538 
4539 	if (unlikely(ftrace_disabled))
4540 		return -ENODEV;
4541 
4542 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4543 	if (fgd == NULL)
4544 		return -ENOMEM;
4545 
4546 	fgd->table = ftrace_graph_notrace_funcs;
4547 	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4548 	fgd->count = &ftrace_graph_notrace_count;
4549 	fgd->seq_ops = &ftrace_graph_seq_ops;
4550 
4551 	return __ftrace_graph_open(inode, file, fgd);
4552 }
4553 
4554 static int
4555 ftrace_graph_release(struct inode *inode, struct file *file)
4556 {
4557 	if (file->f_mode & FMODE_READ) {
4558 		struct seq_file *m = file->private_data;
4559 
4560 		kfree(m->private);
4561 		seq_release(inode, file);
4562 	} else {
4563 		kfree(file->private_data);
4564 	}
4565 
4566 	return 0;
4567 }
4568 
4569 static int
4570 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
4571 {
4572 	struct dyn_ftrace *rec;
4573 	struct ftrace_page *pg;
4574 	int search_len;
4575 	int fail = 1;
4576 	int type, not;
4577 	char *search;
4578 	bool exists;
4579 	int i;
4580 
4581 	/* decode regex */
4582 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
4583 	if (!not && *idx >= size)
4584 		return -EBUSY;
4585 
4586 	search_len = strlen(search);
4587 
4588 	mutex_lock(&ftrace_lock);
4589 
4590 	if (unlikely(ftrace_disabled)) {
4591 		mutex_unlock(&ftrace_lock);
4592 		return -ENODEV;
4593 	}
4594 
4595 	do_for_each_ftrace_rec(pg, rec) {
4596 
4597 		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4598 			/* if it is in the array */
4599 			exists = false;
4600 			for (i = 0; i < *idx; i++) {
4601 				if (array[i] == rec->ip) {
4602 					exists = true;
4603 					break;
4604 				}
4605 			}
4606 
4607 			if (!not) {
4608 				fail = 0;
4609 				if (!exists) {
4610 					array[(*idx)++] = rec->ip;
4611 					if (*idx >= size)
4612 						goto out;
4613 				}
4614 			} else {
4615 				if (exists) {
4616 					array[i] = array[--(*idx)];
4617 					array[*idx] = 0;
4618 					fail = 0;
4619 				}
4620 			}
4621 		}
4622 	} while_for_each_ftrace_rec();
4623 out:
4624 	mutex_unlock(&ftrace_lock);
4625 
4626 	if (fail)
4627 		return -EINVAL;
4628 
4629 	return 0;
4630 }
4631 
4632 static ssize_t
4633 ftrace_graph_write(struct file *file, const char __user *ubuf,
4634 		   size_t cnt, loff_t *ppos)
4635 {
4636 	struct trace_parser parser;
4637 	ssize_t read, ret = 0;
4638 	struct ftrace_graph_data *fgd = file->private_data;
4639 
4640 	if (!cnt)
4641 		return 0;
4642 
4643 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4644 		return -ENOMEM;
4645 
4646 	read = trace_get_user(&parser, ubuf, cnt, ppos);
4647 
4648 	if (read >= 0 && trace_parser_loaded((&parser))) {
4649 		parser.buffer[parser.idx] = 0;
4650 
4651 		mutex_lock(&graph_lock);
4652 
4653 		/* we allow only one expression at a time */
4654 		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4655 				      parser.buffer);
4656 
4657 		mutex_unlock(&graph_lock);
4658 	}
4659 
4660 	if (!ret)
4661 		ret = read;
4662 
4663 	trace_parser_put(&parser);
4664 
4665 	return ret;
4666 }
4667 
4668 static const struct file_operations ftrace_graph_fops = {
4669 	.open		= ftrace_graph_open,
4670 	.read		= seq_read,
4671 	.write		= ftrace_graph_write,
4672 	.llseek		= tracing_lseek,
4673 	.release	= ftrace_graph_release,
4674 };
4675 
4676 static const struct file_operations ftrace_graph_notrace_fops = {
4677 	.open		= ftrace_graph_notrace_open,
4678 	.read		= seq_read,
4679 	.write		= ftrace_graph_write,
4680 	.llseek		= tracing_lseek,
4681 	.release	= ftrace_graph_release,
4682 };
4683 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4684 
4685 void ftrace_create_filter_files(struct ftrace_ops *ops,
4686 				struct dentry *parent)
4687 {
4688 
4689 	trace_create_file("set_ftrace_filter", 0644, parent,
4690 			  ops, &ftrace_filter_fops);
4691 
4692 	trace_create_file("set_ftrace_notrace", 0644, parent,
4693 			  ops, &ftrace_notrace_fops);
4694 }
4695 
4696 /*
4697  * The name "destroy_filter_files" is really a misnomer. Although
4698  * in the future, it may actualy delete the files, but this is
4699  * really intended to make sure the ops passed in are disabled
4700  * and that when this function returns, the caller is free to
4701  * free the ops.
4702  *
4703  * The "destroy" name is only to match the "create" name that this
4704  * should be paired with.
4705  */
4706 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4707 {
4708 	mutex_lock(&ftrace_lock);
4709 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
4710 		ftrace_shutdown(ops, 0);
4711 	ops->flags |= FTRACE_OPS_FL_DELETED;
4712 	mutex_unlock(&ftrace_lock);
4713 }
4714 
4715 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
4716 {
4717 
4718 	trace_create_file("available_filter_functions", 0444,
4719 			d_tracer, NULL, &ftrace_avail_fops);
4720 
4721 	trace_create_file("enabled_functions", 0444,
4722 			d_tracer, NULL, &ftrace_enabled_fops);
4723 
4724 	ftrace_create_filter_files(&global_ops, d_tracer);
4725 
4726 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4727 	trace_create_file("set_graph_function", 0444, d_tracer,
4728 				    NULL,
4729 				    &ftrace_graph_fops);
4730 	trace_create_file("set_graph_notrace", 0444, d_tracer,
4731 				    NULL,
4732 				    &ftrace_graph_notrace_fops);
4733 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4734 
4735 	return 0;
4736 }
4737 
4738 static int ftrace_cmp_ips(const void *a, const void *b)
4739 {
4740 	const unsigned long *ipa = a;
4741 	const unsigned long *ipb = b;
4742 
4743 	if (*ipa > *ipb)
4744 		return 1;
4745 	if (*ipa < *ipb)
4746 		return -1;
4747 	return 0;
4748 }
4749 
4750 static void ftrace_swap_ips(void *a, void *b, int size)
4751 {
4752 	unsigned long *ipa = a;
4753 	unsigned long *ipb = b;
4754 	unsigned long t;
4755 
4756 	t = *ipa;
4757 	*ipa = *ipb;
4758 	*ipb = t;
4759 }
4760 
4761 static int ftrace_process_locs(struct module *mod,
4762 			       unsigned long *start,
4763 			       unsigned long *end)
4764 {
4765 	struct ftrace_page *start_pg;
4766 	struct ftrace_page *pg;
4767 	struct dyn_ftrace *rec;
4768 	unsigned long count;
4769 	unsigned long *p;
4770 	unsigned long addr;
4771 	unsigned long flags = 0; /* Shut up gcc */
4772 	int ret = -ENOMEM;
4773 
4774 	count = end - start;
4775 
4776 	if (!count)
4777 		return 0;
4778 
4779 	sort(start, count, sizeof(*start),
4780 	     ftrace_cmp_ips, ftrace_swap_ips);
4781 
4782 	start_pg = ftrace_allocate_pages(count);
4783 	if (!start_pg)
4784 		return -ENOMEM;
4785 
4786 	mutex_lock(&ftrace_lock);
4787 
4788 	/*
4789 	 * Core and each module needs their own pages, as
4790 	 * modules will free them when they are removed.
4791 	 * Force a new page to be allocated for modules.
4792 	 */
4793 	if (!mod) {
4794 		WARN_ON(ftrace_pages || ftrace_pages_start);
4795 		/* First initialization */
4796 		ftrace_pages = ftrace_pages_start = start_pg;
4797 	} else {
4798 		if (!ftrace_pages)
4799 			goto out;
4800 
4801 		if (WARN_ON(ftrace_pages->next)) {
4802 			/* Hmm, we have free pages? */
4803 			while (ftrace_pages->next)
4804 				ftrace_pages = ftrace_pages->next;
4805 		}
4806 
4807 		ftrace_pages->next = start_pg;
4808 	}
4809 
4810 	p = start;
4811 	pg = start_pg;
4812 	while (p < end) {
4813 		addr = ftrace_call_adjust(*p++);
4814 		/*
4815 		 * Some architecture linkers will pad between
4816 		 * the different mcount_loc sections of different
4817 		 * object files to satisfy alignments.
4818 		 * Skip any NULL pointers.
4819 		 */
4820 		if (!addr)
4821 			continue;
4822 
4823 		if (pg->index == pg->size) {
4824 			/* We should have allocated enough */
4825 			if (WARN_ON(!pg->next))
4826 				break;
4827 			pg = pg->next;
4828 		}
4829 
4830 		rec = &pg->records[pg->index++];
4831 		rec->ip = addr;
4832 	}
4833 
4834 	/* We should have used all pages */
4835 	WARN_ON(pg->next);
4836 
4837 	/* Assign the last page to ftrace_pages */
4838 	ftrace_pages = pg;
4839 
4840 	/*
4841 	 * We only need to disable interrupts on start up
4842 	 * because we are modifying code that an interrupt
4843 	 * may execute, and the modification is not atomic.
4844 	 * But for modules, nothing runs the code we modify
4845 	 * until we are finished with it, and there's no
4846 	 * reason to cause large interrupt latencies while we do it.
4847 	 */
4848 	if (!mod)
4849 		local_irq_save(flags);
4850 	ftrace_update_code(mod, start_pg);
4851 	if (!mod)
4852 		local_irq_restore(flags);
4853 	ret = 0;
4854  out:
4855 	mutex_unlock(&ftrace_lock);
4856 
4857 	return ret;
4858 }
4859 
4860 #ifdef CONFIG_MODULES
4861 
4862 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4863 
4864 void ftrace_release_mod(struct module *mod)
4865 {
4866 	struct dyn_ftrace *rec;
4867 	struct ftrace_page **last_pg;
4868 	struct ftrace_page *pg;
4869 	int order;
4870 
4871 	mutex_lock(&ftrace_lock);
4872 
4873 	if (ftrace_disabled)
4874 		goto out_unlock;
4875 
4876 	/*
4877 	 * Each module has its own ftrace_pages, remove
4878 	 * them from the list.
4879 	 */
4880 	last_pg = &ftrace_pages_start;
4881 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4882 		rec = &pg->records[0];
4883 		if (within_module_core(rec->ip, mod)) {
4884 			/*
4885 			 * As core pages are first, the first
4886 			 * page should never be a module page.
4887 			 */
4888 			if (WARN_ON(pg == ftrace_pages_start))
4889 				goto out_unlock;
4890 
4891 			/* Check if we are deleting the last page */
4892 			if (pg == ftrace_pages)
4893 				ftrace_pages = next_to_ftrace_page(last_pg);
4894 
4895 			*last_pg = pg->next;
4896 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4897 			free_pages((unsigned long)pg->records, order);
4898 			kfree(pg);
4899 		} else
4900 			last_pg = &pg->next;
4901 	}
4902  out_unlock:
4903 	mutex_unlock(&ftrace_lock);
4904 }
4905 
4906 static void ftrace_init_module(struct module *mod,
4907 			       unsigned long *start, unsigned long *end)
4908 {
4909 	if (ftrace_disabled || start == end)
4910 		return;
4911 	ftrace_process_locs(mod, start, end);
4912 }
4913 
4914 void ftrace_module_init(struct module *mod)
4915 {
4916 	ftrace_init_module(mod, mod->ftrace_callsites,
4917 			   mod->ftrace_callsites +
4918 			   mod->num_ftrace_callsites);
4919 }
4920 
4921 static int ftrace_module_notify_exit(struct notifier_block *self,
4922 				     unsigned long val, void *data)
4923 {
4924 	struct module *mod = data;
4925 
4926 	if (val == MODULE_STATE_GOING)
4927 		ftrace_release_mod(mod);
4928 
4929 	return 0;
4930 }
4931 #else
4932 static int ftrace_module_notify_exit(struct notifier_block *self,
4933 				     unsigned long val, void *data)
4934 {
4935 	return 0;
4936 }
4937 #endif /* CONFIG_MODULES */
4938 
4939 struct notifier_block ftrace_module_exit_nb = {
4940 	.notifier_call = ftrace_module_notify_exit,
4941 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
4942 };
4943 
4944 void __init ftrace_init(void)
4945 {
4946 	extern unsigned long __start_mcount_loc[];
4947 	extern unsigned long __stop_mcount_loc[];
4948 	unsigned long count, flags;
4949 	int ret;
4950 
4951 	local_irq_save(flags);
4952 	ret = ftrace_dyn_arch_init();
4953 	local_irq_restore(flags);
4954 	if (ret)
4955 		goto failed;
4956 
4957 	count = __stop_mcount_loc - __start_mcount_loc;
4958 	if (!count) {
4959 		pr_info("ftrace: No functions to be traced?\n");
4960 		goto failed;
4961 	}
4962 
4963 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
4964 		count, count / ENTRIES_PER_PAGE + 1);
4965 
4966 	last_ftrace_enabled = ftrace_enabled = 1;
4967 
4968 	ret = ftrace_process_locs(NULL,
4969 				  __start_mcount_loc,
4970 				  __stop_mcount_loc);
4971 
4972 	ret = register_module_notifier(&ftrace_module_exit_nb);
4973 	if (ret)
4974 		pr_warning("Failed to register trace ftrace module exit notifier\n");
4975 
4976 	set_ftrace_early_filters();
4977 
4978 	return;
4979  failed:
4980 	ftrace_disabled = 1;
4981 }
4982 
4983 /* Do nothing if arch does not support this */
4984 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
4985 {
4986 }
4987 
4988 static void ftrace_update_trampoline(struct ftrace_ops *ops)
4989 {
4990 
4991 /*
4992  * Currently there's no safe way to free a trampoline when the kernel
4993  * is configured with PREEMPT. That is because a task could be preempted
4994  * when it jumped to the trampoline, it may be preempted for a long time
4995  * depending on the system load, and currently there's no way to know
4996  * when it will be off the trampoline. If the trampoline is freed
4997  * too early, when the task runs again, it will be executing on freed
4998  * memory and crash.
4999  */
5000 #ifdef CONFIG_PREEMPT
5001 	/* Currently, only non dynamic ops can have a trampoline */
5002 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
5003 		return;
5004 #endif
5005 
5006 	arch_ftrace_update_trampoline(ops);
5007 }
5008 
5009 #else
5010 
5011 static struct ftrace_ops global_ops = {
5012 	.func			= ftrace_stub,
5013 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
5014 };
5015 
5016 static int __init ftrace_nodyn_init(void)
5017 {
5018 	ftrace_enabled = 1;
5019 	return 0;
5020 }
5021 core_initcall(ftrace_nodyn_init);
5022 
5023 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
5024 static inline void ftrace_startup_enable(int command) { }
5025 static inline void ftrace_startup_all(int command) { }
5026 /* Keep as macros so we do not need to define the commands */
5027 # define ftrace_startup(ops, command)					\
5028 	({								\
5029 		int ___ret = __register_ftrace_function(ops);		\
5030 		if (!___ret)						\
5031 			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
5032 		___ret;							\
5033 	})
5034 # define ftrace_shutdown(ops, command)					\
5035 	({								\
5036 		int ___ret = __unregister_ftrace_function(ops);		\
5037 		if (!___ret)						\
5038 			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
5039 		___ret;							\
5040 	})
5041 
5042 # define ftrace_startup_sysctl()	do { } while (0)
5043 # define ftrace_shutdown_sysctl()	do { } while (0)
5044 
5045 static inline int
5046 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
5047 {
5048 	return 1;
5049 }
5050 
5051 static void ftrace_update_trampoline(struct ftrace_ops *ops)
5052 {
5053 }
5054 
5055 #endif /* CONFIG_DYNAMIC_FTRACE */
5056 
5057 __init void ftrace_init_global_array_ops(struct trace_array *tr)
5058 {
5059 	tr->ops = &global_ops;
5060 	tr->ops->private = tr;
5061 }
5062 
5063 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5064 {
5065 	/* If we filter on pids, update to use the pid function */
5066 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
5067 		if (WARN_ON(tr->ops->func != ftrace_stub))
5068 			printk("ftrace ops had %pS for function\n",
5069 			       tr->ops->func);
5070 		/* Only the top level instance does pid tracing */
5071 		if (!list_empty(&ftrace_pids)) {
5072 			set_ftrace_pid_function(func);
5073 			func = ftrace_pid_func;
5074 		}
5075 	}
5076 	tr->ops->func = func;
5077 	tr->ops->private = tr;
5078 }
5079 
5080 void ftrace_reset_array_ops(struct trace_array *tr)
5081 {
5082 	tr->ops->func = ftrace_stub;
5083 }
5084 
5085 static void
5086 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
5087 			struct ftrace_ops *op, struct pt_regs *regs)
5088 {
5089 	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
5090 		return;
5091 
5092 	/*
5093 	 * Some of the ops may be dynamically allocated,
5094 	 * they must be freed after a synchronize_sched().
5095 	 */
5096 	preempt_disable_notrace();
5097 	trace_recursion_set(TRACE_CONTROL_BIT);
5098 
5099 	/*
5100 	 * Control funcs (perf) uses RCU. Only trace if
5101 	 * RCU is currently active.
5102 	 */
5103 	if (!rcu_is_watching())
5104 		goto out;
5105 
5106 	do_for_each_ftrace_op(op, ftrace_control_list) {
5107 		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
5108 		    !ftrace_function_local_disabled(op) &&
5109 		    ftrace_ops_test(op, ip, regs))
5110 			op->func(ip, parent_ip, op, regs);
5111 	} while_for_each_ftrace_op(op);
5112  out:
5113 	trace_recursion_clear(TRACE_CONTROL_BIT);
5114 	preempt_enable_notrace();
5115 }
5116 
5117 static struct ftrace_ops control_ops = {
5118 	.func	= ftrace_ops_control_func,
5119 	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
5120 	INIT_OPS_HASH(control_ops)
5121 };
5122 
5123 static inline void
5124 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5125 		       struct ftrace_ops *ignored, struct pt_regs *regs)
5126 {
5127 	struct ftrace_ops *op;
5128 	int bit;
5129 
5130 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5131 	if (bit < 0)
5132 		return;
5133 
5134 	/*
5135 	 * Some of the ops may be dynamically allocated,
5136 	 * they must be freed after a synchronize_sched().
5137 	 */
5138 	preempt_disable_notrace();
5139 	do_for_each_ftrace_op(op, ftrace_ops_list) {
5140 		if (ftrace_ops_test(op, ip, regs)) {
5141 			if (FTRACE_WARN_ON(!op->func)) {
5142 				pr_warn("op=%p %pS\n", op, op);
5143 				goto out;
5144 			}
5145 			op->func(ip, parent_ip, op, regs);
5146 		}
5147 	} while_for_each_ftrace_op(op);
5148 out:
5149 	preempt_enable_notrace();
5150 	trace_clear_recursion(bit);
5151 }
5152 
5153 /*
5154  * Some archs only support passing ip and parent_ip. Even though
5155  * the list function ignores the op parameter, we do not want any
5156  * C side effects, where a function is called without the caller
5157  * sending a third parameter.
5158  * Archs are to support both the regs and ftrace_ops at the same time.
5159  * If they support ftrace_ops, it is assumed they support regs.
5160  * If call backs want to use regs, they must either check for regs
5161  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
5162  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
5163  * An architecture can pass partial regs with ftrace_ops and still
5164  * set the ARCH_SUPPORT_FTARCE_OPS.
5165  */
5166 #if ARCH_SUPPORTS_FTRACE_OPS
5167 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5168 				 struct ftrace_ops *op, struct pt_regs *regs)
5169 {
5170 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
5171 }
5172 #else
5173 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
5174 {
5175 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
5176 }
5177 #endif
5178 
5179 /*
5180  * If there's only one function registered but it does not support
5181  * recursion, this function will be called by the mcount trampoline.
5182  * This function will handle recursion protection.
5183  */
5184 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
5185 				   struct ftrace_ops *op, struct pt_regs *regs)
5186 {
5187 	int bit;
5188 
5189 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5190 	if (bit < 0)
5191 		return;
5192 
5193 	op->func(ip, parent_ip, op, regs);
5194 
5195 	trace_clear_recursion(bit);
5196 }
5197 
5198 /**
5199  * ftrace_ops_get_func - get the function a trampoline should call
5200  * @ops: the ops to get the function for
5201  *
5202  * Normally the mcount trampoline will call the ops->func, but there
5203  * are times that it should not. For example, if the ops does not
5204  * have its own recursion protection, then it should call the
5205  * ftrace_ops_recurs_func() instead.
5206  *
5207  * Returns the function that the trampoline should call for @ops.
5208  */
5209 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
5210 {
5211 	/*
5212 	 * If this is a dynamic ops or we force list func,
5213 	 * then it needs to call the list anyway.
5214 	 */
5215 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
5216 		return ftrace_ops_list_func;
5217 
5218 	/*
5219 	 * If the func handles its own recursion, call it directly.
5220 	 * Otherwise call the recursion protected function that
5221 	 * will call the ftrace ops function.
5222 	 */
5223 	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
5224 		return ftrace_ops_recurs_func;
5225 
5226 	return ops->func;
5227 }
5228 
5229 static void clear_ftrace_swapper(void)
5230 {
5231 	struct task_struct *p;
5232 	int cpu;
5233 
5234 	get_online_cpus();
5235 	for_each_online_cpu(cpu) {
5236 		p = idle_task(cpu);
5237 		clear_tsk_trace_trace(p);
5238 	}
5239 	put_online_cpus();
5240 }
5241 
5242 static void set_ftrace_swapper(void)
5243 {
5244 	struct task_struct *p;
5245 	int cpu;
5246 
5247 	get_online_cpus();
5248 	for_each_online_cpu(cpu) {
5249 		p = idle_task(cpu);
5250 		set_tsk_trace_trace(p);
5251 	}
5252 	put_online_cpus();
5253 }
5254 
5255 static void clear_ftrace_pid(struct pid *pid)
5256 {
5257 	struct task_struct *p;
5258 
5259 	rcu_read_lock();
5260 	do_each_pid_task(pid, PIDTYPE_PID, p) {
5261 		clear_tsk_trace_trace(p);
5262 	} while_each_pid_task(pid, PIDTYPE_PID, p);
5263 	rcu_read_unlock();
5264 
5265 	put_pid(pid);
5266 }
5267 
5268 static void set_ftrace_pid(struct pid *pid)
5269 {
5270 	struct task_struct *p;
5271 
5272 	rcu_read_lock();
5273 	do_each_pid_task(pid, PIDTYPE_PID, p) {
5274 		set_tsk_trace_trace(p);
5275 	} while_each_pid_task(pid, PIDTYPE_PID, p);
5276 	rcu_read_unlock();
5277 }
5278 
5279 static void clear_ftrace_pid_task(struct pid *pid)
5280 {
5281 	if (pid == ftrace_swapper_pid)
5282 		clear_ftrace_swapper();
5283 	else
5284 		clear_ftrace_pid(pid);
5285 }
5286 
5287 static void set_ftrace_pid_task(struct pid *pid)
5288 {
5289 	if (pid == ftrace_swapper_pid)
5290 		set_ftrace_swapper();
5291 	else
5292 		set_ftrace_pid(pid);
5293 }
5294 
5295 static int ftrace_pid_add(int p)
5296 {
5297 	struct pid *pid;
5298 	struct ftrace_pid *fpid;
5299 	int ret = -EINVAL;
5300 
5301 	mutex_lock(&ftrace_lock);
5302 
5303 	if (!p)
5304 		pid = ftrace_swapper_pid;
5305 	else
5306 		pid = find_get_pid(p);
5307 
5308 	if (!pid)
5309 		goto out;
5310 
5311 	ret = 0;
5312 
5313 	list_for_each_entry(fpid, &ftrace_pids, list)
5314 		if (fpid->pid == pid)
5315 			goto out_put;
5316 
5317 	ret = -ENOMEM;
5318 
5319 	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
5320 	if (!fpid)
5321 		goto out_put;
5322 
5323 	list_add(&fpid->list, &ftrace_pids);
5324 	fpid->pid = pid;
5325 
5326 	set_ftrace_pid_task(pid);
5327 
5328 	ftrace_update_pid_func();
5329 
5330 	ftrace_startup_all(0);
5331 
5332 	mutex_unlock(&ftrace_lock);
5333 	return 0;
5334 
5335 out_put:
5336 	if (pid != ftrace_swapper_pid)
5337 		put_pid(pid);
5338 
5339 out:
5340 	mutex_unlock(&ftrace_lock);
5341 	return ret;
5342 }
5343 
5344 static void ftrace_pid_reset(void)
5345 {
5346 	struct ftrace_pid *fpid, *safe;
5347 
5348 	mutex_lock(&ftrace_lock);
5349 	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
5350 		struct pid *pid = fpid->pid;
5351 
5352 		clear_ftrace_pid_task(pid);
5353 
5354 		list_del(&fpid->list);
5355 		kfree(fpid);
5356 	}
5357 
5358 	ftrace_update_pid_func();
5359 	ftrace_startup_all(0);
5360 
5361 	mutex_unlock(&ftrace_lock);
5362 }
5363 
5364 static void *fpid_start(struct seq_file *m, loff_t *pos)
5365 {
5366 	mutex_lock(&ftrace_lock);
5367 
5368 	if (list_empty(&ftrace_pids) && (!*pos))
5369 		return (void *) 1;
5370 
5371 	return seq_list_start(&ftrace_pids, *pos);
5372 }
5373 
5374 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5375 {
5376 	if (v == (void *)1)
5377 		return NULL;
5378 
5379 	return seq_list_next(v, &ftrace_pids, pos);
5380 }
5381 
5382 static void fpid_stop(struct seq_file *m, void *p)
5383 {
5384 	mutex_unlock(&ftrace_lock);
5385 }
5386 
5387 static int fpid_show(struct seq_file *m, void *v)
5388 {
5389 	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
5390 
5391 	if (v == (void *)1) {
5392 		seq_puts(m, "no pid\n");
5393 		return 0;
5394 	}
5395 
5396 	if (fpid->pid == ftrace_swapper_pid)
5397 		seq_puts(m, "swapper tasks\n");
5398 	else
5399 		seq_printf(m, "%u\n", pid_vnr(fpid->pid));
5400 
5401 	return 0;
5402 }
5403 
5404 static const struct seq_operations ftrace_pid_sops = {
5405 	.start = fpid_start,
5406 	.next = fpid_next,
5407 	.stop = fpid_stop,
5408 	.show = fpid_show,
5409 };
5410 
5411 static int
5412 ftrace_pid_open(struct inode *inode, struct file *file)
5413 {
5414 	int ret = 0;
5415 
5416 	if ((file->f_mode & FMODE_WRITE) &&
5417 	    (file->f_flags & O_TRUNC))
5418 		ftrace_pid_reset();
5419 
5420 	if (file->f_mode & FMODE_READ)
5421 		ret = seq_open(file, &ftrace_pid_sops);
5422 
5423 	return ret;
5424 }
5425 
5426 static ssize_t
5427 ftrace_pid_write(struct file *filp, const char __user *ubuf,
5428 		   size_t cnt, loff_t *ppos)
5429 {
5430 	char buf[64], *tmp;
5431 	long val;
5432 	int ret;
5433 
5434 	if (cnt >= sizeof(buf))
5435 		return -EINVAL;
5436 
5437 	if (copy_from_user(&buf, ubuf, cnt))
5438 		return -EFAULT;
5439 
5440 	buf[cnt] = 0;
5441 
5442 	/*
5443 	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5444 	 * to clean the filter quietly.
5445 	 */
5446 	tmp = strstrip(buf);
5447 	if (strlen(tmp) == 0)
5448 		return 1;
5449 
5450 	ret = kstrtol(tmp, 10, &val);
5451 	if (ret < 0)
5452 		return ret;
5453 
5454 	ret = ftrace_pid_add(val);
5455 
5456 	return ret ? ret : cnt;
5457 }
5458 
5459 static int
5460 ftrace_pid_release(struct inode *inode, struct file *file)
5461 {
5462 	if (file->f_mode & FMODE_READ)
5463 		seq_release(inode, file);
5464 
5465 	return 0;
5466 }
5467 
5468 static const struct file_operations ftrace_pid_fops = {
5469 	.open		= ftrace_pid_open,
5470 	.write		= ftrace_pid_write,
5471 	.read		= seq_read,
5472 	.llseek		= tracing_lseek,
5473 	.release	= ftrace_pid_release,
5474 };
5475 
5476 static __init int ftrace_init_debugfs(void)
5477 {
5478 	struct dentry *d_tracer;
5479 
5480 	d_tracer = tracing_init_dentry();
5481 	if (IS_ERR(d_tracer))
5482 		return 0;
5483 
5484 	ftrace_init_dyn_debugfs(d_tracer);
5485 
5486 	trace_create_file("set_ftrace_pid", 0644, d_tracer,
5487 			    NULL, &ftrace_pid_fops);
5488 
5489 	ftrace_profile_debugfs(d_tracer);
5490 
5491 	return 0;
5492 }
5493 fs_initcall(ftrace_init_debugfs);
5494 
5495 /**
5496  * ftrace_kill - kill ftrace
5497  *
5498  * This function should be used by panic code. It stops ftrace
5499  * but in a not so nice way. If you need to simply kill ftrace
5500  * from a non-atomic section, use ftrace_kill.
5501  */
5502 void ftrace_kill(void)
5503 {
5504 	ftrace_disabled = 1;
5505 	ftrace_enabled = 0;
5506 	clear_ftrace_function();
5507 }
5508 
5509 /**
5510  * Test if ftrace is dead or not.
5511  */
5512 int ftrace_is_dead(void)
5513 {
5514 	return ftrace_disabled;
5515 }
5516 
5517 /**
5518  * register_ftrace_function - register a function for profiling
5519  * @ops - ops structure that holds the function for profiling.
5520  *
5521  * Register a function to be called by all functions in the
5522  * kernel.
5523  *
5524  * Note: @ops->func and all the functions it calls must be labeled
5525  *       with "notrace", otherwise it will go into a
5526  *       recursive loop.
5527  */
5528 int register_ftrace_function(struct ftrace_ops *ops)
5529 {
5530 	int ret = -1;
5531 
5532 	ftrace_ops_init(ops);
5533 
5534 	mutex_lock(&ftrace_lock);
5535 
5536 	ret = ftrace_startup(ops, 0);
5537 
5538 	mutex_unlock(&ftrace_lock);
5539 
5540 	return ret;
5541 }
5542 EXPORT_SYMBOL_GPL(register_ftrace_function);
5543 
5544 /**
5545  * unregister_ftrace_function - unregister a function for profiling.
5546  * @ops - ops structure that holds the function to unregister
5547  *
5548  * Unregister a function that was added to be called by ftrace profiling.
5549  */
5550 int unregister_ftrace_function(struct ftrace_ops *ops)
5551 {
5552 	int ret;
5553 
5554 	mutex_lock(&ftrace_lock);
5555 	ret = ftrace_shutdown(ops, 0);
5556 	mutex_unlock(&ftrace_lock);
5557 
5558 	return ret;
5559 }
5560 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5561 
5562 int
5563 ftrace_enable_sysctl(struct ctl_table *table, int write,
5564 		     void __user *buffer, size_t *lenp,
5565 		     loff_t *ppos)
5566 {
5567 	int ret = -ENODEV;
5568 
5569 	mutex_lock(&ftrace_lock);
5570 
5571 	if (unlikely(ftrace_disabled))
5572 		goto out;
5573 
5574 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
5575 
5576 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5577 		goto out;
5578 
5579 	last_ftrace_enabled = !!ftrace_enabled;
5580 
5581 	if (ftrace_enabled) {
5582 
5583 		/* we are starting ftrace again */
5584 		if (ftrace_ops_list != &ftrace_list_end)
5585 			update_ftrace_function();
5586 
5587 		ftrace_startup_sysctl();
5588 
5589 	} else {
5590 		/* stopping ftrace calls (just send to ftrace_stub) */
5591 		ftrace_trace_function = ftrace_stub;
5592 
5593 		ftrace_shutdown_sysctl();
5594 	}
5595 
5596  out:
5597 	mutex_unlock(&ftrace_lock);
5598 	return ret;
5599 }
5600 
5601 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5602 
5603 static struct ftrace_ops graph_ops = {
5604 	.func			= ftrace_stub,
5605 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
5606 				   FTRACE_OPS_FL_INITIALIZED |
5607 				   FTRACE_OPS_FL_STUB,
5608 #ifdef FTRACE_GRAPH_TRAMP_ADDR
5609 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
5610 	/* trampoline_size is only needed for dynamically allocated tramps */
5611 #endif
5612 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5613 };
5614 
5615 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5616 {
5617 	return 0;
5618 }
5619 
5620 /* The callbacks that hook a function */
5621 trace_func_graph_ret_t ftrace_graph_return =
5622 			(trace_func_graph_ret_t)ftrace_stub;
5623 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
5624 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
5625 
5626 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
5627 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5628 {
5629 	int i;
5630 	int ret = 0;
5631 	unsigned long flags;
5632 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5633 	struct task_struct *g, *t;
5634 
5635 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5636 		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5637 					* sizeof(struct ftrace_ret_stack),
5638 					GFP_KERNEL);
5639 		if (!ret_stack_list[i]) {
5640 			start = 0;
5641 			end = i;
5642 			ret = -ENOMEM;
5643 			goto free;
5644 		}
5645 	}
5646 
5647 	read_lock_irqsave(&tasklist_lock, flags);
5648 	do_each_thread(g, t) {
5649 		if (start == end) {
5650 			ret = -EAGAIN;
5651 			goto unlock;
5652 		}
5653 
5654 		if (t->ret_stack == NULL) {
5655 			atomic_set(&t->tracing_graph_pause, 0);
5656 			atomic_set(&t->trace_overrun, 0);
5657 			t->curr_ret_stack = -1;
5658 			/* Make sure the tasks see the -1 first: */
5659 			smp_wmb();
5660 			t->ret_stack = ret_stack_list[start++];
5661 		}
5662 	} while_each_thread(g, t);
5663 
5664 unlock:
5665 	read_unlock_irqrestore(&tasklist_lock, flags);
5666 free:
5667 	for (i = start; i < end; i++)
5668 		kfree(ret_stack_list[i]);
5669 	return ret;
5670 }
5671 
5672 static void
5673 ftrace_graph_probe_sched_switch(void *ignore,
5674 			struct task_struct *prev, struct task_struct *next)
5675 {
5676 	unsigned long long timestamp;
5677 	int index;
5678 
5679 	/*
5680 	 * Does the user want to count the time a function was asleep.
5681 	 * If so, do not update the time stamps.
5682 	 */
5683 	if (trace_flags & TRACE_ITER_SLEEP_TIME)
5684 		return;
5685 
5686 	timestamp = trace_clock_local();
5687 
5688 	prev->ftrace_timestamp = timestamp;
5689 
5690 	/* only process tasks that we timestamped */
5691 	if (!next->ftrace_timestamp)
5692 		return;
5693 
5694 	/*
5695 	 * Update all the counters in next to make up for the
5696 	 * time next was sleeping.
5697 	 */
5698 	timestamp -= next->ftrace_timestamp;
5699 
5700 	for (index = next->curr_ret_stack; index >= 0; index--)
5701 		next->ret_stack[index].calltime += timestamp;
5702 }
5703 
5704 /* Allocate a return stack for each task */
5705 static int start_graph_tracing(void)
5706 {
5707 	struct ftrace_ret_stack **ret_stack_list;
5708 	int ret, cpu;
5709 
5710 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5711 				sizeof(struct ftrace_ret_stack *),
5712 				GFP_KERNEL);
5713 
5714 	if (!ret_stack_list)
5715 		return -ENOMEM;
5716 
5717 	/* The cpu_boot init_task->ret_stack will never be freed */
5718 	for_each_online_cpu(cpu) {
5719 		if (!idle_task(cpu)->ret_stack)
5720 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5721 	}
5722 
5723 	do {
5724 		ret = alloc_retstack_tasklist(ret_stack_list);
5725 	} while (ret == -EAGAIN);
5726 
5727 	if (!ret) {
5728 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5729 		if (ret)
5730 			pr_info("ftrace_graph: Couldn't activate tracepoint"
5731 				" probe to kernel_sched_switch\n");
5732 	}
5733 
5734 	kfree(ret_stack_list);
5735 	return ret;
5736 }
5737 
5738 /*
5739  * Hibernation protection.
5740  * The state of the current task is too much unstable during
5741  * suspend/restore to disk. We want to protect against that.
5742  */
5743 static int
5744 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5745 							void *unused)
5746 {
5747 	switch (state) {
5748 	case PM_HIBERNATION_PREPARE:
5749 		pause_graph_tracing();
5750 		break;
5751 
5752 	case PM_POST_HIBERNATION:
5753 		unpause_graph_tracing();
5754 		break;
5755 	}
5756 	return NOTIFY_DONE;
5757 }
5758 
5759 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5760 {
5761 	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5762 		return 0;
5763 	return __ftrace_graph_entry(trace);
5764 }
5765 
5766 /*
5767  * The function graph tracer should only trace the functions defined
5768  * by set_ftrace_filter and set_ftrace_notrace. If another function
5769  * tracer ops is registered, the graph tracer requires testing the
5770  * function against the global ops, and not just trace any function
5771  * that any ftrace_ops registered.
5772  */
5773 static void update_function_graph_func(void)
5774 {
5775 	struct ftrace_ops *op;
5776 	bool do_test = false;
5777 
5778 	/*
5779 	 * The graph and global ops share the same set of functions
5780 	 * to test. If any other ops is on the list, then
5781 	 * the graph tracing needs to test if its the function
5782 	 * it should call.
5783 	 */
5784 	do_for_each_ftrace_op(op, ftrace_ops_list) {
5785 		if (op != &global_ops && op != &graph_ops &&
5786 		    op != &ftrace_list_end) {
5787 			do_test = true;
5788 			/* in double loop, break out with goto */
5789 			goto out;
5790 		}
5791 	} while_for_each_ftrace_op(op);
5792  out:
5793 	if (do_test)
5794 		ftrace_graph_entry = ftrace_graph_entry_test;
5795 	else
5796 		ftrace_graph_entry = __ftrace_graph_entry;
5797 }
5798 
5799 static struct notifier_block ftrace_suspend_notifier = {
5800 	.notifier_call = ftrace_suspend_notifier_call,
5801 };
5802 
5803 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5804 			trace_func_graph_ent_t entryfunc)
5805 {
5806 	int ret = 0;
5807 
5808 	mutex_lock(&ftrace_lock);
5809 
5810 	/* we currently allow only one tracer registered at a time */
5811 	if (ftrace_graph_active) {
5812 		ret = -EBUSY;
5813 		goto out;
5814 	}
5815 
5816 	register_pm_notifier(&ftrace_suspend_notifier);
5817 
5818 	ftrace_graph_active++;
5819 	ret = start_graph_tracing();
5820 	if (ret) {
5821 		ftrace_graph_active--;
5822 		goto out;
5823 	}
5824 
5825 	ftrace_graph_return = retfunc;
5826 
5827 	/*
5828 	 * Update the indirect function to the entryfunc, and the
5829 	 * function that gets called to the entry_test first. Then
5830 	 * call the update fgraph entry function to determine if
5831 	 * the entryfunc should be called directly or not.
5832 	 */
5833 	__ftrace_graph_entry = entryfunc;
5834 	ftrace_graph_entry = ftrace_graph_entry_test;
5835 	update_function_graph_func();
5836 
5837 	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
5838 out:
5839 	mutex_unlock(&ftrace_lock);
5840 	return ret;
5841 }
5842 
5843 void unregister_ftrace_graph(void)
5844 {
5845 	mutex_lock(&ftrace_lock);
5846 
5847 	if (unlikely(!ftrace_graph_active))
5848 		goto out;
5849 
5850 	ftrace_graph_active--;
5851 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5852 	ftrace_graph_entry = ftrace_graph_entry_stub;
5853 	__ftrace_graph_entry = ftrace_graph_entry_stub;
5854 	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
5855 	unregister_pm_notifier(&ftrace_suspend_notifier);
5856 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5857 
5858 #ifdef CONFIG_DYNAMIC_FTRACE
5859 	/*
5860 	 * Function graph does not allocate the trampoline, but
5861 	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
5862 	 * if one was used.
5863 	 */
5864 	global_ops.trampoline = save_global_trampoline;
5865 	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
5866 		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
5867 #endif
5868 
5869  out:
5870 	mutex_unlock(&ftrace_lock);
5871 }
5872 
5873 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5874 
5875 static void
5876 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5877 {
5878 	atomic_set(&t->tracing_graph_pause, 0);
5879 	atomic_set(&t->trace_overrun, 0);
5880 	t->ftrace_timestamp = 0;
5881 	/* make curr_ret_stack visible before we add the ret_stack */
5882 	smp_wmb();
5883 	t->ret_stack = ret_stack;
5884 }
5885 
5886 /*
5887  * Allocate a return stack for the idle task. May be the first
5888  * time through, or it may be done by CPU hotplug online.
5889  */
5890 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5891 {
5892 	t->curr_ret_stack = -1;
5893 	/*
5894 	 * The idle task has no parent, it either has its own
5895 	 * stack or no stack at all.
5896 	 */
5897 	if (t->ret_stack)
5898 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5899 
5900 	if (ftrace_graph_active) {
5901 		struct ftrace_ret_stack *ret_stack;
5902 
5903 		ret_stack = per_cpu(idle_ret_stack, cpu);
5904 		if (!ret_stack) {
5905 			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5906 					    * sizeof(struct ftrace_ret_stack),
5907 					    GFP_KERNEL);
5908 			if (!ret_stack)
5909 				return;
5910 			per_cpu(idle_ret_stack, cpu) = ret_stack;
5911 		}
5912 		graph_init_task(t, ret_stack);
5913 	}
5914 }
5915 
5916 /* Allocate a return stack for newly created task */
5917 void ftrace_graph_init_task(struct task_struct *t)
5918 {
5919 	/* Make sure we do not use the parent ret_stack */
5920 	t->ret_stack = NULL;
5921 	t->curr_ret_stack = -1;
5922 
5923 	if (ftrace_graph_active) {
5924 		struct ftrace_ret_stack *ret_stack;
5925 
5926 		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5927 				* sizeof(struct ftrace_ret_stack),
5928 				GFP_KERNEL);
5929 		if (!ret_stack)
5930 			return;
5931 		graph_init_task(t, ret_stack);
5932 	}
5933 }
5934 
5935 void ftrace_graph_exit_task(struct task_struct *t)
5936 {
5937 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
5938 
5939 	t->ret_stack = NULL;
5940 	/* NULL must become visible to IRQs before we free it: */
5941 	barrier();
5942 
5943 	kfree(ret_stack);
5944 }
5945 #endif
5946