xref: /linux/kernel/trace/ftrace.c (revision 69c5079b49fa120c1a108b6e28b3a6a8e4ae2db5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Infrastructure for profiling code inserted by 'gcc -pg'.
4  *
5  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7  *
8  * Originally ported from the -rt patch by:
9  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10  *
11  * Based on code in the latency_tracer, that is:
12  *
13  *  Copyright (C) 2004-2006 Ingo Molnar
14  *  Copyright (C) 2004 Nadia Yvette Chambers
15  */
16 
17 #include <linux/stop_machine.h>
18 #include <linux/clocksource.h>
19 #include <linux/sched/task.h>
20 #include <linux/kallsyms.h>
21 #include <linux/security.h>
22 #include <linux/seq_file.h>
23 #include <linux/tracefs.h>
24 #include <linux/hardirq.h>
25 #include <linux/kthread.h>
26 #include <linux/uaccess.h>
27 #include <linux/bsearch.h>
28 #include <linux/module.h>
29 #include <linux/ftrace.h>
30 #include <linux/sysctl.h>
31 #include <linux/slab.h>
32 #include <linux/ctype.h>
33 #include <linux/sort.h>
34 #include <linux/list.h>
35 #include <linux/hash.h>
36 #include <linux/rcupdate.h>
37 #include <linux/kprobes.h>
38 
39 #include <trace/events/sched.h>
40 
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43 
44 #include "ftrace_internal.h"
45 #include "trace_output.h"
46 #include "trace_stat.h"
47 
48 /* Flags that do not get reset */
49 #define FTRACE_NOCLEAR_FLAGS	(FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
50 				 FTRACE_FL_MODIFIED)
51 
52 #define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"
53 
54 #define FTRACE_WARN_ON(cond)			\
55 	({					\
56 		int ___r = cond;		\
57 		if (WARN_ON(___r))		\
58 			ftrace_kill();		\
59 		___r;				\
60 	})
61 
62 #define FTRACE_WARN_ON_ONCE(cond)		\
63 	({					\
64 		int ___r = cond;		\
65 		if (WARN_ON_ONCE(___r))		\
66 			ftrace_kill();		\
67 		___r;				\
68 	})
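
/*
 * Both macros are statement expressions that evaluate to the tested
 * condition, so a caller can warn and bail out in one step, e.g.:
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return;
 *
 * A true condition prints the warning and shuts ftrace down via
 * ftrace_kill().
 */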
69 
70 /* hash bits for specific function selection */
71 #define FTRACE_HASH_DEFAULT_BITS 10
72 #define FTRACE_HASH_MAX_BITS 12
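/* i.e. 1 << 10 = 1024 buckets by default, capped at 1 << 12 = 4096 */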
73 
74 #ifdef CONFIG_DYNAMIC_FTRACE
75 #define INIT_OPS_HASH(opsname)	\
76 	.func_hash		= &opsname.local_hash,			\
77 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), \
78 	.subop_list		= LIST_HEAD_INIT(opsname.subop_list),
79 #else
80 #define INIT_OPS_HASH(opsname)
81 #endif
82 
83 enum {
84 	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
85 	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
86 };
87 
88 struct ftrace_ops ftrace_list_end __read_mostly = {
89 	.func		= ftrace_stub,
90 	.flags		= FTRACE_OPS_FL_STUB,
91 	INIT_OPS_HASH(ftrace_list_end)
92 };
93 
94 /* ftrace_enabled is a method to turn ftrace on or off */
95 int ftrace_enabled __read_mostly;
96 static int __maybe_unused last_ftrace_enabled;
97 
98 /* Current function tracing op */
99 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
100 /* What to set function_trace_op to */
101 static struct ftrace_ops *set_function_trace_op;
102 
103 bool ftrace_pids_enabled(struct ftrace_ops *ops)
104 {
105 	struct trace_array *tr;
106 
107 	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
108 		return false;
109 
110 	tr = ops->private;
111 
112 	return tr->function_pids != NULL || tr->function_no_pids != NULL;
113 }
114 
115 static void ftrace_update_trampoline(struct ftrace_ops *ops);
116 
117 /*
118  * ftrace_disabled is set when an anomaly is discovered.
119  * ftrace_disabled is much stronger than ftrace_enabled.
120  */
121 static int ftrace_disabled __read_mostly;
122 
123 DEFINE_MUTEX(ftrace_lock);
124 
125 struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = (struct ftrace_ops __rcu *)&ftrace_list_end;
126 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
127 struct ftrace_ops global_ops;
128 
129 /* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
130 void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
131 			  struct ftrace_ops *op, struct ftrace_regs *fregs);
132 
133 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
134 /*
135  * Stub used to invoke the list ops without requiring a separate trampoline.
136  */
137 const struct ftrace_ops ftrace_list_ops = {
138 	.func	= ftrace_ops_list_func,
139 	.flags	= FTRACE_OPS_FL_STUB,
140 };
141 
142 static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
143 				struct ftrace_ops *op,
144 				struct ftrace_regs *fregs)
145 {
146 	/* do nothing */
147 }
148 
149 /*
150  * Stub used when a call site is disabled. May be called transiently by threads
151  * which have made it into ftrace_caller but haven't yet recovered the ops at
152  * the point the call site is disabled.
153  */
154 const struct ftrace_ops ftrace_nop_ops = {
155 	.func	= ftrace_ops_nop_func,
156 	.flags  = FTRACE_OPS_FL_STUB,
157 };
158 #endif
159 
160 static inline void ftrace_ops_init(struct ftrace_ops *ops)
161 {
162 #ifdef CONFIG_DYNAMIC_FTRACE
163 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
164 		mutex_init(&ops->local_hash.regex_lock);
165 		INIT_LIST_HEAD(&ops->subop_list);
166 		ops->func_hash = &ops->local_hash;
167 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
168 	}
169 #endif
170 }
171 
172 /* Call this function when a callback filters on set_ftrace_pid */
173 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
174 			    struct ftrace_ops *op, struct ftrace_regs *fregs)
175 {
176 	struct trace_array *tr = op->private;
177 	int pid;
178 
179 	if (tr) {
180 		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
181 		if (pid == FTRACE_PID_IGNORE)
182 			return;
183 		if (pid != FTRACE_PID_TRACE &&
184 		    pid != current->pid)
185 			return;
186 	}
187 
188 	op->saved_func(ip, parent_ip, op, fregs);
189 }
190 
191 void ftrace_sync_ipi(void *data)
192 {
193 	/* Probably not needed, but do it anyway */
194 	smp_rmb();
195 }
196 
197 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
198 {
199 	/*
200 	 * If this is a dynamic or RCU ops, or we force list func,
201 	 * then it needs to call the list anyway.
202 	 */
203 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
204 	    FTRACE_FORCE_LIST_FUNC)
205 		return ftrace_ops_list_func;
206 
207 	return ftrace_ops_get_func(ops);
208 }
209 
210 static void update_ftrace_function(void)
211 {
212 	ftrace_func_t func;
213 
214 	/*
215 	 * Prepare the ftrace_ops that the arch callback will use.
216 	 * If there's only one ftrace_ops registered, the ftrace_ops_list
217 	 * will point to the ops we want.
218 	 */
219 	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
220 						lockdep_is_held(&ftrace_lock));
221 
222 	/* If there's no ftrace_ops registered, just call the stub function */
223 	if (set_function_trace_op == &ftrace_list_end) {
224 		func = ftrace_stub;
225 
226 	/*
227 	 * If we are at the end of the list and this ops is
228 	 * recursion safe and not dynamic and the arch supports passing ops,
229 	 * then have the mcount trampoline call the function directly.
230 	 */
231 	} else if (rcu_dereference_protected(ftrace_ops_list->next,
232 			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
233 		func = ftrace_ops_get_list_func(ftrace_ops_list);
234 
235 	} else {
236 		/* Just use the default ftrace_ops */
237 		set_function_trace_op = &ftrace_list_end;
238 		func = ftrace_ops_list_func;
239 	}
240 
241 	/* If there's no change, then do nothing more here */
242 	if (ftrace_trace_function == func)
243 		return;
244 
245 	/*
246 	 * If we are using the list function, it doesn't care
247 	 * about the function_trace_ops.
248 	 */
249 	if (func == ftrace_ops_list_func) {
250 		ftrace_trace_function = func;
251 		/*
252 		 * Don't even bother setting function_trace_ops,
253 		 * it would be racy to do so anyway.
254 		 */
255 		return;
256 	}
257 
258 #ifndef CONFIG_DYNAMIC_FTRACE
259 	/*
260 	 * For static tracing, we need to be a bit more careful.
261 	 * The function change takes effect immediately. Thus,
262 	 * we need to coordinate the setting of the function_trace_ops
263 	 * with the setting of the ftrace_trace_function.
264 	 *
265 	 * Set the function to the list ops, which will call the
266 	 * function we want, albeit indirectly, but it handles the
267 	 * ftrace_ops and doesn't depend on function_trace_op.
268 	 */
269 	ftrace_trace_function = ftrace_ops_list_func;
270 	/*
271 	 * Make sure all CPUs see this. Yes this is slow, but static
272 	 * tracing is slow and nasty to have enabled.
273 	 */
274 	synchronize_rcu_tasks_rude();
275 	/* Now all cpus are using the list ops. */
276 	function_trace_op = set_function_trace_op;
277 	/* Make sure the function_trace_op is visible on all CPUs */
278 	smp_wmb();
279 	/* Nasty way to force a rmb on all cpus */
280 	smp_call_function(ftrace_sync_ipi, NULL, 1);
281 	/* OK, we are all set to update the ftrace_trace_function now! */
282 #endif /* !CONFIG_DYNAMIC_FTRACE */
283 
284 	ftrace_trace_function = func;
285 }
286 
287 static void add_ftrace_ops(struct ftrace_ops __rcu **list,
288 			   struct ftrace_ops *ops)
289 {
290 	rcu_assign_pointer(ops->next, *list);
291 
292 	/*
293 	 * We are entering ops into the list but another
294 	 * CPU might be walking that list. We need to make sure
295 	 * the ops->next pointer is valid before another CPU sees
296 	 * the ops pointer included in the list.
297 	 */
298 	rcu_assign_pointer(*list, ops);
299 }
300 
301 static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
302 			     struct ftrace_ops *ops)
303 {
304 	struct ftrace_ops **p;
305 
306 	/*
307 	 * If we are removing the last function, then simply point
308 	 * to the ftrace_stub.
309 	 */
310 	if (rcu_dereference_protected(*list,
311 			lockdep_is_held(&ftrace_lock)) == ops &&
312 	    rcu_dereference_protected(ops->next,
313 			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
314 		rcu_assign_pointer(*list, &ftrace_list_end);
315 		return 0;
316 	}
317 
318 	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
319 		if (*p == ops)
320 			break;
321 
322 	if (*p != ops)
323 		return -1;
324 
325 	*p = (*p)->next;
326 	return 0;
327 }
328 
329 static void ftrace_update_trampoline(struct ftrace_ops *ops);
330 
331 int __register_ftrace_function(struct ftrace_ops *ops)
332 {
333 	if (ops->flags & FTRACE_OPS_FL_DELETED)
334 		return -EINVAL;
335 
336 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
337 		return -EBUSY;
338 
339 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
340 	/*
341 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
342 	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
343 	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
344 	 */
345 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
346 	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
347 		return -EINVAL;
348 
349 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
350 		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
351 #endif
352 	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
353 		return -EBUSY;
354 
355 	if (!is_kernel_core_data((unsigned long)ops))
356 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
357 
358 	add_ftrace_ops(&ftrace_ops_list, ops);
359 
360 	/* Always save the function, and reset at unregistering */
361 	ops->saved_func = ops->func;
362 
363 	if (ftrace_pids_enabled(ops))
364 		ops->func = ftrace_pid_func;
365 
366 	ftrace_update_trampoline(ops);
367 
368 	if (ftrace_enabled)
369 		update_ftrace_function();
370 
371 	return 0;
372 }
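
/*
 * A minimal (hypothetical) caller sketch; real users go through
 * register_ftrace_function(), which takes ftrace_lock and ends up
 * calling the function above:
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * An ops living in core kernel data like my_ops is not marked
 * FTRACE_OPS_FL_DYNAMIC by the is_kernel_core_data() check above.
 */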
373 
374 int __unregister_ftrace_function(struct ftrace_ops *ops)
375 {
376 	int ret;
377 
378 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
379 		return -EBUSY;
380 
381 	ret = remove_ftrace_ops(&ftrace_ops_list, ops);
382 
383 	if (ret < 0)
384 		return ret;
385 
386 	if (ftrace_enabled)
387 		update_ftrace_function();
388 
389 	ops->func = ops->saved_func;
390 
391 	return 0;
392 }
393 
394 static void ftrace_update_pid_func(void)
395 {
396 	struct ftrace_ops *op;
397 
398 	/* Only do something if we are tracing something */
399 	if (ftrace_trace_function == ftrace_stub)
400 		return;
401 
402 	do_for_each_ftrace_op(op, ftrace_ops_list) {
403 		if (op->flags & FTRACE_OPS_FL_PID) {
404 			op->func = ftrace_pids_enabled(op) ?
405 				ftrace_pid_func : op->saved_func;
406 			ftrace_update_trampoline(op);
407 		}
408 	} while_for_each_ftrace_op(op);
409 
410 	fgraph_update_pid_func();
411 
412 	update_ftrace_function();
413 }
414 
415 #ifdef CONFIG_FUNCTION_PROFILER
416 struct ftrace_profile {
417 	struct hlist_node		node;
418 	unsigned long			ip;
419 	unsigned long			counter;
420 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
421 	unsigned long long		time;
422 	unsigned long long		time_squared;
423 #endif
424 };
425 
426 struct ftrace_profile_page {
427 	struct ftrace_profile_page	*next;
428 	unsigned long			index;
429 	struct ftrace_profile		records[];
430 };
431 
432 struct ftrace_profile_stat {
433 	atomic_t			disabled;
434 	struct hlist_head		*hash;
435 	struct ftrace_profile_page	*pages;
436 	struct ftrace_profile_page	*start;
437 	struct tracer_stat		stat;
438 };
439 
440 #define PROFILE_RECORDS_SIZE						\
441 	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
442 
443 #define PROFILES_PER_PAGE					\
444 	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
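
/*
 * For example, on a 64-bit build with 4 KiB pages and the function
 * graph tracer enabled, struct ftrace_profile is 48 bytes and the
 * page header is 16 bytes, so PROFILES_PER_PAGE works out to
 * (4096 - 16) / 48 = 85 records (exact sizes vary by configuration).
 */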
445 
446 static int ftrace_profile_enabled __read_mostly;
447 
448 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
449 static DEFINE_MUTEX(ftrace_profile_lock);
450 
451 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
452 
453 #define FTRACE_PROFILE_HASH_BITS 10
454 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
455 
456 static void *
457 function_stat_next(void *v, int idx)
458 {
459 	struct ftrace_profile *rec = v;
460 	struct ftrace_profile_page *pg;
461 
462 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
463 
464  again:
465 	if (idx != 0)
466 		rec++;
467 
468 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
469 		pg = pg->next;
470 		if (!pg)
471 			return NULL;
472 		rec = &pg->records[0];
473 		if (!rec->counter)
474 			goto again;
475 	}
476 
477 	return rec;
478 }
479 
480 static void *function_stat_start(struct tracer_stat *trace)
481 {
482 	struct ftrace_profile_stat *stat =
483 		container_of(trace, struct ftrace_profile_stat, stat);
484 
485 	if (!stat || !stat->start)
486 		return NULL;
487 
488 	return function_stat_next(&stat->start->records[0], 0);
489 }
490 
491 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
492 /* function graph compares on total time */
493 static int function_stat_cmp(const void *p1, const void *p2)
494 {
495 	const struct ftrace_profile *a = p1;
496 	const struct ftrace_profile *b = p2;
497 
498 	if (a->time < b->time)
499 		return -1;
500 	if (a->time > b->time)
501 		return 1;
502 	else
503 		return 0;
504 }
505 #else
506 /* without function graph, compare against hits */
507 static int function_stat_cmp(const void *p1, const void *p2)
508 {
509 	const struct ftrace_profile *a = p1;
510 	const struct ftrace_profile *b = p2;
511 
512 	if (a->counter < b->counter)
513 		return -1;
514 	if (a->counter > b->counter)
515 		return 1;
516 	else
517 		return 0;
518 }
519 #endif
520 
521 static int function_stat_headers(struct seq_file *m)
522 {
523 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
524 	seq_puts(m, "  Function                               "
525 		 "Hit    Time            Avg             s^2\n"
526 		    "  --------                               "
527 		 "---    ----            ---             ---\n");
528 #else
529 	seq_puts(m, "  Function                               Hit\n"
530 		    "  --------                               ---\n");
531 #endif
532 	return 0;
533 }
534 
535 static int function_stat_show(struct seq_file *m, void *v)
536 {
537 	struct trace_array *tr = trace_get_global_array();
538 	struct ftrace_profile *rec = v;
539 	const char *refsymbol = NULL;
540 	char str[KSYM_SYMBOL_LEN];
541 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
542 	static struct trace_seq s;
543 	unsigned long long avg;
544 	unsigned long long stddev;
545 	unsigned long long stddev_denom;
546 #endif
547 	guard(mutex)(&ftrace_profile_lock);
548 
549 	/* we raced with function_profile_reset() */
550 	if (unlikely(rec->counter == 0))
551 		return -EBUSY;
552 
553 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
554 	avg = div64_ul(rec->time, rec->counter);
555 	if (tracing_thresh && (avg < tracing_thresh))
556 		return 0;
557 #endif
558 
559 	if (tr->trace_flags & TRACE_ITER(PROF_TEXT_OFFSET)) {
560 		unsigned long offset;
561 
562 		if (core_kernel_text(rec->ip)) {
563 			refsymbol = "_text";
564 			offset = rec->ip - (unsigned long)_text;
565 		} else {
566 			struct module *mod;
567 
568 			guard(rcu)();
569 			mod = __module_text_address(rec->ip);
570 			if (mod) {
571 				refsymbol = mod->name;
572 				/* Calculate offset from module's text entry address. */
573 				offset = rec->ip - (unsigned long)mod->mem[MOD_TEXT].base;
574 			}
575 		}
576 		if (refsymbol)
577 			snprintf(str, sizeof(str), "  %s+%#lx", refsymbol, offset);
578 	}
579 	if (!refsymbol)
580 		kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
581 
582 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
583 
584 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
585 	seq_puts(m, "    ");
586 
587 	/*
588 	 * Variance formula:
589 	 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
590 	 * Maybe Welford's method is better here?
591 	 * Divide only by 1000 for ns^2 -> us^2 conversion.
592 	 * trace_print_graph_duration will divide by 1000 again.
593 	 */
594 	stddev = 0;
595 	stddev_denom = rec->counter * (rec->counter - 1) * 1000;
596 	if (stddev_denom) {
597 		stddev = rec->counter * rec->time_squared -
598 			 rec->time * rec->time;
599 		stddev = div64_ul(stddev, stddev_denom);
600 	}
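
	/*
	 * E.g. two samples of 3000ns and 5000ns:
	 * n * \Sum (x_i)^2 - (\Sum x_i)^2 = 2 * 34e6 - 64e6 = 4e6, and
	 * dividing by n * (n-1) * 1000 = 2000 yields 2000, which the
	 * second /1000 in trace_print_graph_duration shows as 2 us^2.
	 */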
601 
602 	trace_seq_init(&s);
603 	trace_print_graph_duration(rec->time, &s);
604 	trace_seq_puts(&s, "    ");
605 	trace_print_graph_duration(avg, &s);
606 	trace_seq_puts(&s, "    ");
607 	trace_print_graph_duration(stddev, &s);
608 	trace_print_seq(m, &s);
609 #endif
610 	seq_putc(m, '\n');
611 
612 	return 0;
613 }
614 
615 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
616 {
617 	struct ftrace_profile_page *pg;
618 
619 	pg = stat->pages = stat->start;
620 
621 	while (pg) {
622 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
623 		pg->index = 0;
624 		pg = pg->next;
625 	}
626 
627 	memset(stat->hash, 0,
628 	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
629 }
630 
631 static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
632 {
633 	struct ftrace_profile_page *pg;
634 	int functions;
635 	int pages;
636 	int i;
637 
638 	/* If we already allocated, do nothing */
639 	if (stat->pages)
640 		return 0;
641 
642 	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
643 	if (!stat->pages)
644 		return -ENOMEM;
645 
646 #ifdef CONFIG_DYNAMIC_FTRACE
647 	functions = ftrace_update_tot_cnt;
648 #else
649 	/*
650 	 * We do not know the number of functions that exist because
651  * dynamic tracing is what counts them. From past experience,
652 	 * we have around 20K functions. That should be more than enough.
653 	 * It is highly unlikely we will execute every function in
654 	 * the kernel.
655 	 */
656 	functions = 20000;
657 #endif
658 
659 	pg = stat->start = stat->pages;
660 
661 	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
662 
663 	for (i = 1; i < pages; i++) {
664 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
665 		if (!pg->next)
666 			goto out_free;
667 		pg = pg->next;
668 	}
669 
670 	return 0;
671 
672  out_free:
673 	pg = stat->start;
674 	while (pg) {
675 		unsigned long tmp = (unsigned long)pg;
676 
677 		pg = pg->next;
678 		free_page(tmp);
679 	}
680 
681 	stat->pages = NULL;
682 	stat->start = NULL;
683 
684 	return -ENOMEM;
685 }
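
/*
 * With the defaults above this is a sizable allocation: e.g. 20000
 * functions at 85 records per page needs DIV_ROUND_UP(20000, 85) = 236
 * pages, i.e. roughly 944 KiB per CPU.
 */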
686 
687 static int ftrace_profile_init_cpu(int cpu)
688 {
689 	struct ftrace_profile_stat *stat;
690 	int size;
691 
692 	stat = &per_cpu(ftrace_profile_stats, cpu);
693 
694 	if (stat->hash) {
695 		/* If the profile is already created, simply reset it */
696 		ftrace_profile_reset(stat);
697 		return 0;
698 	}
699 
700 	/*
701 	 * We are profiling all functions, but usually only a few thousand
702 	 * functions are hit. We'll make a hash of 1024 items.
703 	 */
704 	size = FTRACE_PROFILE_HASH_SIZE;
705 
706 	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
707 
708 	if (!stat->hash)
709 		return -ENOMEM;
710 
711 	/* Preallocate the function profiling pages */
712 	if (ftrace_profile_pages_init(stat) < 0) {
713 		kfree(stat->hash);
714 		stat->hash = NULL;
715 		return -ENOMEM;
716 	}
717 
718 	return 0;
719 }
720 
721 static int ftrace_profile_init(void)
722 {
723 	int cpu;
724 	int ret = 0;
725 
726 	for_each_possible_cpu(cpu) {
727 		ret = ftrace_profile_init_cpu(cpu);
728 		if (ret)
729 			break;
730 	}
731 
732 	return ret;
733 }
734 
735 /* interrupts must be disabled */
736 static struct ftrace_profile *
737 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
738 {
739 	struct ftrace_profile *rec;
740 	struct hlist_head *hhd;
741 	unsigned long key;
742 
743 	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
744 	hhd = &stat->hash[key];
745 
746 	if (hlist_empty(hhd))
747 		return NULL;
748 
749 	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
750 		if (rec->ip == ip)
751 			return rec;
752 	}
753 
754 	return NULL;
755 }
756 
757 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
758 			       struct ftrace_profile *rec)
759 {
760 	unsigned long key;
761 
762 	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
763 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
764 }
765 
766 /*
767  * The memory is already allocated; this simply finds a new record to use.
768  */
769 static struct ftrace_profile *
770 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
771 {
772 	struct ftrace_profile *rec = NULL;
773 
774 	/* prevent recursion (from NMIs) */
775 	if (atomic_inc_return(&stat->disabled) != 1)
776 		goto out;
777 
778 	/*
779 	 * Try to find the function again since an NMI
780 	 * could have added it
781 	 */
782 	rec = ftrace_find_profiled_func(stat, ip);
783 	if (rec)
784 		goto out;
785 
786 	if (stat->pages->index == PROFILES_PER_PAGE) {
787 		if (!stat->pages->next)
788 			goto out;
789 		stat->pages = stat->pages->next;
790 	}
791 
792 	rec = &stat->pages->records[stat->pages->index++];
793 	rec->ip = ip;
794 	ftrace_add_profile(stat, rec);
795 
796  out:
797 	atomic_dec(&stat->disabled);
798 
799 	return rec;
800 }
801 
802 static void
803 function_profile_call(unsigned long ip, unsigned long parent_ip,
804 		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
805 {
806 	struct ftrace_profile_stat *stat;
807 	struct ftrace_profile *rec;
808 
809 	if (!ftrace_profile_enabled)
810 		return;
811 
812 	guard(preempt_notrace)();
813 
814 	stat = this_cpu_ptr(&ftrace_profile_stats);
815 	if (!stat->hash || !ftrace_profile_enabled)
816 		return;
817 
818 	rec = ftrace_find_profiled_func(stat, ip);
819 	if (!rec) {
820 		rec = ftrace_profile_alloc(stat, ip);
821 		if (!rec)
822 			return;
823 	}
824 
825 	rec->counter++;
826 }
827 
828 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
829 static bool fgraph_graph_time = true;
830 
831 void ftrace_graph_graph_time_control(bool enable)
832 {
833 	fgraph_graph_time = enable;
834 }
835 
836 struct profile_fgraph_data {
837 	unsigned long long		calltime;
838 	unsigned long long		subtime;
839 	unsigned long long		sleeptime;
840 };
841 
842 static int profile_graph_entry(struct ftrace_graph_ent *trace,
843 			       struct fgraph_ops *gops,
844 			       struct ftrace_regs *fregs)
845 {
846 	struct profile_fgraph_data *profile_data;
847 
848 	function_profile_call(trace->func, 0, NULL, NULL);
849 
850 	/* If function graph is shutting down, ret_stack can be NULL */
851 	if (!current->ret_stack)
852 		return 0;
853 
854 	profile_data = fgraph_reserve_data(gops->idx, sizeof(*profile_data));
855 	if (!profile_data)
856 		return 0;
857 
858 	profile_data->subtime = 0;
859 	profile_data->sleeptime = current->ftrace_sleeptime;
860 	profile_data->calltime = trace_clock_local();
861 
862 	return 1;
863 }
864 
865 bool fprofile_no_sleep_time;
866 
867 static void profile_graph_return(struct ftrace_graph_ret *trace,
868 				 struct fgraph_ops *gops,
869 				 struct ftrace_regs *fregs)
870 {
871 	struct profile_fgraph_data *profile_data;
872 	struct ftrace_profile_stat *stat;
873 	unsigned long long calltime;
874 	unsigned long long rettime = trace_clock_local();
875 	struct ftrace_profile *rec;
876 	int size;
877 
878 	guard(preempt_notrace)();
879 
880 	stat = this_cpu_ptr(&ftrace_profile_stats);
881 	if (!stat->hash || !ftrace_profile_enabled)
882 		return;
883 
884 	profile_data = fgraph_retrieve_data(gops->idx, &size);
885 
886 	/* If the calltime was zero'd, ignore it */
887 	if (!profile_data || !profile_data->calltime)
888 		return;
889 
890 	calltime = rettime - profile_data->calltime;
891 
892 	if (fprofile_no_sleep_time) {
893 		if (current->ftrace_sleeptime)
894 			calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
895 	}
896 
897 	if (!fgraph_graph_time) {
898 		struct profile_fgraph_data *parent_data;
899 
900 		/* Append this call time to the parent time to subtract */
901 		parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);
902 		if (parent_data)
903 			parent_data->subtime += calltime;
904 
905 		if (profile_data->subtime && profile_data->subtime < calltime)
906 			calltime -= profile_data->subtime;
907 		else
908 			calltime = 0;
909 	}
910 
911 	rec = ftrace_find_profiled_func(stat, trace->func);
912 	if (rec) {
913 		rec->time += calltime;
914 		rec->time_squared += calltime * calltime;
915 	}
916 }
917 
918 static struct fgraph_ops fprofiler_ops = {
919 	.entryfunc = &profile_graph_entry,
920 	.retfunc = &profile_graph_return,
921 };
922 
923 static int register_ftrace_profiler(void)
924 {
925 	ftrace_ops_set_global_filter(&fprofiler_ops.ops);
926 	return register_ftrace_graph(&fprofiler_ops);
927 }
928 
929 static void unregister_ftrace_profiler(void)
930 {
931 	unregister_ftrace_graph(&fprofiler_ops);
932 }
933 #else
934 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
935 	.func		= function_profile_call,
936 };
937 
938 static int register_ftrace_profiler(void)
939 {
940 	ftrace_ops_set_global_filter(&ftrace_profile_ops);
941 	return register_ftrace_function(&ftrace_profile_ops);
942 }
943 
944 static void unregister_ftrace_profiler(void)
945 {
946 	unregister_ftrace_function(&ftrace_profile_ops);
947 }
948 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
949 
950 static ssize_t
951 ftrace_profile_write(struct file *filp, const char __user *ubuf,
952 		     size_t cnt, loff_t *ppos)
953 {
954 	unsigned long val;
955 	int ret;
956 
957 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
958 	if (ret)
959 		return ret;
960 
961 	val = !!val;
962 
963 	guard(mutex)(&ftrace_profile_lock);
964 	if (ftrace_profile_enabled ^ val) {
965 		if (val) {
966 			ret = ftrace_profile_init();
967 			if (ret < 0)
968 				return ret;
969 
970 			ret = register_ftrace_profiler();
971 			if (ret < 0)
972 				return ret;
973 			ftrace_profile_enabled = 1;
974 		} else {
975 			ftrace_profile_enabled = 0;
976 			/*
977 			 * unregister_ftrace_profiler calls stop_machine
978 			 * so this acts like a synchronize_rcu().
979 			 */
980 			unregister_ftrace_profiler();
981 		}
982 	}
983 
984 	*ppos += cnt;
985 
986 	return cnt;
987 }
988 
989 static ssize_t
990 ftrace_profile_read(struct file *filp, char __user *ubuf,
991 		     size_t cnt, loff_t *ppos)
992 {
993 	char buf[64];		/* big enough to hold a number */
994 	int r;
995 
996 	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
997 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
998 }
999 
1000 static const struct file_operations ftrace_profile_fops = {
1001 	.open		= tracing_open_generic,
1002 	.read		= ftrace_profile_read,
1003 	.write		= ftrace_profile_write,
1004 	.llseek		= default_llseek,
1005 };
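
/*
 * From user space the profiler is driven through tracefs, typically:
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0
 *
 * with one trace_stat/function<cpu> file per CPU (created below).
 */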
1006 
1007 /* used to initialize the real stat files */
1008 static struct tracer_stat function_stats __initdata = {
1009 	.name		= "functions",
1010 	.stat_start	= function_stat_start,
1011 	.stat_next	= function_stat_next,
1012 	.stat_cmp	= function_stat_cmp,
1013 	.stat_headers	= function_stat_headers,
1014 	.stat_show	= function_stat_show
1015 };
1016 
1017 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1018 {
1019 	struct ftrace_profile_stat *stat;
1020 	char *name;
1021 	int ret;
1022 	int cpu;
1023 
1024 	for_each_possible_cpu(cpu) {
1025 		stat = &per_cpu(ftrace_profile_stats, cpu);
1026 
1027 		name = kasprintf(GFP_KERNEL, "function%d", cpu);
1028 		if (!name) {
1029 			/*
1030 			 * The files created are permanent; if something goes
1031 			 * wrong, we still do not free the memory.
1032 			 */
1033 			WARN(1,
1034 			     "Could not allocate stat file for cpu %d\n",
1035 			     cpu);
1036 			return;
1037 		}
1038 		stat->stat = function_stats;
1039 		stat->stat.name = name;
1040 		ret = register_stat_tracer(&stat->stat);
1041 		if (ret) {
1042 			WARN(1,
1043 			     "Could not register function stat for cpu %d\n",
1044 			     cpu);
1045 			kfree(name);
1046 			return;
1047 		}
1048 	}
1049 
1050 	trace_create_file("function_profile_enabled",
1051 			  TRACE_MODE_WRITE, d_tracer, NULL,
1052 			  &ftrace_profile_fops);
1053 }
1054 
1055 #else /* CONFIG_FUNCTION_PROFILER */
1056 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1057 {
1058 }
1059 #endif /* CONFIG_FUNCTION_PROFILER */
1060 
1061 #ifdef CONFIG_DYNAMIC_FTRACE
1062 
1063 static struct ftrace_ops *removed_ops;
1064 
1065 /*
1066  * Set when doing a global update, like enabling all recs or disabling them.
1067  * It is not set when just updating a single ftrace_ops.
1068  */
1069 static bool update_all_ops;
1070 
1071 struct ftrace_func_probe {
1072 	struct ftrace_probe_ops	*probe_ops;
1073 	struct ftrace_ops	ops;
1074 	struct trace_array	*tr;
1075 	struct list_head	list;
1076 	void			*data;
1077 	int			ref;
1078 };
1079 
1080 /*
1081  * We make these constant because no one should touch them,
1082  * but they are used as the default "empty hash", to avoid allocating
1083  * it all the time. These are in a read-only section such that if
1084  * anyone does try to modify it, it will cause an exception.
1085  */
1086 static const struct hlist_head empty_buckets[1];
1087 static const struct ftrace_hash empty_hash = {
1088 	.buckets = (struct hlist_head *)empty_buckets,
1089 };
1090 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
1091 
1092 struct ftrace_ops global_ops = {
1093 	.func				= ftrace_stub,
1094 	.local_hash.notrace_hash	= EMPTY_HASH,
1095 	.local_hash.filter_hash		= EMPTY_HASH,
1096 	INIT_OPS_HASH(global_ops)
1097 	.flags				= FTRACE_OPS_FL_INITIALIZED |
1098 					  FTRACE_OPS_FL_PID,
1099 };
1100 
1101 /*
1102  * Used by the stack unwinder to know about dynamic ftrace trampolines.
1103  */
1104 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1105 {
1106 	struct ftrace_ops *op = NULL;
1107 
1108 	/*
1109 	 * Some of the ops may be dynamically allocated,
1110 	 * they are freed after a synchronize_rcu().
1111 	 */
1112 	preempt_disable_notrace();
1113 
1114 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1115 		/*
1116 		 * This is to check for dynamically allocated trampolines.
1117 		 * Trampolines that are in kernel text will have
1118 		 * core_kernel_text() return true.
1119 		 */
1120 		if (op->trampoline && op->trampoline_size)
1121 			if (addr >= op->trampoline &&
1122 			    addr < op->trampoline + op->trampoline_size) {
1123 				preempt_enable_notrace();
1124 				return op;
1125 			}
1126 	} while_for_each_ftrace_op(op);
1127 	preempt_enable_notrace();
1128 
1129 	return NULL;
1130 }
1131 
1132 /*
1133  * This is used by __kernel_text_address() to return true if the
1134  * address is on a dynamically allocated trampoline that would
1135  * not return true for either core_kernel_text() or
1136  * is_module_text_address().
1137  */
1138 bool is_ftrace_trampoline(unsigned long addr)
1139 {
1140 	return ftrace_ops_trampoline(addr) != NULL;
1141 }
1142 
1143 struct ftrace_page {
1144 	struct ftrace_page	*next;
1145 	struct dyn_ftrace	*records;
1146 	int			index;
1147 	int			order;
1148 };
1149 
1150 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1151 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1152 
1153 static struct ftrace_page	*ftrace_pages_start;
1154 static struct ftrace_page	*ftrace_pages;
1155 
1156 static __always_inline unsigned long
1157 ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1158 {
1159 	if (hash->size_bits > 0)
1160 		return hash_long(ip, hash->size_bits);
1161 
1162 	return 0;
1163 }
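
/* With size_bits == 0 (e.g. EMPTY_HASH), everything maps to bucket 0. */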
1164 
1165 /* Only use this function if ftrace_hash_empty() has already been tested */
1166 static __always_inline struct ftrace_func_entry *
1167 __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1168 {
1169 	unsigned long key;
1170 	struct ftrace_func_entry *entry;
1171 	struct hlist_head *hhd;
1172 
1173 	key = ftrace_hash_key(hash, ip);
1174 	hhd = &hash->buckets[key];
1175 
1176 	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1177 		if (entry->ip == ip)
1178 			return entry;
1179 	}
1180 	return NULL;
1181 }
1182 
1183 /**
1184  * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1185  * @hash: The hash to look at
1186  * @ip: The instruction pointer to test
1187  *
1188  * Search a given @hash to see if a given instruction pointer (@ip)
1189  * exists in it.
1190  *
1191  * Returns: the entry that holds the @ip if found. NULL otherwise.
1192  */
1193 struct ftrace_func_entry *
1194 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1195 {
1196 	if (ftrace_hash_empty(hash))
1197 		return NULL;
1198 
1199 	return __ftrace_lookup_ip(hash, ip);
1200 }
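
/*
 * Typical use, with the hash protected by RCU or ftrace_lock so it
 * cannot be freed underneath us:
 *
 *	entry = ftrace_lookup_ip(hash, rec->ip);
 *	if (entry)
 *		...the ip is set in the hash...
 */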
1201 
1202 static void __add_hash_entry(struct ftrace_hash *hash,
1203 			     struct ftrace_func_entry *entry)
1204 {
1205 	struct hlist_head *hhd;
1206 	unsigned long key;
1207 
1208 	key = ftrace_hash_key(hash, entry->ip);
1209 	hhd = &hash->buckets[key];
1210 	hlist_add_head(&entry->hlist, hhd);
1211 	hash->count++;
1212 }
1213 
1214 static struct ftrace_func_entry *
1215 add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1216 {
1217 	struct ftrace_func_entry *entry;
1218 
1219 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1220 	if (!entry)
1221 		return NULL;
1222 
1223 	entry->ip = ip;
1224 	__add_hash_entry(hash, entry);
1225 
1226 	return entry;
1227 }
1228 
1229 static void
1230 free_hash_entry(struct ftrace_hash *hash,
1231 		  struct ftrace_func_entry *entry)
1232 {
1233 	hlist_del(&entry->hlist);
1234 	kfree(entry);
1235 	hash->count--;
1236 }
1237 
1238 static void
1239 remove_hash_entry(struct ftrace_hash *hash,
1240 		  struct ftrace_func_entry *entry)
1241 {
1242 	hlist_del_rcu(&entry->hlist);
1243 	hash->count--;
1244 }
1245 
1246 static void ftrace_hash_clear(struct ftrace_hash *hash)
1247 {
1248 	struct hlist_head *hhd;
1249 	struct hlist_node *tn;
1250 	struct ftrace_func_entry *entry;
1251 	int size = 1 << hash->size_bits;
1252 	int i;
1253 
1254 	if (!hash->count)
1255 		return;
1256 
1257 	for (i = 0; i < size; i++) {
1258 		hhd = &hash->buckets[i];
1259 		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1260 			free_hash_entry(hash, entry);
1261 	}
1262 	FTRACE_WARN_ON(hash->count);
1263 }
1264 
1265 static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1266 {
1267 	list_del(&ftrace_mod->list);
1268 	kfree(ftrace_mod->module);
1269 	kfree(ftrace_mod->func);
1270 	kfree(ftrace_mod);
1271 }
1272 
1273 static void clear_ftrace_mod_list(struct list_head *head)
1274 {
1275 	struct ftrace_mod_load *p, *n;
1276 
1277 	/* stack tracer isn't supported yet */
1278 	if (!head)
1279 		return;
1280 
1281 	mutex_lock(&ftrace_lock);
1282 	list_for_each_entry_safe(p, n, head, list)
1283 		free_ftrace_mod(p);
1284 	mutex_unlock(&ftrace_lock);
1285 }
1286 
1287 static void free_ftrace_hash(struct ftrace_hash *hash)
1288 {
1289 	if (!hash || hash == EMPTY_HASH)
1290 		return;
1291 	ftrace_hash_clear(hash);
1292 	kfree(hash->buckets);
1293 	kfree(hash);
1294 }
1295 
1296 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1297 {
1298 	struct ftrace_hash *hash;
1299 
1300 	hash = container_of(rcu, struct ftrace_hash, rcu);
1301 	free_ftrace_hash(hash);
1302 }
1303 
1304 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1305 {
1306 	if (!hash || hash == EMPTY_HASH)
1307 		return;
1308 	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1309 }
1310 
1311 /**
1312  * ftrace_free_filter - remove all filters for an ftrace_ops
1313  * @ops: the ops to remove the filters from
1314  */
1315 void ftrace_free_filter(struct ftrace_ops *ops)
1316 {
1317 	ftrace_ops_init(ops);
1318 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
1319 		return;
1320 	free_ftrace_hash(ops->func_hash->filter_hash);
1321 	free_ftrace_hash(ops->func_hash->notrace_hash);
1322 	ops->func_hash->filter_hash = EMPTY_HASH;
1323 	ops->func_hash->notrace_hash = EMPTY_HASH;
1324 }
1325 EXPORT_SYMBOL_GPL(ftrace_free_filter);
1326 
1327 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1328 {
1329 	struct ftrace_hash *hash;
1330 	int size;
1331 
1332 	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1333 	if (!hash)
1334 		return NULL;
1335 
1336 	size = 1 << size_bits;
1337 	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1338 
1339 	if (!hash->buckets) {
1340 		kfree(hash);
1341 		return NULL;
1342 	}
1343 
1344 	hash->size_bits = size_bits;
1345 
1346 	return hash;
1347 }
1348 
1349 /* Used to save filters on functions for modules not loaded yet */
1350 static int ftrace_add_mod(struct trace_array *tr,
1351 			  const char *func, const char *module,
1352 			  int enable)
1353 {
1354 	struct ftrace_mod_load *ftrace_mod;
1355 	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1356 
1357 	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1358 	if (!ftrace_mod)
1359 		return -ENOMEM;
1360 
1361 	INIT_LIST_HEAD(&ftrace_mod->list);
1362 	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1363 	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1364 	ftrace_mod->enable = enable;
1365 
1366 	if (!ftrace_mod->func || !ftrace_mod->module)
1367 		goto out_free;
1368 
1369 	list_add(&ftrace_mod->list, mod_head);
1370 
1371 	return 0;
1372 
1373  out_free:
1374 	free_ftrace_mod(ftrace_mod);
1375 
1376 	return -ENOMEM;
1377 }
1378 
1379 static struct ftrace_hash *
1380 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1381 {
1382 	struct ftrace_func_entry *entry;
1383 	struct ftrace_hash *new_hash;
1384 	int size;
1385 	int i;
1386 
1387 	new_hash = alloc_ftrace_hash(size_bits);
1388 	if (!new_hash)
1389 		return NULL;
1390 
1391 	if (hash)
1392 		new_hash->flags = hash->flags;
1393 
1394 	/* Empty hash? */
1395 	if (ftrace_hash_empty(hash))
1396 		return new_hash;
1397 
1398 	size = 1 << hash->size_bits;
1399 	for (i = 0; i < size; i++) {
1400 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1401 			if (add_hash_entry(new_hash, entry->ip) == NULL)
1402 				goto free_hash;
1403 		}
1404 	}
1405 
1406 	FTRACE_WARN_ON(new_hash->count != hash->count);
1407 
1408 	return new_hash;
1409 
1410  free_hash:
1411 	free_ftrace_hash(new_hash);
1412 	return NULL;
1413 }
1414 
1415 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops);
1416 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops);
1417 
1418 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1419 				       struct ftrace_hash *new_hash);
1420 
1421 /*
1422  * Allocate a new hash, removing the entries from @src and moving them to it.
1423  * On success, the @src hash will be empty and should be freed.
1424  */
1425 static struct ftrace_hash *__move_hash(struct ftrace_hash *src, int size)
1426 {
1427 	struct ftrace_func_entry *entry;
1428 	struct ftrace_hash *new_hash;
1429 	struct hlist_head *hhd;
1430 	struct hlist_node *tn;
1431 	int bits = 0;
1432 	int i;
1433 
1434 	/*
1435 	 * Use around half the size (max bit of it), but
1436 	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
1437 	 */
1438 	bits = fls(size / 2);
1439 
1440 	/* Don't allocate too much */
1441 	if (bits > FTRACE_HASH_MAX_BITS)
1442 		bits = FTRACE_HASH_MAX_BITS;
1443 
1444 	new_hash = alloc_ftrace_hash(bits);
1445 	if (!new_hash)
1446 		return NULL;
1447 
1448 	new_hash->flags = src->flags;
1449 
1450 	size = 1 << src->size_bits;
1451 	for (i = 0; i < size; i++) {
1452 		hhd = &src->buckets[i];
1453 		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1454 			remove_hash_entry(src, entry);
1455 			__add_hash_entry(new_hash, entry);
1456 		}
1457 	}
1458 	return new_hash;
1459 }
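
/*
 * E.g. moving 100 entries: fls(100 / 2) = 6, so the new hash gets
 * 1 << 6 = 64 buckets.
 */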
1460 
1461 /* Move the @src entries to a newly allocated hash */
1462 static struct ftrace_hash *
1463 __ftrace_hash_move(struct ftrace_hash *src)
1464 {
1465 	int size = src->count;
1466 
1467 	/*
1468 	 * If the new source is empty, just return the empty_hash.
1469 	 */
1470 	if (ftrace_hash_empty(src))
1471 		return EMPTY_HASH;
1472 
1473 	return __move_hash(src, size);
1474 }
1475 
1476 /**
1477  * ftrace_hash_move - move a new hash to a filter and do updates
1478  * @ops: The ops with the hash that @dst points to
1479  * @enable: True if for the filter hash, false for the notrace hash
1480  * @dst: Points to the @ops hash that should be updated
1481  * @src: The hash to update @dst with
1482  *
1483  * This is called when an ftrace_ops hash is being updated and the
1484  * kernel needs to reflect this. Note, this only updates the kernel
1485  * function callbacks if the @ops is enabled (not to be confused with
1486  * @enable above). If the @ops is enabled, its hash determines what
1487  * callbacks get called. This function gets called when the @ops hash
1488  * is updated and it requires new callbacks.
1489  *
1490  * On success the elements of @src are moved to @dst, and @dst is updated
1491  * properly, as well as the functions determined by the @ops hashes
1492  * are now calling the @ops callback function.
1493  *
1494  * Regardless of the return value, @src should be freed with free_ftrace_hash().
1495  */
1496 static int
1497 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1498 		 struct ftrace_hash **dst, struct ftrace_hash *src)
1499 {
1500 	struct ftrace_hash *new_hash;
1501 	int ret;
1502 
1503 	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
1504 	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1505 		return -EINVAL;
1506 
1507 	new_hash = __ftrace_hash_move(src);
1508 	if (!new_hash)
1509 		return -ENOMEM;
1510 
1511 	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1512 	if (enable) {
1513 		/* IPMODIFY should be updated only when filter_hash updating */
1514 		ret = ftrace_hash_ipmodify_update(ops, new_hash);
1515 		if (ret < 0) {
1516 			free_ftrace_hash(new_hash);
1517 			return ret;
1518 		}
1519 	}
1520 
1521 	/*
1522 	 * Remove the current set, update the hash and add
1523 	 * them back.
1524 	 */
1525 	ftrace_hash_rec_disable_modify(ops);
1526 
1527 	rcu_assign_pointer(*dst, new_hash);
1528 
1529 	ftrace_hash_rec_enable_modify(ops);
1530 
1531 	return 0;
1532 }
1533 
1534 static bool hash_contains_ip(unsigned long ip,
1535 			     struct ftrace_ops_hash *hash)
1536 {
1537 	/*
1538 	 * The function record is a match if it exists in the filter
1539 	 * hash and not in the notrace hash. Note, an empty filter
1540 	 * hash matches every ip, but an empty notrace hash matches
1541 	 * none of them.
1542 	 */
1543 	return (ftrace_hash_empty(hash->filter_hash) ||
1544 		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
1545 		(ftrace_hash_empty(hash->notrace_hash) ||
1546 		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1547 }
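
/*
 * For example, an ops with both hashes empty matches every ip (trace
 * everything), while adding a single ip to only the notrace_hash
 * excludes just that one function.
 */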
1548 
1549 /*
1550  * Test the hashes for this ops to see if we want to call
1551  * the ops->func or not.
1552  *
1553  * It's a match if the ip is in the ops->filter_hash or
1554  * the filter_hash does not exist or is empty,
1555  *  AND
1556  * the ip is not in the ops->notrace_hash.
1557  *
1558  * This needs to be called with preemption disabled as
1559  * the hashes are freed with call_rcu().
1560  */
1561 int
1562 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1563 {
1564 	struct ftrace_ops_hash hash;
1565 	int ret;
1566 
1567 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1568 	/*
1569 	 * There's a small race when adding ops: the ftrace handler
1570 	 * that wants regs may be called without them. We cannot
1571 	 * allow that handler to be called if regs is NULL.
1572 	 */
1573 	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1574 		return 0;
1575 #endif
1576 
1577 	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1578 	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1579 
1580 	if (hash_contains_ip(ip, &hash))
1581 		ret = 1;
1582 	else
1583 		ret = 0;
1584 
1585 	return ret;
1586 }
1587 
1588 /*
1589  * This is a double loop. Do not use 'break' to break out of it;
1590  * you must use a goto.
1591  */
1592 #define do_for_each_ftrace_rec(pg, rec)					\
1593 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
1594 		int _____i;						\
1595 		for (_____i = 0; _____i < pg->index; _____i++) {	\
1596 			rec = &pg->records[_____i];
1597 
1598 #define while_for_each_ftrace_rec()		\
1599 		}				\
1600 	}
1601 
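/*
 * Usage sketch (target_ip is a hypothetical local):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 */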
1602 
1603 static int ftrace_cmp_recs(const void *a, const void *b)
1604 {
1605 	const struct dyn_ftrace *key = a;
1606 	const struct dyn_ftrace *rec = b;
1607 
1608 	if (key->flags < rec->ip)
1609 		return -1;
1610 	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1611 		return 1;
1612 	return 0;
1613 }
1614 
1615 static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
1616 {
1617 	struct ftrace_page *pg;
1618 	struct dyn_ftrace *rec = NULL;
1619 	struct dyn_ftrace key;
1620 
1621 	key.ip = start;
1622 	key.flags = end;	/* overload flags, as it is unsigned long */
1623 
1624 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1625 		if (pg->index == 0 ||
1626 		    end < pg->records[0].ip ||
1627 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1628 			continue;
1629 		rec = bsearch(&key, pg->records, pg->index,
1630 			      sizeof(struct dyn_ftrace),
1631 			      ftrace_cmp_recs);
1632 		if (rec)
1633 			break;
1634 	}
1635 	return rec;
1636 }
1637 
1638 /**
1639  * ftrace_location_range - return the first address of a traced location
1640  *	if it touches the given ip range
1641  * @start: start of range to search.
1642  * @end: end of range to search (inclusive). @end points to the last byte
1643  *	to check.
1644  *
1645  * Returns: rec->ip if the related ftrace location is at least partly within
1646  * the given address range. That is, the first address of the instruction
1647  * that is either a NOP or call to the function tracer. It checks the ftrace
1648  * internal tables to determine if the address belongs or not.
1649  */
1650 unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1651 {
1652 	struct dyn_ftrace *rec;
1653 	unsigned long ip = 0;
1654 
1655 	rcu_read_lock();
1656 	rec = lookup_rec(start, end);
1657 	if (rec)
1658 		ip = rec->ip;
1659 	rcu_read_unlock();
1660 
1661 	return ip;
1662 }
1663 
1664 /**
1665  * ftrace_location - return the ftrace location
1666  * @ip: the instruction pointer to check
1667  *
1668  * Returns:
1669  * * If @ip matches the ftrace location, return @ip.
1670  * * If @ip matches sym+0, return sym's ftrace location.
1671  * * Otherwise, return 0.
1672  */
1673 unsigned long ftrace_location(unsigned long ip)
1674 {
1675 	unsigned long loc;
1676 	unsigned long offset;
1677 	unsigned long size;
1678 
1679 	loc = ftrace_location_range(ip, ip);
1680 	if (!loc) {
1681 		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
1682 			return 0;
1683 
1684 		/* map sym+0 to __fentry__ */
1685 		if (!offset)
1686 			loc = ftrace_location_range(ip, ip + size - 1);
1687 	}
1688 	return loc;
1689 }
1690 
1691 /**
1692  * ftrace_text_reserved - return true if range contains an ftrace location
1693  * @start: start of range to search
1694  * @end: end of range to search (inclusive). @end points to the last byte to check.
1695  *
1696  * Returns: 1 if @start and @end contain a ftrace location.
1697  * That is, the instruction that is either a NOP or call to
1698  * the function tracer. It checks the ftrace internal tables to
1699  * determine if the address belongs or not.
1700  */
1701 int ftrace_text_reserved(const void *start, const void *end)
1702 {
1703 	unsigned long ret;
1704 
1705 	ret = ftrace_location_range((unsigned long)start,
1706 				    (unsigned long)end);
1707 
1708 	return (int)!!ret;
1709 }
1710 
1711 /* Test if ops registered to this rec needs regs */
1712 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1713 {
1714 	struct ftrace_ops *ops;
1715 	bool keep_regs = false;
1716 
1717 	for (ops = ftrace_ops_list;
1718 	     ops != &ftrace_list_end; ops = ops->next) {
1719 		/* pass rec in as regs to have non-NULL val */
1720 		if (ftrace_ops_test(ops, rec->ip, rec)) {
1721 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1722 				keep_regs = true;
1723 				break;
1724 			}
1725 		}
1726 	}
1727 
1728 	return  keep_regs;
1729 }
1730 
1731 static struct ftrace_ops *
1732 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1733 static struct ftrace_ops *
1734 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
1735 static struct ftrace_ops *
1736 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1737 
1738 static bool skip_record(struct dyn_ftrace *rec)
1739 {
1740 	/*
1741 	 * At boot up, weak functions are set to disable. Function tracing
1742 	 * can be enabled before they are, and they still need to be disabled now.
1743 	 * If the record is disabled, still continue if it is marked as already
1744 	 * enabled (this is needed to keep the accounting working).
1745 	 */
1746 	return rec->flags & FTRACE_FL_DISABLED &&
1747 		!(rec->flags & FTRACE_FL_ENABLED);
1748 }
1749 
1750 /*
1751  * This is the main engine for the ftrace updates to the dyn_ftrace records.
1752  *
1753  * It will iterate through all the available ftrace functions
1754  * (the ones that ftrace can have callbacks to) and set the flags
1755  * in the associated dyn_ftrace records.
1756  *
1757  * @inc: If true, the functions associated to @ops are added to
1758  *       the dyn_ftrace records, otherwise they are removed.
1759  */
1760 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1761 				     bool inc)
1762 {
1763 	struct ftrace_hash *hash;
1764 	struct ftrace_hash *notrace_hash;
1765 	struct ftrace_page *pg;
1766 	struct dyn_ftrace *rec;
1767 	bool update = false;
1768 	int count = 0;
1769 	int all = false;
1770 
1771 	/* Only update if the ops has been registered */
1772 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1773 		return false;
1774 
1775 	/*
1776 	 *   If the count is zero, we update all records.
1777 	 *   Otherwise we just update the items in the hash.
1778 	 */
1779 	hash = ops->func_hash->filter_hash;
1780 	notrace_hash = ops->func_hash->notrace_hash;
1781 	if (ftrace_hash_empty(hash))
1782 		all = true;
1783 
1784 	do_for_each_ftrace_rec(pg, rec) {
1785 		int in_notrace_hash = 0;
1786 		int in_hash = 0;
1787 		int match = 0;
1788 
1789 		if (skip_record(rec))
1790 			continue;
1791 
1792 		if (all) {
1793 			/*
1794 			 * Only the filter_hash affects all records.
1795 			 * Update if the record is not in the notrace hash.
1796 			 */
1797 			if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip))
1798 				match = 1;
1799 		} else {
1800 			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1801 			in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip);
1802 
1803 			/*
1804 			 * We want to match all functions that are in the hash but
1805 			 * not in the other hash.
1806 			 */
1807 			if (in_hash && !in_notrace_hash)
1808 				match = 1;
1809 		}
1810 		if (!match)
1811 			continue;
1812 
1813 		if (inc) {
1814 			rec->flags++;
1815 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1816 				return false;
1817 
1818 			if (ops->flags & FTRACE_OPS_FL_DIRECT)
1819 				rec->flags |= FTRACE_FL_DIRECT;
1820 
1821 			/*
1822 			 * If there's only a single callback registered to a
1823 			 * function, and the ops has a trampoline registered
1824 			 * for it, then we can call it directly.
1825 			 */
1826 			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1827 				rec->flags |= FTRACE_FL_TRAMP;
1828 			else
1829 				/*
1830 				 * If we are adding another function callback
1831 				 * to this function, and the previous had a
1832 				 * custom trampoline in use, then we need to go
1833 				 * back to the default trampoline.
1834 				 */
1835 				rec->flags &= ~FTRACE_FL_TRAMP;
1836 
1837 			/*
1838 			 * If any ops wants regs saved for this function
1839 			 * then all ops will get saved regs.
1840 			 */
1841 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1842 				rec->flags |= FTRACE_FL_REGS;
1843 		} else {
1844 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1845 				return false;
1846 			rec->flags--;
1847 
1848 			/*
1849 			 * Only the internal direct_ops should have the
1850 			 * DIRECT flag set. Thus, if it is removing a
1851 			 * function, then that function should no longer
1852 			 * be direct.
1853 			 */
1854 			if (ops->flags & FTRACE_OPS_FL_DIRECT)
1855 				rec->flags &= ~FTRACE_FL_DIRECT;
1856 
1857 			/*
1858 			 * If the rec had REGS enabled and the ops that is
1859 			 * being removed had REGS set, then see if there is
1860 			 * still any ops for this record that wants regs.
1861 			 * If not, we can stop recording them.
1862 			 */
1863 			if (ftrace_rec_count(rec) > 0 &&
1864 			    rec->flags & FTRACE_FL_REGS &&
1865 			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1866 				if (!test_rec_ops_needs_regs(rec))
1867 					rec->flags &= ~FTRACE_FL_REGS;
1868 			}
1869 
1870 			/*
1871 			 * The TRAMP flag needs to be set only if the rec
1872 			 * count is decremented to one, and the ops that is
1873 			 * left has a trampoline, since TRAMP can only be
1874 			 * enabled when there is a single ops attached
1875 			 * to the record.
1876 			 */
1877 			if (ftrace_rec_count(rec) == 1 &&
1878 			    ftrace_find_tramp_ops_any_other(rec, ops))
1879 				rec->flags |= FTRACE_FL_TRAMP;
1880 			else
1881 				rec->flags &= ~FTRACE_FL_TRAMP;
1882 
1883 			/*
1884 			 * flags will be cleared in ftrace_check_record()
1885 			 * if rec count is zero.
1886 			 */
1887 		}
1888 
1889 		/*
1890 		 * If the rec has a single associated ops, and ops->func can be
1891 		 * called directly, allow the call site to call via the ops.
1892 		 */
1893 		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
1894 		    ftrace_rec_count(rec) == 1 &&
1895 		    ftrace_ops_get_func(ops) == ops->func)
1896 			rec->flags |= FTRACE_FL_CALL_OPS;
1897 		else
1898 			rec->flags &= ~FTRACE_FL_CALL_OPS;
1899 
1900 		count++;
1901 
1902 		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1903 		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
1904 
1905 		/* Shortcut, if we handled all records, we are done. */
1906 		if (!all && count == hash->count)
1907 			return update;
1908 	} while_for_each_ftrace_rec();
1909 
1910 	return update;
1911 }
1912 
1913 /*
1914  * This is called when an ops is removed from tracing. It will decrement
1915  * the counters of the dyn_ftrace records for all the functions that
1916  * the @ops is attached to.
1917  */
1918 static bool ftrace_hash_rec_disable(struct ftrace_ops *ops)
1919 {
1920 	return __ftrace_hash_rec_update(ops, false);
1921 }
1922 
1923 /*
1924  * This is called when an ops is added to tracing. It will increment
1925  * the counters of the dyn_ftrace records for all the functions that
1926  * the @ops is attached to.
1927  */
1928 static bool ftrace_hash_rec_enable(struct ftrace_ops *ops)
1929 {
1930 	return __ftrace_hash_rec_update(ops, true);
1931 }
1932 
1933 /*
1934  * This function will update what functions @ops traces when its filter
1935  * changes.
1936  *
1937  * The @inc states if the @ops callbacks are going to be added or removed.
1938  * When one of the @ops hashes is updated to a "new_hash", the dyn_ftrace
1939  * records are updated via:
1940  *
1941  * ftrace_hash_rec_disable_modify(ops);
1942  * ops->hash = new_hash
1943  * ftrace_hash_rec_enable_modify(ops);
1944  *
1945  * Where the @ops is removed from all the records it is tracing using
1946  * its old hash. The @ops hash is updated to the new hash, and then
1947  * the @ops is added back to the records so that it is tracing all
1948  * the new functions.
1949  */
1950 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, bool inc)
1951 {
1952 	struct ftrace_ops *op;
1953 
1954 	__ftrace_hash_rec_update(ops, inc);
1955 
1956 	if (ops->func_hash != &global_ops.local_hash)
1957 		return;
1958 
1959 	/*
1960 	 * If the ops shares the global_ops hash, then we need to update
1961 	 * all ops that are enabled and use this hash.
1962 	 */
1963 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1964 		/* Already done */
1965 		if (op == ops)
1966 			continue;
1967 		if (op->func_hash == &global_ops.local_hash)
1968 			__ftrace_hash_rec_update(op, inc);
1969 	} while_for_each_ftrace_op(op);
1970 }
1971 
1972 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops)
1973 {
1974 	ftrace_hash_rec_update_modify(ops, false);
1975 }
1976 
1977 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops)
1978 {
1979 	ftrace_hash_rec_update_modify(ops, true);
1980 }
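
/*
 * Editor-added sketch (not part of the original file): how a filter
 * hash swap is expected to use the two helpers above, following the
 * sequence described before ftrace_hash_rec_update_modify(). The
 * function name is hypothetical; locking and the real hash-move
 * machinery are elided.
 */
static void example_swap_filter_hash(struct ftrace_ops *ops,
				     struct ftrace_hash *new_hash)
{
	ftrace_hash_rec_disable_modify(ops);	/* drop refs via the old hash */
	ops->func_hash->filter_hash = new_hash;	/* install the new hash */
	ftrace_hash_rec_enable_modify(ops);	/* take refs via the new hash */
}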
1981 
1982 /*
1983  * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1984  * or no update is needed, -EBUSY if it detects a conflict of the flag
1985  * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1986  * Note that old_hash and new_hash have the following meanings:
1987  *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1988  *  - If the hash is EMPTY_HASH, it hits nothing
1989  *  - Anything else hits the recs which match the hash entries.
1990  *
1991  * DIRECT ops do not have the IPMODIFY flag, but we still need to check them
1992  * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
1993  * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
1994  * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
1995  * the return value to the caller and eventually to the owner of the DIRECT
1996  * ops.
1997  */
1998 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1999 					 struct ftrace_hash *old_hash,
2000 					 struct ftrace_hash *new_hash,
2001 					 bool update_target)
2002 {
2003 	struct ftrace_page *pg;
2004 	struct dyn_ftrace *rec, *end = NULL;
2005 	int in_old, in_new;
2006 	bool is_ipmodify, is_direct;
2007 
2008 	/* Only update if the ops has been registered */
2009 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2010 		return 0;
2011 
2012 	is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
2013 	is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;
2014 
2015 	/* neither IPMODIFY nor DIRECT, skip */
2016 	if (!is_ipmodify && !is_direct)
2017 		return 0;
2018 
2019 	if (WARN_ON_ONCE(is_ipmodify && is_direct))
2020 		return 0;
2021 
2022 	/*
2023 	 * Since IPMODIFY and DIRECT are very address-sensitive
2024 	 * actions, we do not allow ftrace_ops to set all functions
2025 	 * to a new hash.
2026 	 */
2027 	if (!new_hash || !old_hash)
2028 		return -EINVAL;
2029 
2030 	/* Update rec->flags */
2031 	do_for_each_ftrace_rec(pg, rec) {
2032 
2033 		if (rec->flags & FTRACE_FL_DISABLED)
2034 			continue;
2035 
2036 		/*
2037 		 * Unless we are updating the target of a direct function,
2038 		 * we only need to process differences in the filter_hash.
2039 		 */
2040 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
2041 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
2042 		if (!update_target && (in_old == in_new))
2043 			continue;
2044 
2045 		if (in_new) {
2046 			if (rec->flags & FTRACE_FL_IPMODIFY) {
2047 				int ret;
2048 
2049 				/* Cannot have two ipmodify on same rec */
2050 				if (is_ipmodify)
2051 					goto rollback;
2052 
2053 				/*
2054 				 * If this is called by __modify_ftrace_direct()
2055 				 * then it is only changing where the direct
2056 				 * pointer is jumping to, and the record already
2057 				 * points to a direct trampoline. If it isn't,
2058 				 * then it is a bug to update ipmodify on a direct
2059 				 * caller.
2060 				 */
2061 				FTRACE_WARN_ON(!update_target &&
2062 					       (rec->flags & FTRACE_FL_DIRECT));
2063 
2064 				/*
2065 				 * Another ops with IPMODIFY is already
2066 				 * attached. We are now attaching a direct
2067 				 * ops. Run SHARE_IPMODIFY_SELF, to check
2068 				 * whether sharing is supported.
2069 				 */
2070 				if (!ops->ops_func)
2071 					return -EBUSY;
2072 				ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
2073 				if (ret)
2074 					return ret;
2075 			} else if (is_ipmodify) {
2076 				rec->flags |= FTRACE_FL_IPMODIFY;
2077 			}
2078 		} else if (is_ipmodify) {
2079 			rec->flags &= ~FTRACE_FL_IPMODIFY;
2080 		}
2081 	} while_for_each_ftrace_rec();
2082 
2083 	return 0;
2084 
2085 rollback:
2086 	end = rec;
2087 
2088 	/* Roll back what we did above */
2089 	do_for_each_ftrace_rec(pg, rec) {
2090 
2091 		if (rec->flags & FTRACE_FL_DISABLED)
2092 			continue;
2093 
2094 		if (rec == end)
2095 			return -EBUSY;
2096 
2097 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
2098 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
2099 		if (in_old == in_new)
2100 			continue;
2101 
2102 		if (in_new)
2103 			rec->flags &= ~FTRACE_FL_IPMODIFY;
2104 		else
2105 			rec->flags |= FTRACE_FL_IPMODIFY;
2106 	} while_for_each_ftrace_rec();
2107 
2108 	return -EBUSY;
2109 }
2110 
2111 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
2112 {
2113 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
2114 
2115 	if (ftrace_hash_empty(hash))
2116 		hash = NULL;
2117 
2118 	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash, false);
2119 }
2120 
2121 /* Disabling always succeeds */
2122 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
2123 {
2124 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
2125 
2126 	if (ftrace_hash_empty(hash))
2127 		hash = NULL;
2128 
2129 	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH, false);
2130 }
2131 
2132 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
2133 				       struct ftrace_hash *new_hash)
2134 {
2135 	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
2136 
2137 	if (ftrace_hash_empty(old_hash))
2138 		old_hash = NULL;
2139 
2140 	if (ftrace_hash_empty(new_hash))
2141 		new_hash = NULL;
2142 
2143 	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash, false);
2144 }
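
/*
 * Editor-added note: per the hash conventions documented above
 * __ftrace_hash_update_ipmodify(), NULL means "all functions" and
 * EMPTY_HASH means "no functions". Hence enable passes
 * (EMPTY_HASH -> filter), disable passes (filter -> EMPTY_HASH), and
 * the three wrappers above normalize an empty hash to NULL, i.e.
 * "hits all recs" (which IPMODIFY then rejects with -EINVAL).
 */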
2145 
2146 static void print_ip_ins(const char *fmt, const unsigned char *p)
2147 {
2148 	char ins[MCOUNT_INSN_SIZE];
2149 
2150 	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
2151 		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
2152 		return;
2153 	}
2154 
2155 	printk(KERN_CONT "%s", fmt);
2156 	pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
2157 }
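
/*
 * Editor-added note: %*phC prints the fetched bytes as colon-separated
 * hex, so with MCOUNT_INSN_SIZE of 5 (e.g. x86-64) a successful dump
 * looks roughly like " actual:   0f:1f:44:00:00", while an unreadable
 * address prints "[FAULT] <address>" instead.
 */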
2158 
2159 enum ftrace_bug_type ftrace_bug_type;
2160 const void *ftrace_expected;
2161 
2162 static void print_bug_type(void)
2163 {
2164 	switch (ftrace_bug_type) {
2165 	case FTRACE_BUG_UNKNOWN:
2166 		break;
2167 	case FTRACE_BUG_INIT:
2168 		pr_info("Initializing ftrace call sites\n");
2169 		break;
2170 	case FTRACE_BUG_NOP:
2171 		pr_info("Setting ftrace call site to NOP\n");
2172 		break;
2173 	case FTRACE_BUG_CALL:
2174 		pr_info("Setting ftrace call site to call ftrace function\n");
2175 		break;
2176 	case FTRACE_BUG_UPDATE:
2177 		pr_info("Updating ftrace call site to call a different ftrace function\n");
2178 		break;
2179 	}
2180 }
2181 
2182 /**
2183  * ftrace_bug - report and shutdown function tracer
2184  * @failed: The failed type (EFAULT, EINVAL, EPERM)
2185  * @rec: The record that failed
2186  *
2187  * The arch code that enables or disables the function tracing
2188  * can call ftrace_bug() when it has detected a problem in
2189  * modifying the code. @failed should be one of either:
2190  * modifying the code. @failed should be one of:
2191  * EINVAL - if what is read at @ip is not what was expected
2192  * EPERM - if the problem happens on writing to the @ip address
2193  */
2194 void ftrace_bug(int failed, struct dyn_ftrace *rec)
2195 {
2196 	unsigned long ip = rec ? rec->ip : 0;
2197 
2198 	pr_info("------------[ ftrace bug ]------------\n");
2199 
2200 	switch (failed) {
2201 	case -EFAULT:
2202 		pr_info("ftrace faulted on modifying ");
2203 		print_ip_sym(KERN_INFO, ip);
2204 		break;
2205 	case -EINVAL:
2206 		pr_info("ftrace failed to modify ");
2207 		print_ip_sym(KERN_INFO, ip);
2208 		print_ip_ins(" actual:   ", (unsigned char *)ip);
2209 		pr_cont("\n");
2210 		if (ftrace_expected) {
2211 			print_ip_ins(" expected: ", ftrace_expected);
2212 			pr_cont("\n");
2213 		}
2214 		break;
2215 	case -EPERM:
2216 		pr_info("ftrace faulted on writing ");
2217 		print_ip_sym(KERN_INFO, ip);
2218 		break;
2219 	default:
2220 		pr_info("ftrace faulted on unknown error ");
2221 		print_ip_sym(KERN_INFO, ip);
2222 	}
2223 	print_bug_type();
2224 	if (rec) {
2225 		struct ftrace_ops *ops = NULL;
2226 
2227 		pr_info("ftrace record flags: %lx\n", rec->flags);
2228 		pr_cont(" (%ld)%s%s", ftrace_rec_count(rec),
2229 			rec->flags & FTRACE_FL_REGS ? " R" : "  ",
2230 			rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ");
2231 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
2232 			ops = ftrace_find_tramp_ops_any(rec);
2233 			if (ops) {
2234 				do {
2235 					pr_cont("\ttramp: %pS (%pS)",
2236 						(void *)ops->trampoline,
2237 						(void *)ops->func);
2238 					ops = ftrace_find_tramp_ops_next(rec, ops);
2239 				} while (ops);
2240 			} else
2241 				pr_cont("\ttramp: ERROR!");
2242 
2243 		}
2244 		ip = ftrace_get_addr_curr(rec);
2245 		pr_cont("\n expected tramp: %lx\n", ip);
2246 	}
2247 
2248 	FTRACE_WARN_ON_ONCE(1);
2249 }
2250 
2251 static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2252 {
2253 	unsigned long flag = 0UL;
2254 
2255 	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2256 
2257 	if (skip_record(rec))
2258 		return FTRACE_UPDATE_IGNORE;
2259 
2260 	/*
2261 	 * If we are updating calls:
2262 	 *
2263 	 *   If the record has a ref count, then we need to enable it
2264 	 *   because someone is using it.
2265 	 *
2266 	 *   Otherwise we make sure its disabled.
2267 	 *
2268 	 * If we are disabling calls, then disable all records that
2269 	 * are enabled.
2270 	 */
2271 	if (enable && ftrace_rec_count(rec))
2272 		flag = FTRACE_FL_ENABLED;
2273 
2274 	/*
2275 	 * If enabling and the REGS flag does not match the REGS_EN, or
2276 	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2277 	 * this record. Set flags to fail the compare against ENABLED.
2278 	 * Same for direct calls.
2279 	 */
2280 	if (flag) {
2281 		if (!(rec->flags & FTRACE_FL_REGS) !=
2282 		    !(rec->flags & FTRACE_FL_REGS_EN))
2283 			flag |= FTRACE_FL_REGS;
2284 
2285 		if (!(rec->flags & FTRACE_FL_TRAMP) !=
2286 		    !(rec->flags & FTRACE_FL_TRAMP_EN))
2287 			flag |= FTRACE_FL_TRAMP;
2288 
2289 		/*
2290 		 * Direct calls are special, as the count matters.
2291 		 * We must flag the record for update if DIRECT
2292 		 * and DIRECT_EN do not match, but only if the
2293 		 * count is 1. That's because, if the count is
2294 		 * anything other than one, we do not want the
2295 		 * direct call enabled (it will be done via the
2296 		 * direct helper). But if DIRECT_EN is set and
2297 		 * the count is not one, we need to clear it.
2298 		 *
2299 		 */
2300 		if (ftrace_rec_count(rec) == 1) {
2301 			if (!(rec->flags & FTRACE_FL_DIRECT) !=
2302 			    !(rec->flags & FTRACE_FL_DIRECT_EN))
2303 				flag |= FTRACE_FL_DIRECT;
2304 		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2305 			flag |= FTRACE_FL_DIRECT;
2306 		}
2307 
2308 		/*
2309 		 * Ops calls are special, as count matters.
2310 		 * As with direct calls, they must only be enabled when count
2311 		 * is one, otherwise they'll be handled via the list ops.
2312 		 */
2313 		if (ftrace_rec_count(rec) == 1) {
2314 			if (!(rec->flags & FTRACE_FL_CALL_OPS) !=
2315 			    !(rec->flags & FTRACE_FL_CALL_OPS_EN))
2316 				flag |= FTRACE_FL_CALL_OPS;
2317 		} else if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
2318 			flag |= FTRACE_FL_CALL_OPS;
2319 		}
2320 	}
2321 
2322 	/* If the state of this record hasn't changed, then do nothing */
2323 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2324 		return FTRACE_UPDATE_IGNORE;
2325 
2326 	if (flag) {
2327 		/* Save off if rec is being enabled (for return value) */
2328 		flag ^= rec->flags & FTRACE_FL_ENABLED;
2329 
2330 		if (update) {
2331 			rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED;
2332 			if (flag & FTRACE_FL_REGS) {
2333 				if (rec->flags & FTRACE_FL_REGS)
2334 					rec->flags |= FTRACE_FL_REGS_EN;
2335 				else
2336 					rec->flags &= ~FTRACE_FL_REGS_EN;
2337 			}
2338 			if (flag & FTRACE_FL_TRAMP) {
2339 				if (rec->flags & FTRACE_FL_TRAMP)
2340 					rec->flags |= FTRACE_FL_TRAMP_EN;
2341 				else
2342 					rec->flags &= ~FTRACE_FL_TRAMP_EN;
2343 			}
2344 
2345 			/* Keep track of anything that modifies the function */
2346 			if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY))
2347 				rec->flags |= FTRACE_FL_MODIFIED;
2348 
2349 			if (flag & FTRACE_FL_DIRECT) {
2350 				/*
2351 				 * If there's only one user (direct_ops helper)
2352 				 * then we can call the direct function
2353 				 * directly (no ftrace trampoline).
2354 				 */
2355 				if (ftrace_rec_count(rec) == 1) {
2356 					if (rec->flags & FTRACE_FL_DIRECT)
2357 						rec->flags |= FTRACE_FL_DIRECT_EN;
2358 					else
2359 						rec->flags &= ~FTRACE_FL_DIRECT_EN;
2360 				} else {
2361 					/*
2362 					 * Can only call directly if there's
2363 					 * only one callback to the function.
2364 					 */
2365 					rec->flags &= ~FTRACE_FL_DIRECT_EN;
2366 				}
2367 			}
2368 
2369 			if (flag & FTRACE_FL_CALL_OPS) {
2370 				if (ftrace_rec_count(rec) == 1) {
2371 					if (rec->flags & FTRACE_FL_CALL_OPS)
2372 						rec->flags |= FTRACE_FL_CALL_OPS_EN;
2373 					else
2374 						rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
2375 				} else {
2376 					/*
2377 					 * Can only call directly if there's
2378 					 * only one set of associated ops.
2379 					 */
2380 					rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
2381 				}
2382 			}
2383 		}
2384 
2385 		/*
2386 		 * If this record is being updated from a nop, then
2387 		 *   return UPDATE_MAKE_CALL.
2388 		 * Otherwise,
2389 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2390 		 *   from the save-regs to a non-save-regs function, or
2391 		 *   vice versa, or from a trampoline call.
2392 		 */
2393 		if (flag & FTRACE_FL_ENABLED) {
2394 			ftrace_bug_type = FTRACE_BUG_CALL;
2395 			return FTRACE_UPDATE_MAKE_CALL;
2396 		}
2397 
2398 		ftrace_bug_type = FTRACE_BUG_UPDATE;
2399 		return FTRACE_UPDATE_MODIFY_CALL;
2400 	}
2401 
2402 	if (update) {
2403 		/* If there's no more users, clear all flags */
2404 		if (!ftrace_rec_count(rec))
2405 			rec->flags &= FTRACE_NOCLEAR_FLAGS;
2406 		else
2407 			/*
2408 			 * Just disable the record, but keep the ops TRAMP
2409 			 * and REGS states. The _EN flags must be disabled though.
2410 			 */
2411 			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2412 					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN |
2413 					FTRACE_FL_CALL_OPS_EN);
2414 	}
2415 
2416 	ftrace_bug_type = FTRACE_BUG_NOP;
2417 	return FTRACE_UPDATE_MAKE_NOP;
2418 }
2419 
2420 /**
2421  * ftrace_update_record - set a record that now is tracing or not
2422  * @rec: the record to update
2423  * @enable: set to true if the record is tracing, false to force disable
2424  *
2425  * The records that represent all functions that can be traced need
2426  * to be updated when tracing has been enabled.
2427  */
2428 int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2429 {
2430 	return ftrace_check_record(rec, enable, true);
2431 }
2432 
2433 /**
2434  * ftrace_test_record - check if the record has been enabled or not
2435  * @rec: the record to test
2436  * @enable: set to true to check if enabled, false if it is disabled
2437  *
2438  * The arch code may need to test if a record is already set to
2439  * tracing to determine how to modify the function code that it
2440  * represents.
2441  */
2442 int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2443 {
2444 	return ftrace_check_record(rec, enable, false);
2445 }
2446 
2447 static struct ftrace_ops *
2448 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2449 {
2450 	struct ftrace_ops *op;
2451 	unsigned long ip = rec->ip;
2452 
2453 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2454 
2455 		if (!op->trampoline)
2456 			continue;
2457 
2458 		if (hash_contains_ip(ip, op->func_hash))
2459 			return op;
2460 	} while_for_each_ftrace_op(op);
2461 
2462 	return NULL;
2463 }
2464 
2465 static struct ftrace_ops *
2466 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2467 {
2468 	struct ftrace_ops *op;
2469 	unsigned long ip = rec->ip;
2470 
2471 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2472 
2473 		if (op == op_exclude || !op->trampoline)
2474 			continue;
2475 
2476 		if (hash_contains_ip(ip, op->func_hash))
2477 			return op;
2478 	} while_for_each_ftrace_op(op);
2479 
2480 	return NULL;
2481 }
2482 
2483 static struct ftrace_ops *
2484 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2485 			   struct ftrace_ops *op)
2486 {
2487 	unsigned long ip = rec->ip;
2488 
2489 	while_for_each_ftrace_op(op) {
2490 
2491 		if (!op->trampoline)
2492 			continue;
2493 
2494 		if (hash_contains_ip(ip, op->func_hash))
2495 			return op;
2496 	}
2497 
2498 	return NULL;
2499 }
2500 
2501 static struct ftrace_ops *
2502 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2503 {
2504 	struct ftrace_ops *op;
2505 	unsigned long ip = rec->ip;
2506 
2507 	/*
2508 	 * Need to check removed ops first.
2509 	 * If they are being removed, and this rec has a tramp,
2510 	 * and this rec is in the ops list, then it would be the
2511 	 * one with the tramp.
2512 	 */
2513 	if (removed_ops) {
2514 		if (hash_contains_ip(ip, &removed_ops->old_hash))
2515 			return removed_ops;
2516 	}
2517 
2518 	/*
2519 	 * Need to find the current trampoline for a rec.
2520 	 * Now, a trampoline is only attached to a rec if there
2521 	 * was a single 'ops' attached to it. But this can be called
2522 	 * when we are adding another op to the rec or removing the
2523 	 * current one. Thus, if the op is being added, we can
2524 	 * ignore it because it hasn't attached itself to the rec
2525 	 * yet.
2526 	 *
2527 	 * If an ops is being modified (hooking to different functions)
2528 	 * then we don't care about the new functions that are being
2529 	 * added, just the old ones (that are probably being removed).
2530 	 *
2531 	 * If we are adding an ops to a function that is already using
2532 	 * a trampoline, that trampoline needs to be removed (trampolines
2533 	 * are only for a single connected ops), so an ops that is not
2534 	 * being modified also needs to be checked.
2535 	 */
2536 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2537 
2538 		if (!op->trampoline)
2539 			continue;
2540 
2541 		/*
2542 		 * If the ops is being added, it hasn't gotten to
2543 		 * the point to be removed from this tree yet.
2544 		 */
2545 		if (op->flags & FTRACE_OPS_FL_ADDING)
2546 			continue;
2547 
2548 
2549 		/*
2550 		 * If the ops is being modified and is in the old
2551 		 * hash, then it is probably being removed from this
2552 		 * function.
2553 		 */
2554 		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2555 		    hash_contains_ip(ip, &op->old_hash))
2556 			return op;
2557 		/*
2558 		 * If the ops is not being added or modified, and it's
2559 		 * in its normal filter hash, then this must be the one
2560 		 * we want!
2561 		 */
2562 		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2563 		    hash_contains_ip(ip, op->func_hash))
2564 			return op;
2565 
2566 	} while_for_each_ftrace_op(op);
2567 
2568 	return NULL;
2569 }
2570 
2571 static struct ftrace_ops *
2572 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2573 {
2574 	struct ftrace_ops *op;
2575 	unsigned long ip = rec->ip;
2576 
2577 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2578 		/* pass rec in as regs to have non-NULL val */
2579 		if (hash_contains_ip(ip, op->func_hash))
2580 			return op;
2581 	} while_for_each_ftrace_op(op);
2582 
2583 	return NULL;
2584 }
2585 
2586 struct ftrace_ops *
2587 ftrace_find_unique_ops(struct dyn_ftrace *rec)
2588 {
2589 	struct ftrace_ops *op, *found = NULL;
2590 	unsigned long ip = rec->ip;
2591 
2592 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2593 
2594 		if (hash_contains_ip(ip, op->func_hash)) {
2595 			if (found)
2596 				return NULL;
2597 			found = op;
2598 		}
2599 
2600 	} while_for_each_ftrace_op(op);
2601 
2602 	return found;
2603 }
2604 
2605 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2606 /* Protected by rcu_tasks for reading, and direct_mutex for writing */
2607 static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
2608 static DEFINE_MUTEX(direct_mutex);
2609 
2610 /*
2611  * Search the direct_functions hash to see if the given instruction pointer
2612  * has a direct caller attached to it.
2613  */
2614 unsigned long ftrace_find_rec_direct(unsigned long ip)
2615 {
2616 	struct ftrace_func_entry *entry;
2617 
2618 	entry = __ftrace_lookup_ip(direct_functions, ip);
2619 	if (!entry)
2620 		return 0;
2621 
2622 	return entry->direct;
2623 }
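
/*
 * Editor-added sketch: a minimal (hypothetical) helper showing how the
 * lookup above answers "does this call site already have a direct
 * trampoline?". A return value of 0 means no direct caller is attached.
 */
static inline bool example_ip_has_direct_caller(unsigned long ip)
{
	return ftrace_find_rec_direct(ip) != 0;
}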
2624 
2625 static void call_direct_funcs(unsigned long ip, unsigned long pip,
2626 			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
2627 {
2628 	unsigned long addr = READ_ONCE(ops->direct_call);
2629 
2630 	if (!addr)
2631 		return;
2632 
2633 	arch_ftrace_set_direct_caller(fregs, addr);
2634 }
2635 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
2636 
2637 /**
2638  * ftrace_get_addr_new - Get the call address to set to
2639  * @rec:  The ftrace record descriptor
2640  *
2641  * If the record has the FTRACE_FL_REGS set, that means that it
2642  * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2643  * is not set, then it wants to convert to the normal callback.
2644  *
2645  * Returns: the address of the trampoline to set to
2646  */
2647 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2648 {
2649 	struct ftrace_ops *ops;
2650 	unsigned long addr;
2651 
2652 	if ((rec->flags & FTRACE_FL_DIRECT) &&
2653 	    (ftrace_rec_count(rec) == 1)) {
2654 		addr = ftrace_find_rec_direct(rec->ip);
2655 		if (addr)
2656 			return addr;
2657 		WARN_ON_ONCE(1);
2658 	}
2659 
2660 	/* Trampolines take precedence over regs */
2661 	if (rec->flags & FTRACE_FL_TRAMP) {
2662 		ops = ftrace_find_tramp_ops_new(rec);
2663 		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2664 			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2665 				(void *)rec->ip, (void *)rec->ip, rec->flags);
2666 			/* Ftrace is shutting down, return anything */
2667 			return (unsigned long)FTRACE_ADDR;
2668 		}
2669 		return ops->trampoline;
2670 	}
2671 
2672 	if (rec->flags & FTRACE_FL_REGS)
2673 		return (unsigned long)FTRACE_REGS_ADDR;
2674 	else
2675 		return (unsigned long)FTRACE_ADDR;
2676 }
2677 
2678 /**
2679  * ftrace_get_addr_curr - Get the call address that is already there
2680  * @rec:  The ftrace record descriptor
2681  *
2682  * The FTRACE_FL_REGS_EN is set when the record already points to
2683  * a function that saves all the regs. Basically the '_EN' version
2684  * represents the current state of the function.
2685  *
2686  * Returns: the address of the trampoline that is currently being called
2687  */
2688 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2689 {
2690 	struct ftrace_ops *ops;
2691 	unsigned long addr;
2692 
2693 	/* Direct calls take precedence over trampolines */
2694 	if (rec->flags & FTRACE_FL_DIRECT_EN) {
2695 		addr = ftrace_find_rec_direct(rec->ip);
2696 		if (addr)
2697 			return addr;
2698 		WARN_ON_ONCE(1);
2699 	}
2700 
2701 	/* Trampolines take precedence over regs */
2702 	if (rec->flags & FTRACE_FL_TRAMP_EN) {
2703 		ops = ftrace_find_tramp_ops_curr(rec);
2704 		if (FTRACE_WARN_ON(!ops)) {
2705 			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2706 				(void *)rec->ip, (void *)rec->ip);
2707 			/* Ftrace is shutting down, return anything */
2708 			return (unsigned long)FTRACE_ADDR;
2709 		}
2710 		return ops->trampoline;
2711 	}
2712 
2713 	if (rec->flags & FTRACE_FL_REGS_EN)
2714 		return (unsigned long)FTRACE_REGS_ADDR;
2715 	else
2716 		return (unsigned long)FTRACE_ADDR;
2717 }
2718 
2719 static int
2720 __ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2721 {
2722 	unsigned long ftrace_old_addr;
2723 	unsigned long ftrace_addr;
2724 	int ret;
2725 
2726 	ftrace_addr = ftrace_get_addr_new(rec);
2727 
2728 	/* This needs to be done before we call ftrace_update_record */
2729 	ftrace_old_addr = ftrace_get_addr_curr(rec);
2730 
2731 	ret = ftrace_update_record(rec, enable);
2732 
2733 	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2734 
2735 	switch (ret) {
2736 	case FTRACE_UPDATE_IGNORE:
2737 		return 0;
2738 
2739 	case FTRACE_UPDATE_MAKE_CALL:
2740 		ftrace_bug_type = FTRACE_BUG_CALL;
2741 		return ftrace_make_call(rec, ftrace_addr);
2742 
2743 	case FTRACE_UPDATE_MAKE_NOP:
2744 		ftrace_bug_type = FTRACE_BUG_NOP;
2745 		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2746 
2747 	case FTRACE_UPDATE_MODIFY_CALL:
2748 		ftrace_bug_type = FTRACE_BUG_UPDATE;
2749 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2750 	}
2751 
2752 	return -1; /* unknown ftrace bug */
2753 }
2754 
2755 void __weak ftrace_replace_code(int mod_flags)
2756 {
2757 	struct dyn_ftrace *rec;
2758 	struct ftrace_page *pg;
2759 	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2760 	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2761 	int failed;
2762 
2763 	if (unlikely(ftrace_disabled))
2764 		return;
2765 
2766 	do_for_each_ftrace_rec(pg, rec) {
2767 
2768 		if (skip_record(rec))
2769 			continue;
2770 
2771 		failed = __ftrace_replace_code(rec, enable);
2772 		if (failed) {
2773 			ftrace_bug(failed, rec);
2774 			/* Stop processing */
2775 			return;
2776 		}
2777 		if (schedulable)
2778 			cond_resched();
2779 	} while_for_each_ftrace_rec();
2780 }
2781 
2782 struct ftrace_rec_iter {
2783 	struct ftrace_page	*pg;
2784 	int			index;
2785 };
2786 
2787 /**
2788  * ftrace_rec_iter_start - start up iterating over traced functions
2789  *
2790  * Returns: an iterator handle that is used to iterate over all
2791  * the records that represent address locations where functions
2792  * are traced.
2793  *
2794  * May return NULL if no records are available.
2795  */
2796 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2797 {
2798 	/*
2799 	 * We only use a single iterator.
2800 	 * Protected by the ftrace_lock mutex.
2801 	 */
2802 	static struct ftrace_rec_iter ftrace_rec_iter;
2803 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2804 
2805 	iter->pg = ftrace_pages_start;
2806 	iter->index = 0;
2807 
2808 	/* Could have empty pages */
2809 	while (iter->pg && !iter->pg->index)
2810 		iter->pg = iter->pg->next;
2811 
2812 	if (!iter->pg)
2813 		return NULL;
2814 
2815 	return iter;
2816 }
2817 
2818 /**
2819  * ftrace_rec_iter_next - get the next record to process.
2820  * @iter: The handle to the iterator.
2821  *
2822  * Returns: the next iterator after the given iterator @iter.
2823  */
2824 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2825 {
2826 	iter->index++;
2827 
2828 	if (iter->index >= iter->pg->index) {
2829 		iter->pg = iter->pg->next;
2830 		iter->index = 0;
2831 
2832 		/* Could have empty pages */
2833 		while (iter->pg && !iter->pg->index)
2834 			iter->pg = iter->pg->next;
2835 	}
2836 
2837 	if (!iter->pg)
2838 		return NULL;
2839 
2840 	return iter;
2841 }
2842 
2843 /**
2844  * ftrace_rec_iter_record - get the record at the iterator location
2845  * @iter: The current iterator location
2846  *
2847  * Returns: the record that the current @iter is at.
2848  */
2849 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2850 {
2851 	return &iter->pg->records[iter->index];
2852 }
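
/*
 * Editor-added sketch (hypothetical function name): the typical way
 * arch code walks every traced call site with the iterator API above,
 * under ftrace_lock since the iterator is a single static instance.
 */
static void example_walk_records(void)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		/* inspect or patch the call site at rec->ip here */
	}
}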
2853 
2854 static int
2855 ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2856 {
2857 	int ret;
2858 
2859 	if (unlikely(ftrace_disabled))
2860 		return 0;
2861 
2862 	ret = ftrace_init_nop(mod, rec);
2863 	if (ret) {
2864 		ftrace_bug_type = FTRACE_BUG_INIT;
2865 		ftrace_bug(ret, rec);
2866 		return 0;
2867 	}
2868 	return 1;
2869 }
2870 
2871 /*
2872  * archs can override this function if they must do something
2873  * before the code modification is performed.
2874  */
2875 void __weak ftrace_arch_code_modify_prepare(void)
2876 {
2877 }
2878 
2879 /*
2880  * archs can override this function if they must do something
2881  * after the code modification is performed.
2882  */
2883 void __weak ftrace_arch_code_modify_post_process(void)
2884 {
2885 }
2886 
2887 static int update_ftrace_func(ftrace_func_t func)
2888 {
2889 	static ftrace_func_t save_func;
2890 
2891 	/* Avoid updating if it hasn't changed */
2892 	if (func == save_func)
2893 		return 0;
2894 
2895 	save_func = func;
2896 
2897 	return ftrace_update_ftrace_func(func);
2898 }
2899 
2900 void ftrace_modify_all_code(int command)
2901 {
2902 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2903 	int mod_flags = 0;
2904 	int err = 0;
2905 
2906 	if (command & FTRACE_MAY_SLEEP)
2907 		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2908 
2909 	/*
2910 	 * If the ftrace_caller calls an ftrace_ops func directly,
2911 	 * we need to make sure that it only traces functions it
2912 	 * expects to trace. When doing the switch of functions,
2913 	 * we need to update to the ftrace_ops_list_func first,
2914 	 * before the transition between old and new calls is set,
2915 	 * as the ftrace_ops_list_func will check the ops hashes
2916 	 * to make sure the ops have the right functions
2917 	 * traced.
2918 	 */
2919 	if (update) {
2920 		err = update_ftrace_func(ftrace_ops_list_func);
2921 		if (FTRACE_WARN_ON(err))
2922 			return;
2923 	}
2924 
2925 	if (command & FTRACE_UPDATE_CALLS)
2926 		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2927 	else if (command & FTRACE_DISABLE_CALLS)
2928 		ftrace_replace_code(mod_flags);
2929 
2930 	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2931 		function_trace_op = set_function_trace_op;
2932 		smp_wmb();
2933 		/* If irqs are disabled, we are in stop machine */
2934 		if (!irqs_disabled())
2935 			smp_call_function(ftrace_sync_ipi, NULL, 1);
2936 		err = update_ftrace_func(ftrace_trace_function);
2937 		if (FTRACE_WARN_ON(err))
2938 			return;
2939 	}
2940 
2941 	if (command & FTRACE_START_FUNC_RET)
2942 		err = ftrace_enable_ftrace_graph_caller();
2943 	else if (command & FTRACE_STOP_FUNC_RET)
2944 		err = ftrace_disable_ftrace_graph_caller();
2945 	FTRACE_WARN_ON(err);
2946 }
2947 
2948 static int __ftrace_modify_code(void *data)
2949 {
2950 	int *command = data;
2951 
2952 	ftrace_modify_all_code(*command);
2953 
2954 	return 0;
2955 }
2956 
2957 /**
2958  * ftrace_run_stop_machine - go back to the stop machine method
2959  * @command: The command to tell ftrace what to do
2960  *
2961  * If an arch needs to fall back to the stop machine method, then
2962  * it can call this function.
2963  */
2964 void ftrace_run_stop_machine(int command)
2965 {
2966 	stop_machine(__ftrace_modify_code, &command, NULL);
2967 }
2968 
2969 /**
2970  * arch_ftrace_update_code - modify the code to trace or not trace
2971  * @command: The command that needs to be done
2972  *
2973  * Archs can override this function if they do not need to
2974  * run stop_machine() to modify code.
2975  */
2976 void __weak arch_ftrace_update_code(int command)
2977 {
2978 	ftrace_run_stop_machine(command);
2979 }
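
/*
 * Editor-added note: an arch that can patch text safely while other
 * CPUs run would override the weak function above in its own code,
 * along the lines of this sketch (x86, for instance, overrides it to
 * call ftrace_modify_all_code() directly):
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 *
 * leaving the stop_machine() path above only as the default fallback.
 */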
2980 
2981 static void ftrace_run_update_code(int command)
2982 {
2983 	ftrace_arch_code_modify_prepare();
2984 
2985 	/*
2986 	 * By default we use stop_machine() to modify the code.
2987 	 * But archs can do whatever they want as long as it
2988 	 * is safe. The stop_machine() is the safest, but also
2989 	 * produces the most overhead.
2990 	 */
2991 	arch_ftrace_update_code(command);
2992 
2993 	ftrace_arch_code_modify_post_process();
2994 }
2995 
2996 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2997 				   struct ftrace_ops_hash *old_hash)
2998 {
2999 	ops->flags |= FTRACE_OPS_FL_MODIFYING;
3000 	ops->old_hash.filter_hash = old_hash->filter_hash;
3001 	ops->old_hash.notrace_hash = old_hash->notrace_hash;
3002 	ftrace_run_update_code(command);
3003 	ops->old_hash.filter_hash = NULL;
3004 	ops->old_hash.notrace_hash = NULL;
3005 	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
3006 }
3007 
3008 static ftrace_func_t saved_ftrace_func;
3009 static int ftrace_start_up;
3010 
3011 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
3012 {
3013 }
3014 
3015 /* List of trace_ops that have allocated trampolines */
3016 static LIST_HEAD(ftrace_ops_trampoline_list);
3017 
3018 static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
3019 {
3020 	lockdep_assert_held(&ftrace_lock);
3021 	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
3022 }
3023 
3024 static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
3025 {
3026 	lockdep_assert_held(&ftrace_lock);
3027 	list_del_rcu(&ops->list);
3028 	synchronize_rcu();
3029 }
3030 
3031 /*
3032  * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
3033  * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
3034  * not a module.
3035  */
3036 #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
3037 #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
3038 
3039 static void ftrace_trampoline_free(struct ftrace_ops *ops)
3040 {
3041 	if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
3042 	    ops->trampoline) {
3043 		/*
3044 		 * Record the text poke event before the ksymbol unregister
3045 		 * event.
3046 		 */
3047 		perf_event_text_poke((void *)ops->trampoline,
3048 				     (void *)ops->trampoline,
3049 				     ops->trampoline_size, NULL, 0);
3050 		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
3051 				   ops->trampoline, ops->trampoline_size,
3052 				   true, FTRACE_TRAMPOLINE_SYM);
3053 		/* Remove from kallsyms after the perf events */
3054 		ftrace_remove_trampoline_from_kallsyms(ops);
3055 	}
3056 
3057 	arch_ftrace_trampoline_free(ops);
3058 }
3059 
3060 static void ftrace_startup_enable(int command)
3061 {
3062 	if (saved_ftrace_func != ftrace_trace_function) {
3063 		saved_ftrace_func = ftrace_trace_function;
3064 		command |= FTRACE_UPDATE_TRACE_FUNC;
3065 	}
3066 
3067 	if (!command || !ftrace_enabled)
3068 		return;
3069 
3070 	ftrace_run_update_code(command);
3071 }
3072 
3073 static void ftrace_startup_all(int command)
3074 {
3075 	update_all_ops = true;
3076 	ftrace_startup_enable(command);
3077 	update_all_ops = false;
3078 }
3079 
3080 int ftrace_startup(struct ftrace_ops *ops, int command)
3081 {
3082 	int ret;
3083 
3084 	if (unlikely(ftrace_disabled))
3085 		return -ENODEV;
3086 
3087 	ret = __register_ftrace_function(ops);
3088 	if (ret)
3089 		return ret;
3090 
3091 	ftrace_start_up++;
3092 
3093 	/*
3094 	 * Note that ftrace probes use this to start up
3095 	 * and modify functions it will probe. But we still
3096 	 * set the ADDING flag for modification, as probes
3097 	 * do not have trampolines. If they add them in the
3098 	 * future, then the probes will need to distinguish
3099 	 * between adding and updating probes.
3100 	 */
3101 	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
3102 
3103 	ret = ftrace_hash_ipmodify_enable(ops);
3104 	if (ret < 0) {
3105 		/* Rollback registration process */
3106 		__unregister_ftrace_function(ops);
3107 		ftrace_start_up--;
3108 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3109 		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
3110 			ftrace_trampoline_free(ops);
3111 		return ret;
3112 	}
3113 
3114 	if (ftrace_hash_rec_enable(ops))
3115 		command |= FTRACE_UPDATE_CALLS;
3116 
3117 	ftrace_startup_enable(command);
3118 
3119 	/*
3120 	 * If ftrace is in an undefined state, we just remove the ops from the
3121 	 * list to prevent a NULL pointer dereference, instead of totally
3122 	 * rolling it back and freeing the trampoline, which could cause further damage.
3123 	 */
3124 	if (unlikely(ftrace_disabled)) {
3125 		__unregister_ftrace_function(ops);
3126 		return -ENODEV;
3127 	}
3128 
3129 	ops->flags &= ~FTRACE_OPS_FL_ADDING;
3130 
3131 	return 0;
3132 }
3133 
3134 int ftrace_shutdown(struct ftrace_ops *ops, int command)
3135 {
3136 	int ret;
3137 
3138 	if (unlikely(ftrace_disabled))
3139 		return -ENODEV;
3140 
3141 	ret = __unregister_ftrace_function(ops);
3142 	if (ret)
3143 		return ret;
3144 
3145 	ftrace_start_up--;
3146 	/*
3147 	 * Just warn in case of an imbalance; no need to kill ftrace. It's not
3148 	 * critical, but the ftrace_call callers may never be nopped again after
3149 	 * further ftrace uses.
3150 	 */
3151 	WARN_ON_ONCE(ftrace_start_up < 0);
3152 
3153 	/* Disabling ipmodify never fails */
3154 	ftrace_hash_ipmodify_disable(ops);
3155 
3156 	if (ftrace_hash_rec_disable(ops))
3157 		command |= FTRACE_UPDATE_CALLS;
3158 
3159 	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3160 
3161 	if (saved_ftrace_func != ftrace_trace_function) {
3162 		saved_ftrace_func = ftrace_trace_function;
3163 		command |= FTRACE_UPDATE_TRACE_FUNC;
3164 	}
3165 
3166 	if (!command || !ftrace_enabled)
3167 		goto out;
3168 
3169 	/*
3170 	 * If the ops uses a trampoline, then it needs to be
3171 	 * tested first on update.
3172 	 */
3173 	ops->flags |= FTRACE_OPS_FL_REMOVING;
3174 	removed_ops = ops;
3175 
3176 	/* The trampoline logic checks the old hashes */
3177 	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
3178 	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
3179 
3180 	ftrace_run_update_code(command);
3181 
3182 	/*
3183 	 * If there's no more ops registered with ftrace, run a
3184 	 * sanity check to make sure all rec flags are cleared.
3185 	 */
3186 	if (rcu_dereference_protected(ftrace_ops_list,
3187 			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
3188 		struct ftrace_page *pg;
3189 		struct dyn_ftrace *rec;
3190 
3191 		do_for_each_ftrace_rec(pg, rec) {
3192 			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS))
3193 				pr_warn("  %pS flags:%lx\n",
3194 					(void *)rec->ip, rec->flags);
3195 		} while_for_each_ftrace_rec();
3196 	}
3197 
3198 	ops->old_hash.filter_hash = NULL;
3199 	ops->old_hash.notrace_hash = NULL;
3200 
3201 	removed_ops = NULL;
3202 	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
3203 
3204 out:
3205 	/*
3206 	 * Dynamic ops may be freed; we must make sure that all
3207 	 * callers are done before leaving this function.
3208 	 */
3209 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
3210 		/*
3211 		 * We need to do a hard force of sched synchronization.
3212 		 * This is because we use preempt_disable() to do RCU, but
3213 		 * the function tracers can be called where RCU is not watching
3214 		 * (like before user_exit()). We cannot rely on the RCU
3215 		 * infrastructure to do the synchronization, thus we must do it
3216 		 * ourselves.
3217 		 */
3218 		synchronize_rcu_tasks_rude();
3219 
3220 		/*
3221 		 * When the kernel is preemptible, tasks can be preempted
3222 		 * while on an ftrace trampoline. Just scheduling a task on
3223 		 * a CPU is not good enough to flush them. Calling
3224 		 * synchronize_rcu_tasks() will wait for those tasks to
3225 		 * execute and either schedule voluntarily or enter user space.
3226 		 */
3227 		synchronize_rcu_tasks();
3228 
3229 		ftrace_trampoline_free(ops);
3230 	}
3231 
3232 	return 0;
3233 }
3234 
3235 /* Simply make a copy of @src and return it */
3236 static struct ftrace_hash *copy_hash(struct ftrace_hash *src)
3237 {
3238 	if (ftrace_hash_empty(src))
3239 		return EMPTY_HASH;
3240 
3241 	return alloc_and_copy_ftrace_hash(src->size_bits, src);
3242 }
3243 
3244 /*
3245  * Append @new_hash entries to @hash:
3246  *
3247  *  If @hash is the EMPTY_HASH then it traces all functions and nothing
3248  *  needs to be done.
3249  *
3250  *  If @new_hash is the EMPTY_HASH, then make *hash the EMPTY_HASH so
3251  *  that it traces everything.
3252  *
3253  *  Otherwise, go through all of @new_hash and add anything that @hash
3254  *  doesn't already have, to @hash.
3255  *
3256  *  The filter_hash updates use just the append_hash() function;
3257  *  the notrace_hash updates do not.
3258  */
3259 static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash,
3260 		       int size_bits)
3261 {
3262 	struct ftrace_func_entry *entry;
3263 	int size;
3264 	int i;
3265 
3266 	if (*hash) {
3267 		/* An empty hash does everything */
3268 		if (ftrace_hash_empty(*hash))
3269 			return 0;
3270 	} else {
3271 		*hash = alloc_ftrace_hash(size_bits);
3272 		if (!*hash)
3273 			return -ENOMEM;
3274 	}
3275 
3276 	/* If new_hash has everything make hash have everything */
3277 	if (ftrace_hash_empty(new_hash)) {
3278 		free_ftrace_hash(*hash);
3279 		*hash = EMPTY_HASH;
3280 		return 0;
3281 	}
3282 
3283 	size = 1 << new_hash->size_bits;
3284 	for (i = 0; i < size; i++) {
3285 		hlist_for_each_entry(entry, &new_hash->buckets[i], hlist) {
3286 			/* Only add if not already in hash */
3287 			if (!__ftrace_lookup_ip(*hash, entry->ip) &&
3288 			    add_hash_entry(*hash, entry->ip) == NULL)
3289 				return -ENOMEM;
3290 		}
3291 	}
3292 	return 0;
3293 }
3294 
3295 /*
3296  * Remove functions from @hash that are in @notrace_hash
3297  */
3298 static void remove_hash(struct ftrace_hash *hash, struct ftrace_hash *notrace_hash)
3299 {
3300 	struct ftrace_func_entry *entry;
3301 	struct hlist_node *tmp;
3302 	int size;
3303 	int i;
3304 
3305 	/* If the notrace hash is empty, there's nothing to do */
3306 	if (ftrace_hash_empty(notrace_hash))
3307 		return;
3308 
3309 	size = 1 << hash->size_bits;
3310 	for (i = 0; i < size; i++) {
3311 		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
3312 			if (!__ftrace_lookup_ip(notrace_hash, entry->ip))
3313 				continue;
3314 			remove_hash_entry(hash, entry);
3315 			kfree(entry);
3316 		}
3317 	}
3318 }
3319 
3320 /*
3321  * Add to @hash only those that are in both @new_hash1 and @new_hash2
3322  *
3323  * The notrace_hash updates use just the intersect_hash() function;
3324  * the filter_hash updates do not.
3325  */
3326 static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash1,
3327 			  struct ftrace_hash *new_hash2)
3328 {
3329 	struct ftrace_func_entry *entry;
3330 	int size;
3331 	int i;
3332 
3333 	/*
3334 	 * If new_hash1 or new_hash2 is the EMPTY_HASH then make the hash
3335 	 * empty as well, as an empty notrace hash means nothing is notraced.
3336 	 */
3337 	if (ftrace_hash_empty(new_hash1) || ftrace_hash_empty(new_hash2)) {
3338 		free_ftrace_hash(*hash);
3339 		*hash = EMPTY_HASH;
3340 		return 0;
3341 	}
3342 
3343 	size = 1 << new_hash1->size_bits;
3344 	for (i = 0; i < size; i++) {
3345 		hlist_for_each_entry(entry, &new_hash1->buckets[i], hlist) {
3346 			/* Only add if in both @new_hash1 and @new_hash2 */
3347 			if (__ftrace_lookup_ip(new_hash2, entry->ip) &&
3348 			    add_hash_entry(*hash, entry->ip) == NULL)
3349 				return -ENOMEM;
3350 		}
3351 	}
3352 	/* If nothing intersects, make it the empty set */
3353 	if (ftrace_hash_empty(*hash)) {
3354 		free_ftrace_hash(*hash);
3355 		*hash = EMPTY_HASH;
3356 	}
3357 	return 0;
3358 }
3359 
3360 static bool ops_equal(struct ftrace_hash *A, struct ftrace_hash *B)
3361 {
3362 	struct ftrace_func_entry *entry;
3363 	int size;
3364 	int i;
3365 
3366 	if (ftrace_hash_empty(A))
3367 		return ftrace_hash_empty(B);
3368 
3369 	if (ftrace_hash_empty(B))
3370 		return ftrace_hash_empty(A);
3371 
3372 	if (A->count != B->count)
3373 		return false;
3374 
3375 	size = 1 << A->size_bits;
3376 	for (i = 0; i < size; i++) {
3377 		hlist_for_each_entry(entry, &A->buckets[i], hlist) {
3378 			if (!__ftrace_lookup_ip(B, entry->ip))
3379 				return false;
3380 		}
3381 	}
3382 
3383 	return true;
3384 }
3385 
3386 static void ftrace_ops_update_code(struct ftrace_ops *ops,
3387 				   struct ftrace_ops_hash *old_hash);
3388 
3389 static int __ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3390 					     struct ftrace_hash **orig_hash,
3391 					     struct ftrace_hash *hash,
3392 					     int enable)
3393 {
3394 	struct ftrace_ops_hash old_hash_ops;
3395 	struct ftrace_hash *old_hash;
3396 	int ret;
3397 
3398 	old_hash = *orig_hash;
3399 	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3400 	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3401 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3402 	if (!ret) {
3403 		ftrace_ops_update_code(ops, &old_hash_ops);
3404 		free_ftrace_hash_rcu(old_hash);
3405 	}
3406 	return ret;
3407 }
3408 
3409 static int ftrace_update_ops(struct ftrace_ops *ops, struct ftrace_hash *filter_hash,
3410 			     struct ftrace_hash *notrace_hash)
3411 {
3412 	int ret;
3413 
3414 	if (!ops_equal(filter_hash, ops->func_hash->filter_hash)) {
3415 		ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->filter_hash,
3416 							filter_hash, 1);
3417 		if (ret < 0)
3418 			return ret;
3419 	}
3420 
3421 	if (!ops_equal(notrace_hash, ops->func_hash->notrace_hash)) {
3422 		ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->notrace_hash,
3423 							notrace_hash, 0);
3424 		if (ret < 0)
3425 			return ret;
3426 	}
3427 
3428 	return 0;
3429 }
3430 
3431 static int add_first_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash,
3432 			  struct ftrace_ops_hash *func_hash)
3433 {
3434 	/* If the filter hash is not empty, simply remove the notrace hash from it */
3435 	if (!ftrace_hash_empty(func_hash->filter_hash)) {
3436 		*filter_hash = copy_hash(func_hash->filter_hash);
3437 		if (!*filter_hash)
3438 			return -ENOMEM;
3439 		remove_hash(*filter_hash, func_hash->notrace_hash);
3440 		*notrace_hash = EMPTY_HASH;
3441 
3442 	} else {
3443 		*notrace_hash = copy_hash(func_hash->notrace_hash);
3444 		if (!*notrace_hash)
3445 			return -ENOMEM;
3446 		*filter_hash = EMPTY_HASH;
3447 	}
3448 	return 0;
3449 }
3450 
3451 static int add_next_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash,
3452 			 struct ftrace_ops_hash *ops_hash, struct ftrace_ops_hash *subops_hash)
3453 {
3454 	int size_bits;
3455 	int ret;
3456 
3457 	/* If the subops trace all functions so must the main ops */
3458 	/* If the subops traces all functions, so must the main ops */
3459 	    ftrace_hash_empty(subops_hash->filter_hash)) {
3460 		*filter_hash = EMPTY_HASH;
3461 	} else {
3462 		/*
3463 		 * The main ops filter hash is not empty, so its
3464 		 * notrace_hash had better be, as the notrace hash
3465 		 * is only used for empty main filter hashes.
3466 		 */
3467 		WARN_ON_ONCE(!ftrace_hash_empty(ops_hash->notrace_hash));
3468 
3469 		size_bits = max(ops_hash->filter_hash->size_bits,
3470 				subops_hash->filter_hash->size_bits);
3471 
3472 		/* Copy the subops hash */
3473 		*filter_hash = alloc_and_copy_ftrace_hash(size_bits, subops_hash->filter_hash);
3474 		if (!*filter_hash)
3475 			return -ENOMEM;
3476 		/* Remove any notrace functions from the copy */
3477 		remove_hash(*filter_hash, subops_hash->notrace_hash);
3478 
3479 		ret = append_hash(filter_hash, ops_hash->filter_hash,
3480 				  size_bits);
3481 		if (ret < 0) {
3482 			free_ftrace_hash(*filter_hash);
3483 			*filter_hash = EMPTY_HASH;
3484 			return ret;
3485 		}
3486 	}
3487 
3488 	/*
3489 	 * Only process notrace hashes if the main filter hash is empty
3490 	 * (tracing all functions), otherwise the filter hash will just
3491 	 * remove the notrace hash functions, and the notrace hash is
3492 	 * not needed.
3493 	 */
3494 	if (ftrace_hash_empty(*filter_hash)) {
3495 		/*
3496 		 * Intersect the notrace functions. That is, if two
3497 		 * subops are not tracing a set of functions, the
3498 		 * main ops will only not trace the functions that are
3499 		 * in both subops, but it has to trace the functions that
3500 		 * are only notrace in one of the subops, for the other
3501 		 * subops to be able to trace them.
3502 		 */
3503 		size_bits = max(ops_hash->notrace_hash->size_bits,
3504 				subops_hash->notrace_hash->size_bits);
3505 		*notrace_hash = alloc_ftrace_hash(size_bits);
3506 		if (!*notrace_hash)
3507 			return -ENOMEM;
3508 
3509 		ret = intersect_hash(notrace_hash, ops_hash->notrace_hash,
3510 				     subops_hash->notrace_hash);
3511 		if (ret < 0) {
3512 			free_ftrace_hash(*notrace_hash);
3513 			*notrace_hash = EMPTY_HASH;
3514 			return ret;
3515 		}
3516 	}
3517 	return 0;
3518 }
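
/*
 * Editor-added worked example (hypothetical function sets): if subops A
 * filters {f1, f2} and subops B filters {f2, f3}, append_hash() gives
 * the manager ops the union {f1, f2, f3}. If instead both filter hashes
 * are empty, with A notracing {f1, f2} and B notracing {f2, f3}, then
 * intersect_hash() leaves only {f2} in the manager's notrace hash:
 * f1 must still be traced for B's sake, and f3 for A's.
 */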
3519 
3520 /**
3521  * ftrace_startup_subops - enable tracing for subops of an ops
3522  * @ops: Manager ops (used to pick all the functions of its subops)
3523  * @subops: A new ops to add to @ops
3524  * @command: Extra commands to use to enable tracing
3525  *
3526  * The @ops is a manager @ops that has the filter that includes all the functions
3527  * that its list of subops are tracing. Adding a new @subops will add the
3528  * functions of @subops to @ops.
3529  */
3530 int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
3531 {
3532 	struct ftrace_hash *filter_hash = EMPTY_HASH;
3533 	struct ftrace_hash *notrace_hash = EMPTY_HASH;
3534 	struct ftrace_hash *save_filter_hash;
3535 	struct ftrace_hash *save_notrace_hash;
3536 	int ret;
3537 
3538 	if (unlikely(ftrace_disabled))
3539 		return -ENODEV;
3540 
3541 	ftrace_ops_init(ops);
3542 	ftrace_ops_init(subops);
3543 
3544 	if (WARN_ON_ONCE(subops->flags & FTRACE_OPS_FL_ENABLED))
3545 		return -EBUSY;
3546 
3547 	/* Make everything canonical (Just in case!) */
3548 	if (!ops->func_hash->filter_hash)
3549 		ops->func_hash->filter_hash = EMPTY_HASH;
3550 	if (!ops->func_hash->notrace_hash)
3551 		ops->func_hash->notrace_hash = EMPTY_HASH;
3552 	if (!subops->func_hash->filter_hash)
3553 		subops->func_hash->filter_hash = EMPTY_HASH;
3554 	if (!subops->func_hash->notrace_hash)
3555 		subops->func_hash->notrace_hash = EMPTY_HASH;
3556 
3557 	/* For the first subops attached to the ops, just enable it normally */
3558 	if (list_empty(&ops->subop_list)) {
3559 
3560 		/* The ops was empty, should have empty hashes */
3561 		WARN_ON_ONCE(!ftrace_hash_empty(ops->func_hash->filter_hash));
3562 		WARN_ON_ONCE(!ftrace_hash_empty(ops->func_hash->notrace_hash));
3563 
3564 		ret = add_first_hash(&filter_hash, &notrace_hash, subops->func_hash);
3565 		if (ret < 0)
3566 			return ret;
3567 
3568 		save_filter_hash = ops->func_hash->filter_hash;
3569 		save_notrace_hash = ops->func_hash->notrace_hash;
3570 
3571 		ops->func_hash->filter_hash = filter_hash;
3572 		ops->func_hash->notrace_hash = notrace_hash;
3573 		list_add(&subops->list, &ops->subop_list);
3574 		ret = ftrace_startup(ops, command);
3575 		if (ret < 0) {
3576 			list_del(&subops->list);
3577 			ops->func_hash->filter_hash = save_filter_hash;
3578 			ops->func_hash->notrace_hash = save_notrace_hash;
3579 			free_ftrace_hash(filter_hash);
3580 			free_ftrace_hash(notrace_hash);
3581 		} else {
3582 			free_ftrace_hash(save_filter_hash);
3583 			free_ftrace_hash(save_notrace_hash);
3584 			subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
3585 			subops->managed = ops;
3586 		}
3587 		return ret;
3588 	}
3589 
3590 	/*
3591 	 * At this point something is already attached. The rules are:
3592 	 *   If the new subops and main ops filter hashes are not empty:
3593 	 *     o Make a copy of the subops filter hash
3594 	 *     o Remove all functions in the subops notrace hash from it.
3595 	 *     o Add in the main hash filter functions
3596 	 *     o Remove any of these functions from the main notrace hash
3597 	 */
3598 
3599 	ret = add_next_hash(&filter_hash, &notrace_hash, ops->func_hash, subops->func_hash);
3600 	if (ret < 0)
3601 		return ret;
3602 
3603 	list_add(&subops->list, &ops->subop_list);
3604 
3605 	ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
3606 	free_ftrace_hash(filter_hash);
3607 	free_ftrace_hash(notrace_hash);
3608 	if (ret < 0) {
3609 		list_del(&subops->list);
3610 	} else {
3611 		subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
3612 		subops->managed = ops;
3613 	}
3614 	return ret;
3615 }
3616 
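/*
 * Usage sketch (my_func, my_subops and manager_ops are hypothetical).  A
 * client such as the function graph tracer attaches a subops so that one
 * manager trampoline covers the union of all of its subops' filters:
 *
 *	static struct ftrace_ops my_subops = {
 *		.func	= my_func,
 *	};
 *
 *	ftrace_set_filter(&my_subops, "schedule", strlen("schedule"), 1);
 *	ret = ftrace_startup_subops(&manager_ops, &my_subops, 0);
 *
 * On success the subops is flagged FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP
 * and its ->managed pointer is set back to the manager ops.
 */
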
3617 static int rebuild_hashes(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash,
3618 			  struct ftrace_ops *ops)
3619 {
3620 	struct ftrace_ops_hash temp_hash;
3621 	struct ftrace_ops *subops;
3622 	bool first = true;
3623 	int ret;
3624 
3625 	temp_hash.filter_hash = EMPTY_HASH;
3626 	temp_hash.notrace_hash = EMPTY_HASH;
3627 
3628 	list_for_each_entry(subops, &ops->subop_list, list) {
3629 		*filter_hash = EMPTY_HASH;
3630 		*notrace_hash = EMPTY_HASH;
3631 
3632 		if (first) {
3633 			ret = add_first_hash(filter_hash, notrace_hash, subops->func_hash);
3634 			if (ret < 0)
3635 				return ret;
3636 			first = false;
3637 		} else {
3638 			ret = add_next_hash(filter_hash, notrace_hash,
3639 					    &temp_hash, subops->func_hash);
3640 			if (ret < 0) {
3641 				free_ftrace_hash(temp_hash.filter_hash);
3642 				free_ftrace_hash(temp_hash.notrace_hash);
3643 				return ret;
3644 			}
3645 		}
3646 
3647 		free_ftrace_hash(temp_hash.filter_hash);
3648 		free_ftrace_hash(temp_hash.notrace_hash);
3649 
3650 		temp_hash.filter_hash = *filter_hash;
3651 		temp_hash.notrace_hash = *notrace_hash;
3652 	}
3653 	return 0;
3654 }
3655 
3656 /**
3657  * ftrace_shutdown_subops - Remove a subops from a manager ops
3658  * @ops: A manager ops to remove @subops from
3659  * @subops: The subops to remove from @ops
3660  * @command: Any extra command flags to add to modifying the text
3661  *
3662  * Removes the functions being traced by the @subops from @ops. Note, it
3663  * will not affect functions that are being traced by other subops that
3664  * still exist in @ops.
3665  *
3666  * If the last subops is removed from @ops, then @ops is shutdown normally.
3667  */
3668 int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
3669 {
3670 	struct ftrace_hash *filter_hash = EMPTY_HASH;
3671 	struct ftrace_hash *notrace_hash = EMPTY_HASH;
3672 	int ret;
3673 
3674 	if (unlikely(ftrace_disabled))
3675 		return -ENODEV;
3676 
3677 	if (WARN_ON_ONCE(!(subops->flags & FTRACE_OPS_FL_ENABLED)))
3678 		return -EINVAL;
3679 
3680 	list_del(&subops->list);
3681 
3682 	if (list_empty(&ops->subop_list)) {
3683 		/* Last one, just disable the current ops */
3684 
3685 		ret = ftrace_shutdown(ops, command);
3686 		if (ret < 0) {
3687 			list_add(&subops->list, &ops->subop_list);
3688 			return ret;
3689 		}
3690 
3691 		subops->flags &= ~FTRACE_OPS_FL_ENABLED;
3692 
3693 		free_ftrace_hash(ops->func_hash->filter_hash);
3694 		free_ftrace_hash(ops->func_hash->notrace_hash);
3695 		ops->func_hash->filter_hash = EMPTY_HASH;
3696 		ops->func_hash->notrace_hash = EMPTY_HASH;
3697 		subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
3698 		subops->managed = NULL;
3699 
3700 		return 0;
3701 	}
3702 
3703 	/* Rebuild the hashes without subops */
3704 	ret = rebuild_hashes(&filter_hash, &notrace_hash, ops);
3705 	if (ret < 0)
3706 		return ret;
3707 
3708 	ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
3709 	if (ret < 0) {
3710 		list_add(&subops->list, &ops->subop_list);
3711 	} else {
3712 		subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
3713 		subops->managed = NULL;
3714 	}
3715 	free_ftrace_hash(filter_hash);
3716 	free_ftrace_hash(notrace_hash);
3717 	return ret;
3718 }
3719 
3720 static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
3721 					      struct ftrace_hash **orig_subhash,
3722 					      struct ftrace_hash *hash)
3723 {
3724 	struct ftrace_ops *ops = subops->managed;
3725 	struct ftrace_hash *notrace_hash;
3726 	struct ftrace_hash *filter_hash;
3727 	struct ftrace_hash *save_hash;
3728 	struct ftrace_hash *new_hash;
3729 	int ret;
3730 
3731 	/* Manager ops can not be subops (yet) */
3732 	if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP))
3733 		return -EINVAL;
3734 
3735 	/* Move the new hash over to the subops hash */
3736 	save_hash = *orig_subhash;
3737 	*orig_subhash = __ftrace_hash_move(hash);
3738 	if (!*orig_subhash) {
3739 		*orig_subhash = save_hash;
3740 		return -ENOMEM;
3741 	}
3742 
3743 	ret = rebuild_hashes(&filter_hash, &notrace_hash, ops);
3744 	if (!ret) {
3745 		ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
3746 		free_ftrace_hash(filter_hash);
3747 		free_ftrace_hash(notrace_hash);
3748 	}
3749 
3750 	if (ret) {
3751 		/* Put back the original hash */
3752 		new_hash = *orig_subhash;
3753 		*orig_subhash = save_hash;
3754 		free_ftrace_hash_rcu(new_hash);
3755 	} else {
3756 		free_ftrace_hash_rcu(save_hash);
3757 	}
3758 	return ret;
3759 }
3760 
3761 
3762 u64			ftrace_update_time;
3763 u64			ftrace_total_mod_time;
3764 unsigned long		ftrace_update_tot_cnt;
3765 unsigned long		ftrace_number_of_pages;
3766 unsigned long		ftrace_number_of_groups;
3767 
3768 static inline int ops_traces_mod(struct ftrace_ops *ops)
3769 {
3770 	/*
3771 	 * An empty filter_hash will default to tracing the module.
3772 	 * But the notrace hash requires a test of individual module functions.
3773 	 */
3774 	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3775 		ftrace_hash_empty(ops->func_hash->notrace_hash);
3776 }
3777 
3778 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3779 {
3780 	bool init_nop = ftrace_need_init_nop();
3781 	struct ftrace_page *pg;
3782 	struct dyn_ftrace *p;
3783 	u64 start, stop, update_time;
3784 	unsigned long update_cnt = 0;
3785 	unsigned long rec_flags = 0;
3786 	int i;
3787 
3788 	start = ftrace_now(raw_smp_processor_id());
3789 
3790 	/*
3791 	 * When a module is loaded, this function is called to convert
3792 	 * the calls to mcount in its text to nops, and also to create
3793 	 * an entry in the ftrace data. Now, if ftrace is activated
3794 	 * after this call, but before the module sets its text to
3795 	 * read-only, the modification of enabling ftrace can fail if
3796 	 * the read-only is done while ftrace is converting the calls.
3797 	 * To prevent this, the module's records are set as disabled
3798 	 * and will be enabled after the call to set the module's text
3799 	 * to read-only.
3800 	 */
3801 	if (mod)
3802 		rec_flags |= FTRACE_FL_DISABLED;
3803 
3804 	for (pg = new_pgs; pg; pg = pg->next) {
3805 
3806 		for (i = 0; i < pg->index; i++) {
3807 
3808 			/* If something went wrong, bail without enabling anything */
3809 			if (unlikely(ftrace_disabled))
3810 				return -1;
3811 
3812 			p = &pg->records[i];
3813 			p->flags = rec_flags;
3814 
3815 			/*
3816 			 * Do the initial record conversion from mcount jump
3817 			 * to the NOP instructions.
3818 			 */
3819 			if (init_nop && !ftrace_nop_initialize(mod, p))
3820 				break;
3821 
3822 			update_cnt++;
3823 		}
3824 	}
3825 
3826 	stop = ftrace_now(raw_smp_processor_id());
3827 	update_time = stop - start;
3828 	if (mod)
3829 		ftrace_total_mod_time += update_time;
3830 	else
3831 		ftrace_update_time = update_time;
3832 	ftrace_update_tot_cnt += update_cnt;
3833 
3834 	return 0;
3835 }
3836 
3837 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3838 {
3839 	int order;
3840 	int pages;
3841 	int cnt;
3842 
3843 	if (WARN_ON(!count))
3844 		return -EINVAL;
3845 
3846 	/* We want to fill as much as possible, with no empty pages */
3847 	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3848 	order = fls(pages) - 1;
3849 
3850  again:
3851 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3852 
3853 	if (!pg->records) {
3854 		/* if we can't allocate this size, try something smaller */
3855 		if (!order)
3856 			return -ENOMEM;
3857 		order--;
3858 		goto again;
3859 	}
3860 
3861 	ftrace_number_of_pages += 1 << order;
3862 	ftrace_number_of_groups++;
3863 
3864 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3865 	pg->order = order;
3866 
3867 	if (cnt > count)
3868 		cnt = count;
3869 
3870 	return cnt;
3871 }
3872 
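/*
 * A worked example of the sizing above, assuming (for illustration only)
 * PAGE_SIZE = 4096 and ENTRY_SIZE = 32, so ENTRIES_PER_PAGE = 128.  For
 * count = 1000 records:
 *
 *	pages = DIV_ROUND_UP(1000, 128) = 8
 *	order = fls(8) - 1 = 3			(one order-3 block)
 *	cnt   = (4096 << 3) / 32 = 1024		(clamped to count = 1000)
 *
 * If the order-3 allocation fails, the order is stepped down until an
 * allocation succeeds, and ftrace_allocate_pages() loops to cover the
 * remainder with further groups.
 */
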
3873 static void ftrace_free_pages(struct ftrace_page *pages)
3874 {
3875 	struct ftrace_page *pg = pages;
3876 
3877 	while (pg) {
3878 		if (pg->records) {
3879 			free_pages((unsigned long)pg->records, pg->order);
3880 			ftrace_number_of_pages -= 1 << pg->order;
3881 		}
3882 		pages = pg->next;
3883 		kfree(pg);
3884 		pg = pages;
3885 		ftrace_number_of_groups--;
3886 	}
3887 }
3888 
3889 static struct ftrace_page *
3890 ftrace_allocate_pages(unsigned long num_to_init)
3891 {
3892 	struct ftrace_page *start_pg;
3893 	struct ftrace_page *pg;
3894 	int cnt;
3895 
3896 	if (!num_to_init)
3897 		return NULL;
3898 
3899 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3900 	if (!pg)
3901 		return NULL;
3902 
3903 	/*
3904 	 * Try to allocate as much as possible in one contiguous
3905 	 * location that fills in all of the space. We want to
3906 	 * waste as little space as possible.
3907 	 */
3908 	for (;;) {
3909 		cnt = ftrace_allocate_records(pg, num_to_init);
3910 		if (cnt < 0)
3911 			goto free_pages;
3912 
3913 		num_to_init -= cnt;
3914 		if (!num_to_init)
3915 			break;
3916 
3917 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3918 		if (!pg->next)
3919 			goto free_pages;
3920 
3921 		pg = pg->next;
3922 	}
3923 
3924 	return start_pg;
3925 
3926  free_pages:
3927 	ftrace_free_pages(start_pg);
3928 	pr_info("ftrace: FAILED to allocate memory for functions\n");
3929 	return NULL;
3930 }
3931 
3932 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3933 
3934 struct ftrace_iterator {
3935 	loff_t				pos;
3936 	loff_t				func_pos;
3937 	loff_t				mod_pos;
3938 	struct ftrace_page		*pg;
3939 	struct dyn_ftrace		*func;
3940 	struct ftrace_func_probe	*probe;
3941 	struct ftrace_func_entry	*probe_entry;
3942 	struct trace_parser		parser;
3943 	struct ftrace_hash		*hash;
3944 	struct ftrace_ops		*ops;
3945 	struct trace_array		*tr;
3946 	struct list_head		*mod_list;
3947 	int				pidx;
3948 	int				idx;
3949 	unsigned			flags;
3950 };
3951 
3952 static void *
3953 t_probe_next(struct seq_file *m, loff_t *pos)
3954 {
3955 	struct ftrace_iterator *iter = m->private;
3956 	struct trace_array *tr = iter->ops->private;
3957 	struct list_head *func_probes;
3958 	struct ftrace_hash *hash;
3959 	struct list_head *next;
3960 	struct hlist_node *hnd = NULL;
3961 	struct hlist_head *hhd;
3962 	int size;
3963 
3964 	(*pos)++;
3965 	iter->pos = *pos;
3966 
3967 	if (!tr)
3968 		return NULL;
3969 
3970 	func_probes = &tr->func_probes;
3971 	if (list_empty(func_probes))
3972 		return NULL;
3973 
3974 	if (!iter->probe) {
3975 		next = func_probes->next;
3976 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3977 	}
3978 
3979 	if (iter->probe_entry)
3980 		hnd = &iter->probe_entry->hlist;
3981 
3982 	hash = iter->probe->ops.func_hash->filter_hash;
3983 
3984 	/*
3985 	 * A probe being registered may temporarily have an empty hash;
3986 	 * such a probe sits at the end of the func_probes list.
3987 	 */
3988 	if (!hash || hash == EMPTY_HASH)
3989 		return NULL;
3990 
3991 	size = 1 << hash->size_bits;
3992 
3993  retry:
3994 	if (iter->pidx >= size) {
3995 		if (iter->probe->list.next == func_probes)
3996 			return NULL;
3997 		next = iter->probe->list.next;
3998 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3999 		hash = iter->probe->ops.func_hash->filter_hash;
4000 		size = 1 << hash->size_bits;
4001 		iter->pidx = 0;
4002 	}
4003 
4004 	hhd = &hash->buckets[iter->pidx];
4005 
4006 	if (hlist_empty(hhd)) {
4007 		iter->pidx++;
4008 		hnd = NULL;
4009 		goto retry;
4010 	}
4011 
4012 	if (!hnd)
4013 		hnd = hhd->first;
4014 	else {
4015 		hnd = hnd->next;
4016 		if (!hnd) {
4017 			iter->pidx++;
4018 			goto retry;
4019 		}
4020 	}
4021 
4022 	if (WARN_ON_ONCE(!hnd))
4023 		return NULL;
4024 
4025 	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
4026 
4027 	return iter;
4028 }
4029 
4030 static void *t_probe_start(struct seq_file *m, loff_t *pos)
4031 {
4032 	struct ftrace_iterator *iter = m->private;
4033 	void *p = NULL;
4034 	loff_t l;
4035 
4036 	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
4037 		return NULL;
4038 
4039 	if (iter->mod_pos > *pos)
4040 		return NULL;
4041 
4042 	iter->probe = NULL;
4043 	iter->probe_entry = NULL;
4044 	iter->pidx = 0;
4045 	for (l = 0; l <= (*pos - iter->mod_pos); ) {
4046 		p = t_probe_next(m, &l);
4047 		if (!p)
4048 			break;
4049 	}
4050 	if (!p)
4051 		return NULL;
4052 
4053 	/* Only set this if we have an item */
4054 	iter->flags |= FTRACE_ITER_PROBE;
4055 
4056 	return iter;
4057 }
4058 
4059 static int
4060 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
4061 {
4062 	struct ftrace_func_entry *probe_entry;
4063 	struct ftrace_probe_ops *probe_ops;
4064 	struct ftrace_func_probe *probe;
4065 
4066 	probe = iter->probe;
4067 	probe_entry = iter->probe_entry;
4068 
4069 	if (WARN_ON_ONCE(!probe || !probe_entry))
4070 		return -EIO;
4071 
4072 	probe_ops = probe->probe_ops;
4073 
4074 	if (probe_ops->print)
4075 		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
4076 
4077 	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
4078 		   (void *)probe_ops->func);
4079 
4080 	return 0;
4081 }
4082 
4083 static void *
4084 t_mod_next(struct seq_file *m, loff_t *pos)
4085 {
4086 	struct ftrace_iterator *iter = m->private;
4087 	struct trace_array *tr = iter->tr;
4088 
4089 	(*pos)++;
4090 	iter->pos = *pos;
4091 
4092 	iter->mod_list = iter->mod_list->next;
4093 
4094 	if (iter->mod_list == &tr->mod_trace ||
4095 	    iter->mod_list == &tr->mod_notrace) {
4096 		iter->flags &= ~FTRACE_ITER_MOD;
4097 		return NULL;
4098 	}
4099 
4100 	iter->mod_pos = *pos;
4101 
4102 	return iter;
4103 }
4104 
4105 static void *t_mod_start(struct seq_file *m, loff_t *pos)
4106 {
4107 	struct ftrace_iterator *iter = m->private;
4108 	void *p = NULL;
4109 	loff_t l;
4110 
4111 	if (iter->func_pos > *pos)
4112 		return NULL;
4113 
4114 	iter->mod_pos = iter->func_pos;
4115 
4116 	/* probes are only available if tr is set */
4117 	if (!iter->tr)
4118 		return NULL;
4119 
4120 	for (l = 0; l <= (*pos - iter->func_pos); ) {
4121 		p = t_mod_next(m, &l);
4122 		if (!p)
4123 			break;
4124 	}
4125 	if (!p) {
4126 		iter->flags &= ~FTRACE_ITER_MOD;
4127 		return t_probe_start(m, pos);
4128 	}
4129 
4130 	/* Only set this if we have an item */
4131 	iter->flags |= FTRACE_ITER_MOD;
4132 
4133 	return iter;
4134 }
4135 
4136 static int
4137 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
4138 {
4139 	struct ftrace_mod_load *ftrace_mod;
4140 	struct trace_array *tr = iter->tr;
4141 
4142 	if (WARN_ON_ONCE(!iter->mod_list) ||
4143 			 iter->mod_list == &tr->mod_trace ||
4144 			 iter->mod_list == &tr->mod_notrace)
4145 		return -EIO;
4146 
4147 	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
4148 
4149 	if (ftrace_mod->func)
4150 		seq_printf(m, "%s", ftrace_mod->func);
4151 	else
4152 		seq_putc(m, '*');
4153 
4154 	seq_printf(m, ":mod:%s\n", ftrace_mod->module);
4155 
4156 	return 0;
4157 }
4158 
4159 static void *
4160 t_func_next(struct seq_file *m, loff_t *pos)
4161 {
4162 	struct ftrace_iterator *iter = m->private;
4163 	struct dyn_ftrace *rec = NULL;
4164 
4165 	(*pos)++;
4166 
4167  retry:
4168 	if (iter->idx >= iter->pg->index) {
4169 		if (iter->pg->next) {
4170 			iter->pg = iter->pg->next;
4171 			iter->idx = 0;
4172 			goto retry;
4173 		}
4174 	} else {
4175 		rec = &iter->pg->records[iter->idx++];
4176 		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
4177 		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
4178 
4179 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
4180 		     !(rec->flags & FTRACE_FL_ENABLED)) ||
4181 
4182 		    ((iter->flags & FTRACE_ITER_TOUCHED) &&
4183 		     !(rec->flags & FTRACE_FL_TOUCHED))) {
4184 
4185 			rec = NULL;
4186 			goto retry;
4187 		}
4188 	}
4189 
4190 	if (!rec)
4191 		return NULL;
4192 
4193 	iter->pos = iter->func_pos = *pos;
4194 	iter->func = rec;
4195 
4196 	return iter;
4197 }
4198 
4199 static void *
4200 t_next(struct seq_file *m, void *v, loff_t *pos)
4201 {
4202 	struct ftrace_iterator *iter = m->private;
4203 	loff_t l = *pos; /* t_probe_start() must use original pos */
4204 	void *ret;
4205 
4206 	if (unlikely(ftrace_disabled))
4207 		return NULL;
4208 
4209 	if (iter->flags & FTRACE_ITER_PROBE)
4210 		return t_probe_next(m, pos);
4211 
4212 	if (iter->flags & FTRACE_ITER_MOD)
4213 		return t_mod_next(m, pos);
4214 
4215 	if (iter->flags & FTRACE_ITER_PRINTALL) {
4216 		/* next must increment pos, and the t_*_start() calls do not */
4217 		(*pos)++;
4218 		return t_mod_start(m, &l);
4219 	}
4220 
4221 	ret = t_func_next(m, pos);
4222 
4223 	if (!ret)
4224 		return t_mod_start(m, &l);
4225 
4226 	return ret;
4227 }
4228 
4229 static void reset_iter_read(struct ftrace_iterator *iter)
4230 {
4231 	iter->pos = 0;
4232 	iter->func_pos = 0;
4233 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
4234 }
4235 
4236 static void *t_start(struct seq_file *m, loff_t *pos)
4237 {
4238 	struct ftrace_iterator *iter = m->private;
4239 	void *p = NULL;
4240 	loff_t l;
4241 
4242 	mutex_lock(&ftrace_lock);
4243 
4244 	if (unlikely(ftrace_disabled))
4245 		return NULL;
4246 
4247 	/*
4248 	 * If an lseek was done, then reset and start from beginning.
4249 	 */
4250 	if (*pos < iter->pos)
4251 		reset_iter_read(iter);
4252 
4253 	/*
4254 	 * For set_ftrace_filter reading, if we have the filter
4255 	 * off, we can short cut and just print out that all
4256 	 * functions are enabled.
4257 	 */
4258 	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
4259 	    ftrace_hash_empty(iter->hash)) {
4260 		iter->func_pos = 1; /* Account for the message */
4261 		if (*pos > 0)
4262 			return t_mod_start(m, pos);
4263 		iter->flags |= FTRACE_ITER_PRINTALL;
4264 		/* reset in case of seek/pread */
4265 		iter->flags &= ~FTRACE_ITER_PROBE;
4266 		return iter;
4267 	}
4268 
4269 	if (iter->flags & FTRACE_ITER_MOD)
4270 		return t_mod_start(m, pos);
4271 
4272 	/*
4273 	 * Unfortunately, we need to restart at ftrace_pages_start
4274 	 * every time we let go of the ftrace_mutex. This is because
4275 	 * those pointers can change without the lock.
4276 	 */
4277 	iter->pg = ftrace_pages_start;
4278 	iter->idx = 0;
4279 	for (l = 0; l <= *pos; ) {
4280 		p = t_func_next(m, &l);
4281 		if (!p)
4282 			break;
4283 	}
4284 
4285 	if (!p)
4286 		return t_mod_start(m, pos);
4287 
4288 	return iter;
4289 }
4290 
4291 static void t_stop(struct seq_file *m, void *p)
4292 {
4293 	mutex_unlock(&ftrace_lock);
4294 }
4295 
4296 void * __weak
4297 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
4298 {
4299 	return NULL;
4300 }
4301 
4302 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
4303 				struct dyn_ftrace *rec)
4304 {
4305 	void *ptr;
4306 
4307 	ptr = arch_ftrace_trampoline_func(ops, rec);
4308 	if (ptr)
4309 		seq_printf(m, " ->%pS", ptr);
4310 }
4311 
4312 #ifdef FTRACE_MCOUNT_MAX_OFFSET
4313 /*
4314  * Weak functions can still have an mcount/fentry that is saved in
4315  * the __mcount_loc section. These can be detected by having a
4316  * symbol offset of greater than FTRACE_MCOUNT_MAX_OFFSET, as the
4317  * symbol found by kallsyms is not the function that the mcount/fentry
4318  * is part of. The offset is much greater in these cases.
4319  *
4320  * Test the record to make sure that the ip points to a valid kallsyms
4321  * symbol, and if not, mark it disabled.
4322  */
4323 static int test_for_valid_rec(struct dyn_ftrace *rec)
4324 {
4325 	char str[KSYM_SYMBOL_LEN];
4326 	unsigned long offset;
4327 	const char *ret;
4328 
4329 	ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
4330 
4331 	/* Weak functions can cause invalid addresses */
4332 	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
4333 		rec->flags |= FTRACE_FL_DISABLED;
4334 		return 0;
4335 	}
4336 	return 1;
4337 }
4338 
4339 static struct workqueue_struct *ftrace_check_wq __initdata;
4340 static struct work_struct ftrace_check_work __initdata;
4341 
4342 /*
4343  * Scan all the mcount/fentry entries to make sure they are valid.
4344  */
4345 static __init void ftrace_check_work_func(struct work_struct *work)
4346 {
4347 	struct ftrace_page *pg;
4348 	struct dyn_ftrace *rec;
4349 
4350 	mutex_lock(&ftrace_lock);
4351 	do_for_each_ftrace_rec(pg, rec) {
4352 		test_for_valid_rec(rec);
4353 	} while_for_each_ftrace_rec();
4354 	mutex_unlock(&ftrace_lock);
4355 }
4356 
4357 static int __init ftrace_check_for_weak_functions(void)
4358 {
4359 	INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
4360 
4361 	ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
4362 
4363 	queue_work(ftrace_check_wq, &ftrace_check_work);
4364 	return 0;
4365 }
4366 
4367 static int __init ftrace_check_sync(void)
4368 {
4369 	/* Make sure the ftrace_check updates are finished */
4370 	if (ftrace_check_wq)
4371 		destroy_workqueue(ftrace_check_wq);
4372 	return 0;
4373 }
4374 
4375 late_initcall_sync(ftrace_check_sync);
4376 subsys_initcall(ftrace_check_for_weak_functions);
4377 
4378 static int print_rec(struct seq_file *m, unsigned long ip)
4379 {
4380 	unsigned long offset;
4381 	char str[KSYM_SYMBOL_LEN];
4382 	char *modname;
4383 	const char *ret;
4384 
4385 	ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
4386 	/* Weak functions can cause invalid addresses */
4387 	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
4388 		snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
4389 			 FTRACE_INVALID_FUNCTION, offset);
4390 		ret = NULL;
4391 	}
4392 
4393 	seq_puts(m, str);
4394 	if (modname)
4395 		seq_printf(m, " [%s]", modname);
4396 	return ret == NULL ? -1 : 0;
4397 }
4398 #else
4399 static inline int test_for_valid_rec(struct dyn_ftrace *rec)
4400 {
4401 	return 1;
4402 }
4403 
4404 static inline int print_rec(struct seq_file *m, unsigned long ip)
4405 {
4406 	seq_printf(m, "%ps", (void *)ip);
4407 	return 0;
4408 }
4409 #endif
4410 
4411 static void print_subops(struct seq_file *m, struct ftrace_ops *ops, struct dyn_ftrace *rec)
4412 {
4413 	struct ftrace_ops *subops;
4414 	bool first = true;
4415 
4416 	list_for_each_entry(subops, &ops->subop_list, list) {
4417 		if (!((subops->flags & FTRACE_OPS_FL_ENABLED) &&
4418 		      hash_contains_ip(rec->ip, subops->func_hash)))
4419 			continue;
4420 		if (first) {
4421 			seq_printf(m, "\tsubops:");
4422 			first = false;
4423 		}
4424 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4425 		if (subops->flags & FTRACE_OPS_FL_GRAPH) {
4426 			struct fgraph_ops *gops;
4427 
4428 			gops = container_of(subops, struct fgraph_ops, ops);
4429 			seq_printf(m, " {ent:%pS ret:%pS}",
4430 				   (void *)gops->entryfunc,
4431 				   (void *)gops->retfunc);
4432 			continue;
4433 		}
4434 #endif
4435 		if (subops->trampoline) {
4436 			seq_printf(m, " {%pS (%pS)}",
4437 				   (void *)subops->trampoline,
4438 				   (void *)subops->func);
4439 			add_trampoline_func(m, subops, rec);
4440 		} else {
4441 			seq_printf(m, " {%pS}",
4442 				   (void *)subops->func);
4443 		}
4444 	}
4445 }
4446 
4447 static int t_show(struct seq_file *m, void *v)
4448 {
4449 	struct ftrace_iterator *iter = m->private;
4450 	struct dyn_ftrace *rec;
4451 
4452 	if (iter->flags & FTRACE_ITER_PROBE)
4453 		return t_probe_show(m, iter);
4454 
4455 	if (iter->flags & FTRACE_ITER_MOD)
4456 		return t_mod_show(m, iter);
4457 
4458 	if (iter->flags & FTRACE_ITER_PRINTALL) {
4459 		if (iter->flags & FTRACE_ITER_NOTRACE)
4460 			seq_puts(m, "#### no functions disabled ####\n");
4461 		else
4462 			seq_puts(m, "#### all functions enabled ####\n");
4463 		return 0;
4464 	}
4465 
4466 	rec = iter->func;
4467 
4468 	if (!rec)
4469 		return 0;
4470 
4471 	if (iter->flags & FTRACE_ITER_ADDRS)
4472 		seq_printf(m, "%lx ", rec->ip);
4473 
4474 	if (print_rec(m, rec->ip)) {
4475 		/* This should only happen when a rec is disabled */
4476 		WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
4477 		seq_putc(m, '\n');
4478 		return 0;
4479 	}
4480 
4481 	if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) {
4482 		struct ftrace_ops *ops;
4483 
4484 		seq_printf(m, " (%ld)%s%s%s%s%s",
4485 			   ftrace_rec_count(rec),
4486 			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
4487 			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
4488 			   rec->flags & FTRACE_FL_DIRECT ? " D" : "  ",
4489 			   rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ",
4490 			   rec->flags & FTRACE_FL_MODIFIED ? " M " : "   ");
4491 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
4492 			ops = ftrace_find_tramp_ops_any(rec);
4493 			if (ops) {
4494 				do {
4495 					seq_printf(m, "\ttramp: %pS (%pS)",
4496 						   (void *)ops->trampoline,
4497 						   (void *)ops->func);
4498 					add_trampoline_func(m, ops, rec);
4499 					print_subops(m, ops, rec);
4500 					ops = ftrace_find_tramp_ops_next(rec, ops);
4501 				} while (ops);
4502 			} else
4503 				seq_puts(m, "\ttramp: ERROR!");
4504 		} else {
4505 			add_trampoline_func(m, NULL, rec);
4506 		}
4507 		if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
4508 			ops = ftrace_find_unique_ops(rec);
4509 			if (ops) {
4510 				seq_printf(m, "\tops: %pS (%pS)",
4511 					   ops, ops->func);
4512 				print_subops(m, ops, rec);
4513 			} else {
4514 				seq_puts(m, "\tops: ERROR!");
4515 			}
4516 		}
4517 		if (rec->flags & FTRACE_FL_DIRECT) {
4518 			unsigned long direct;
4519 
4520 			direct = ftrace_find_rec_direct(rec->ip);
4521 			if (direct)
4522 				seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
4523 		}
4524 	}
4525 
4526 	seq_putc(m, '\n');
4527 
4528 	return 0;
4529 }
4530 
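/*
 * Illustrative enabled_functions output from t_show(); the names, counts
 * and trampoline symbols below are invented:
 *
 *	schedule (1) R          	tramp: my_tramp+0x0/0x28 (my_callback)
 *	wake_up_process (2)    M
 *
 * The count is the number of ops tracing the record, and the letter
 * columns stand for R(egs), I(pmodify), D(irect), call O(ps) and
 * M(odified).
 */
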
4531 static const struct seq_operations show_ftrace_seq_ops = {
4532 	.start = t_start,
4533 	.next = t_next,
4534 	.stop = t_stop,
4535 	.show = t_show,
4536 };
4537 
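/*
 * The seq_file core drives the iterator above as t_start() -> t_show() ->
 * t_next() -> t_show() ... -> t_stop().  The callbacks walk three stages
 * in order: the dyn_ftrace records (t_func_next), then the cached module
 * filters (t_mod_next), then the function probes (t_probe_next), with
 * iter->flags recording which stage is active.
 */
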
4538 static int
4539 ftrace_avail_open(struct inode *inode, struct file *file)
4540 {
4541 	struct ftrace_iterator *iter;
4542 	int ret;
4543 
4544 	ret = security_locked_down(LOCKDOWN_TRACEFS);
4545 	if (ret)
4546 		return ret;
4547 
4548 	if (unlikely(ftrace_disabled))
4549 		return -ENODEV;
4550 
4551 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4552 	if (!iter)
4553 		return -ENOMEM;
4554 
4555 	iter->pg = ftrace_pages_start;
4556 	iter->ops = &global_ops;
4557 
4558 	return 0;
4559 }
4560 
4561 static int
4562 ftrace_enabled_open(struct inode *inode, struct file *file)
4563 {
4564 	struct ftrace_iterator *iter;
4565 
4566 	/*
4567 	 * This shows us what functions are currently being
4568 	 * traced and by what. Not sure if we want lockdown
4569 	 * to hide such critical information from an admin.
4570 	 * Although, perhaps it can show information we don't
4571 	 * want people to see, but if something is tracing
4572 	 * something, we probably want to know about it.
4573 	 */
4574 
4575 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4576 	if (!iter)
4577 		return -ENOMEM;
4578 
4579 	iter->pg = ftrace_pages_start;
4580 	iter->flags = FTRACE_ITER_ENABLED;
4581 	iter->ops = &global_ops;
4582 
4583 	return 0;
4584 }
4585 
4586 static int
4587 ftrace_touched_open(struct inode *inode, struct file *file)
4588 {
4589 	struct ftrace_iterator *iter;
4590 
4591 	/*
4592 	 * This shows us what functions have ever been enabled
4593 	 * (traced, direct, patched, etc). Not sure if we want lockdown
4594 	 * to hide such critical information from an admin.
4595 	 * Although, perhaps it can show information we don't
4596 	 * want people to see, but if something had traced
4597 	 * something, we probably want to know about it.
4598 	 */
4599 
4600 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4601 	if (!iter)
4602 		return -ENOMEM;
4603 
4604 	iter->pg = ftrace_pages_start;
4605 	iter->flags = FTRACE_ITER_TOUCHED;
4606 	iter->ops = &global_ops;
4607 
4608 	return 0;
4609 }
4610 
4611 static int
4612 ftrace_avail_addrs_open(struct inode *inode, struct file *file)
4613 {
4614 	struct ftrace_iterator *iter;
4615 	int ret;
4616 
4617 	ret = security_locked_down(LOCKDOWN_TRACEFS);
4618 	if (ret)
4619 		return ret;
4620 
4621 	if (unlikely(ftrace_disabled))
4622 		return -ENODEV;
4623 
4624 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4625 	if (!iter)
4626 		return -ENOMEM;
4627 
4628 	iter->pg = ftrace_pages_start;
4629 	iter->flags = FTRACE_ITER_ADDRS;
4630 	iter->ops = &global_ops;
4631 
4632 	return 0;
4633 }
4634 
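/*
 * These open routines back the tracefs files "available_filter_functions"
 * (ftrace_avail_open), "enabled_functions" (ftrace_enabled_open),
 * "touched_functions" (ftrace_touched_open) and
 * "available_filter_functions_addrs" (ftrace_avail_addrs_open), which are
 * created later in this file when the dynamic ftrace entries are set up.
 */
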
4635 /**
4636  * ftrace_regex_open - initialize function tracer filter files
4637  * @ops: The ftrace_ops that hold the hash filters
4638  * @flag: The type of filter to process
4639  * @inode: The inode, usually passed in to your open routine
4640  * @file: The file, usually passed in to your open routine
4641  *
4642  * ftrace_regex_open() initializes the filter files for the
4643  * @ops. Depending on @flag it may process the filter hash or
4644  * the notrace hash of @ops. With this called from the open
4645  * routine, you can use ftrace_filter_write() for the write
4646  * routine if @flag has FTRACE_ITER_FILTER set, or
4647  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
4648  * tracing_lseek() should be used as the lseek routine, and
4649  * release must call ftrace_regex_release().
4650  *
4651  * Returns: 0 on success or a negative errno value on failure
4652  */
4653 int
4654 ftrace_regex_open(struct ftrace_ops *ops, int flag,
4655 		  struct inode *inode, struct file *file)
4656 {
4657 	struct ftrace_iterator *iter;
4658 	struct ftrace_hash *hash;
4659 	struct list_head *mod_head;
4660 	struct trace_array *tr = ops->private;
4661 	int ret = -ENOMEM;
4662 
4663 	ftrace_ops_init(ops);
4664 
4665 	if (unlikely(ftrace_disabled))
4666 		return -ENODEV;
4667 
4668 	if (tracing_check_open_get_tr(tr))
4669 		return -ENODEV;
4670 
4671 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4672 	if (!iter)
4673 		goto out;
4674 
4675 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
4676 		goto out;
4677 
4678 	iter->ops = ops;
4679 	iter->flags = flag;
4680 	iter->tr = tr;
4681 
4682 	mutex_lock(&ops->func_hash->regex_lock);
4683 
4684 	if (flag & FTRACE_ITER_NOTRACE) {
4685 		hash = ops->func_hash->notrace_hash;
4686 		mod_head = tr ? &tr->mod_notrace : NULL;
4687 	} else {
4688 		hash = ops->func_hash->filter_hash;
4689 		mod_head = tr ? &tr->mod_trace : NULL;
4690 	}
4691 
4692 	iter->mod_list = mod_head;
4693 
4694 	if (file->f_mode & FMODE_WRITE) {
4695 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
4696 
4697 		if (file->f_flags & O_TRUNC) {
4698 			iter->hash = alloc_ftrace_hash(size_bits);
4699 			clear_ftrace_mod_list(mod_head);
4700 		} else {
4701 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
4702 		}
4703 	} else {
4704 		if (hash)
4705 			iter->hash = alloc_and_copy_ftrace_hash(hash->size_bits, hash);
4706 		else
4707 			iter->hash = EMPTY_HASH;
4708 	}
4709 
4710 	if (!iter->hash) {
4711 		trace_parser_put(&iter->parser);
4712 		goto out_unlock;
4713 	}
4714 
4715 	ret = 0;
4716 
4717 	if (file->f_mode & FMODE_READ) {
4718 		iter->pg = ftrace_pages_start;
4719 
4720 		ret = seq_open(file, &show_ftrace_seq_ops);
4721 		if (!ret) {
4722 			struct seq_file *m = file->private_data;
4723 			m->private = iter;
4724 		} else {
4725 			/* Failed */
4726 			free_ftrace_hash(iter->hash);
4727 			trace_parser_put(&iter->parser);
4728 		}
4729 	} else
4730 		file->private_data = iter;
4731 
4732  out_unlock:
4733 	mutex_unlock(&ops->func_hash->regex_lock);
4734 
4735  out:
4736 	if (ret) {
4737 		kfree(iter);
4738 		if (tr)
4739 			trace_array_put(tr);
4740 	}
4741 
4742 	return ret;
4743 }
4744 
4745 static int
4746 ftrace_filter_open(struct inode *inode, struct file *file)
4747 {
4748 	struct ftrace_ops *ops = inode->i_private;
4749 
4750 	/* Checks for tracefs lockdown */
4751 	return ftrace_regex_open(ops,
4752 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
4753 			inode, file);
4754 }
4755 
4756 static int
4757 ftrace_notrace_open(struct inode *inode, struct file *file)
4758 {
4759 	struct ftrace_ops *ops = inode->i_private;
4760 
4761 	/* Checks for tracefs lockdown */
4762 	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
4763 				 inode, file);
4764 }
4765 
4766 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
4767 struct ftrace_glob {
4768 	char *search;
4769 	unsigned len;
4770 	int type;
4771 };
4772 
4773 /*
4774  * If symbols in an architecture don't correspond exactly to the user-visible
4775  * name of what they represent, it is possible to define this function to
4776  * perform the necessary adjustments.
4777  */
4778 char * __weak arch_ftrace_match_adjust(char *str, const char *search)
4779 {
4780 	return str;
4781 }
4782 
4783 static int ftrace_match(char *str, struct ftrace_glob *g)
4784 {
4785 	int matched = 0;
4786 	int slen;
4787 
4788 	str = arch_ftrace_match_adjust(str, g->search);
4789 
4790 	switch (g->type) {
4791 	case MATCH_FULL:
4792 		if (strcmp(str, g->search) == 0)
4793 			matched = 1;
4794 		break;
4795 	case MATCH_FRONT_ONLY:
4796 		if (strncmp(str, g->search, g->len) == 0)
4797 			matched = 1;
4798 		break;
4799 	case MATCH_MIDDLE_ONLY:
4800 		if (strstr(str, g->search))
4801 			matched = 1;
4802 		break;
4803 	case MATCH_END_ONLY:
4804 		slen = strlen(str);
4805 		if (slen >= g->len &&
4806 		    memcmp(str + slen - g->len, g->search, g->len) == 0)
4807 			matched = 1;
4808 		break;
4809 	case MATCH_GLOB:
4810 		if (glob_match(g->search, str))
4811 			matched = 1;
4812 		break;
4813 	}
4814 
4815 	return matched;
4816 }
4817 
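/*
 * Examples of the glob types produced by filter_parse_regex() and
 * consumed by ftrace_match():
 *
 *	"schedule"	MATCH_FULL		exact string compare
 *	"sched*"	MATCH_FRONT_ONLY	prefix compare
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring search
 *	"*schedule"	MATCH_END_ONLY		suffix compare
 *	"sched*le"	MATCH_GLOB		full glob_match()
 */
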
4818 static int
4819 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
4820 {
4821 	struct ftrace_func_entry *entry;
4822 	int ret = 0;
4823 
4824 	entry = ftrace_lookup_ip(hash, rec->ip);
4825 	if (clear_filter) {
4826 		/* Do nothing if it doesn't exist */
4827 		if (!entry)
4828 			return 0;
4829 
4830 		free_hash_entry(hash, entry);
4831 	} else {
4832 		/* Do nothing if it exists */
4833 		if (entry)
4834 			return 0;
4835 		if (add_hash_entry(hash, rec->ip) == NULL)
4836 			ret = -ENOMEM;
4837 	}
4838 	return ret;
4839 }
4840 
4841 static int
4842 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4843 		 int clear_filter)
4844 {
4845 	long index;
4846 	struct ftrace_page *pg;
4847 	struct dyn_ftrace *rec;
4848 
4849 	/* The index starts at 1 */
4850 	if (kstrtoul(func_g->search, 0, &index) || --index < 0)
4851 		return 0;
4852 
4853 	do_for_each_ftrace_rec(pg, rec) {
4854 		if (pg->index <= index) {
4855 			index -= pg->index;
4856 			/* this is a double loop, break goes to the next page */
4857 			break;
4858 		}
4859 		rec = &pg->records[index];
4860 		enter_record(hash, rec, clear_filter);
4861 		return 1;
4862 	} while_for_each_ftrace_rec();
4863 	return 0;
4864 }
4865 
4866 #ifdef FTRACE_MCOUNT_MAX_OFFSET
4867 static int lookup_ip(unsigned long ip, char **modname, char *str)
4868 {
4869 	unsigned long offset;
4870 
4871 	kallsyms_lookup(ip, NULL, &offset, modname, str);
4872 	if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4873 		return -1;
4874 	return 0;
4875 }
4876 #else
4877 static int lookup_ip(unsigned long ip, char **modname, char *str)
4878 {
4879 	kallsyms_lookup(ip, NULL, NULL, modname, str);
4880 	return 0;
4881 }
4882 #endif
4883 
4884 static int
4885 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
4886 		struct ftrace_glob *mod_g, int exclude_mod)
4887 {
4888 	char str[KSYM_SYMBOL_LEN];
4889 	char *modname;
4890 
4891 	if (lookup_ip(rec->ip, &modname, str)) {
4892 		/* This should only happen when a rec is disabled */
4893 		WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4894 			     !(rec->flags & FTRACE_FL_DISABLED));
4895 		return 0;
4896 	}
4897 
4898 	if (mod_g) {
4899 		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4900 
4901 		/* blank module name to match all modules */
4902 		if (!mod_g->len) {
4903 			/* blank module globbing: modname xor exclude_mod */
4904 			if (!exclude_mod != !modname)
4905 				goto func_match;
4906 			return 0;
4907 		}
4908 
4909 		/*
4910 		 * exclude_mod is set to trace everything but the given
4911 		 * module. If it is set and the module matches, then
4912 		 * return 0. If it is not set and the module doesn't match,
4913 		 * also return 0. Otherwise, check the function to see if
4914 		 * that matches.
4915 		 */
4916 		if (!mod_matches == !exclude_mod)
4917 			return 0;
4918 func_match:
4919 		/* blank search means to match all funcs in the mod */
4920 		if (!func_g->len)
4921 			return 1;
4922 	}
4923 
4924 	return ftrace_match(str, func_g);
4925 }
4926 
4927 static int
4928 match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4929 {
4930 	struct ftrace_page *pg;
4931 	struct dyn_ftrace *rec;
4932 	struct ftrace_glob func_g = { .type = MATCH_FULL };
4933 	struct ftrace_glob mod_g = { .type = MATCH_FULL };
4934 	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4935 	int exclude_mod = 0;
4936 	int found = 0;
4937 	int ret;
4938 	int clear_filter = 0;
4939 
4940 	if (func) {
4941 		func_g.type = filter_parse_regex(func, len, &func_g.search,
4942 						 &clear_filter);
4943 		func_g.len = strlen(func_g.search);
4944 	}
4945 
4946 	if (mod) {
4947 		mod_g.type = filter_parse_regex(mod, strlen(mod),
4948 				&mod_g.search, &exclude_mod);
4949 		mod_g.len = strlen(mod_g.search);
4950 	}
4951 
4952 	guard(mutex)(&ftrace_lock);
4953 
4954 	if (unlikely(ftrace_disabled))
4955 		return 0;
4956 
4957 	if (func_g.type == MATCH_INDEX)
4958 		return add_rec_by_index(hash, &func_g, clear_filter);
4959 
4960 	do_for_each_ftrace_rec(pg, rec) {
4961 
4962 		if (rec->flags & FTRACE_FL_DISABLED)
4963 			continue;
4964 
4965 		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4966 			ret = enter_record(hash, rec, clear_filter);
4967 			if (ret < 0)
4968 				return ret;
4969 			found = 1;
4970 		}
4971 		cond_resched();
4972 	} while_for_each_ftrace_rec();
4973 
4974 	return found;
4975 }
4976 
4977 static int
4978 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4979 {
4980 	return match_records(hash, buff, len, NULL);
4981 }
4982 
4983 static void ftrace_ops_update_code(struct ftrace_ops *ops,
4984 				   struct ftrace_ops_hash *old_hash)
4985 {
4986 	struct ftrace_ops *op;
4987 
4988 	if (!ftrace_enabled)
4989 		return;
4990 
4991 	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4992 		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4993 		return;
4994 	}
4995 
4996 	/*
4997 	 * If this is the shared global_ops filter, then we need to
4998 	 * check if there is another ops that shares it and is enabled.
4999 	 * If so, we still need to run the modify code.
5000 	 */
5001 	if (ops->func_hash != &global_ops.local_hash)
5002 		return;
5003 
5004 	do_for_each_ftrace_op(op, ftrace_ops_list) {
5005 		if (op->func_hash == &global_ops.local_hash &&
5006 		    op->flags & FTRACE_OPS_FL_ENABLED) {
5007 			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
5008 			/* Only need to do this once */
5009 			return;
5010 		}
5011 	} while_for_each_ftrace_op(op);
5012 }
5013 
5014 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
5015 					   struct ftrace_hash **orig_hash,
5016 					   struct ftrace_hash *hash,
5017 					   int enable)
5018 {
5019 	if (ops->flags & FTRACE_OPS_FL_SUBOP)
5020 		return ftrace_hash_move_and_update_subops(ops, orig_hash, hash);
5021 
5022 	/*
5023 	 * If this ops is not enabled, it could be sharing its filters
5024 	 * with a subop. If that's the case, update the subop instead of
5025 	 * this ops. Shared filters are only allowed to have one ops set
5026 	 * at a time, and if we update the ops that is not enabled,
5027 	 * it will not affect subops that share it.
5028 	 */
5029 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) {
5030 		struct ftrace_ops *op;
5031 
5032 		/* Check if any other manager subops maps to this hash */
5033 		do_for_each_ftrace_op(op, ftrace_ops_list) {
5034 			struct ftrace_ops *subops;
5035 
5036 			list_for_each_entry(subops, &op->subop_list, list) {
5037 				if ((subops->flags & FTRACE_OPS_FL_ENABLED) &&
5038 				     subops->func_hash == ops->func_hash) {
5039 					return ftrace_hash_move_and_update_subops(subops, orig_hash, hash);
5040 				}
5041 			}
5042 		} while_for_each_ftrace_op(op);
5043 	}
5044 
5045 	return __ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5046 }
5047 
5048 static int cache_mod(struct trace_array *tr,
5049 		     const char *func, char *module, int enable)
5050 {
5051 	struct ftrace_mod_load *ftrace_mod, *n;
5052 	struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
5053 
5054 	guard(mutex)(&ftrace_lock);
5055 
5056 	/* We do not cache inverse filters */
5057 	if (func[0] == '!') {
5058 		int ret = -EINVAL;
5059 
5060 		func++;
5061 
5062 		/* Look to remove this hash */
5063 		list_for_each_entry_safe(ftrace_mod, n, head, list) {
5064 			if (strcmp(ftrace_mod->module, module) != 0)
5065 				continue;
5066 
5067 			/* no func matches all */
5068 			if (strcmp(func, "*") == 0 ||
5069 			    (ftrace_mod->func &&
5070 			     strcmp(ftrace_mod->func, func) == 0)) {
5071 				ret = 0;
5072 				free_ftrace_mod(ftrace_mod);
5073 				continue;
5074 			}
5075 		}
5076 		return ret;
5077 	}
5078 
5079 	/* We only care about modules that have not been loaded yet */
5080 	if (module_exists(module))
5081 		return -EINVAL;
5082 
5083 	/* Save this string off, and execute it when the module is loaded */
5084 	return ftrace_add_mod(tr, func, module, enable);
5085 }
5086 
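/*
 * Example of what cache_mod() enables (the names are invented): a filter
 * may be set for a module that is not loaded yet:
 *
 *	# echo 'my_func:mod:my_module' > set_ftrace_filter
 *
 * The string is stashed on tr->mod_trace (or tr->mod_notrace) and is
 * replayed by process_cached_mods() when "my_module" is loaded.
 */
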
5087 #ifdef CONFIG_MODULES
5088 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
5089 			     char *mod, bool enable)
5090 {
5091 	struct ftrace_mod_load *ftrace_mod, *n;
5092 	struct ftrace_hash **orig_hash, *new_hash;
5093 	LIST_HEAD(process_mods);
5094 	char *func;
5095 
5096 	mutex_lock(&ops->func_hash->regex_lock);
5097 
5098 	if (enable)
5099 		orig_hash = &ops->func_hash->filter_hash;
5100 	else
5101 		orig_hash = &ops->func_hash->notrace_hash;
5102 
5103 	new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
5104 					      *orig_hash);
5105 	if (!new_hash)
5106 		goto out; /* warn? */
5107 
5108 	mutex_lock(&ftrace_lock);
5109 
5110 	list_for_each_entry_safe(ftrace_mod, n, head, list) {
5111 
5112 		if (strcmp(ftrace_mod->module, mod) != 0)
5113 			continue;
5114 
5115 		if (ftrace_mod->func)
5116 			func = kstrdup(ftrace_mod->func, GFP_KERNEL);
5117 		else
5118 			func = kstrdup("*", GFP_KERNEL);
5119 
5120 		if (!func) /* warn? */
5121 			continue;
5122 
5123 		list_move(&ftrace_mod->list, &process_mods);
5124 
5125 		/* Use the newly allocated func, as it may be "*" */
5126 		kfree(ftrace_mod->func);
5127 		ftrace_mod->func = func;
5128 	}
5129 
5130 	mutex_unlock(&ftrace_lock);
5131 
5132 	list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
5133 
5134 		func = ftrace_mod->func;
5135 
5136 		/* Grabs ftrace_lock, which is why we have this extra step */
5137 		match_records(new_hash, func, strlen(func), mod);
5138 		free_ftrace_mod(ftrace_mod);
5139 	}
5140 
5141 	if (enable && list_empty(head))
5142 		new_hash->flags &= ~FTRACE_HASH_FL_MOD;
5143 
5144 	mutex_lock(&ftrace_lock);
5145 
5146 	ftrace_hash_move_and_update_ops(ops, orig_hash,
5147 					      new_hash, enable);
5148 	mutex_unlock(&ftrace_lock);
5149 
5150  out:
5151 	mutex_unlock(&ops->func_hash->regex_lock);
5152 
5153 	free_ftrace_hash(new_hash);
5154 }
5155 
5156 static void process_cached_mods(const char *mod_name)
5157 {
5158 	struct trace_array *tr;
5159 	char *mod;
5160 
5161 	mod = kstrdup(mod_name, GFP_KERNEL);
5162 	if (!mod)
5163 		return;
5164 
5165 	mutex_lock(&trace_types_lock);
5166 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5167 		if (!list_empty(&tr->mod_trace))
5168 			process_mod_list(&tr->mod_trace, tr->ops, mod, true);
5169 		if (!list_empty(&tr->mod_notrace))
5170 			process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
5171 	}
5172 	mutex_unlock(&trace_types_lock);
5173 
5174 	kfree(mod);
5175 }
5176 #endif
5177 
5178 /*
5179  * We register the module command as a template to show others how
5180  * to register a command as well.
5181  */
5182 
5183 static int
5184 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
5185 		    char *func_orig, char *cmd, char *module, int enable)
5186 {
5187 	char *func;
5188 	int ret;
5189 
5190 	if (!tr)
5191 		return -ENODEV;
5192 
5193 	/* match_records() modifies func, and we need the original */
5194 	func = kstrdup(func_orig, GFP_KERNEL);
5195 	if (!func)
5196 		return -ENOMEM;
5197 
5198 	/*
5199 	 * cmd == 'mod' because we only registered this func
5200 	 * for the 'mod' ftrace_func_command.
5201 	 * But if you register one func with multiple commands,
5202 	 * you can tell which command was used by the cmd
5203 	 * parameter.
5204 	 */
5205 	ret = match_records(hash, func, strlen(func), module);
5206 	kfree(func);
5207 
5208 	if (!ret)
5209 		return cache_mod(tr, func_orig, module, enable);
5210 	if (ret < 0)
5211 		return ret;
5212 	return 0;
5213 }
5214 
5215 static struct ftrace_func_command ftrace_mod_cmd = {
5216 	.name			= "mod",
5217 	.func			= ftrace_mod_callback,
5218 };
5219 
5220 static int __init ftrace_mod_cmd_init(void)
5221 {
5222 	return register_ftrace_command(&ftrace_mod_cmd);
5223 }
5224 core_initcall(ftrace_mod_cmd_init);
5225 
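/*
 * Sketch of registering another command, following the template above
 * (the "mycmd" name and its callback are hypothetical):
 *
 *	static int my_cmd_callback(struct trace_array *tr,
 *				   struct ftrace_hash *hash, char *func,
 *				   char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 *
 * Afterwards, "echo 'some_func:mycmd:param' > set_ftrace_filter" reaches
 * my_cmd_callback() with cmd == "mycmd".
 */
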
5226 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
5227 				      struct ftrace_ops *op, struct ftrace_regs *fregs)
5228 {
5229 	struct ftrace_probe_ops *probe_ops;
5230 	struct ftrace_func_probe *probe;
5231 
5232 	probe = container_of(op, struct ftrace_func_probe, ops);
5233 	probe_ops = probe->probe_ops;
5234 
5235 	/*
5236 	 * Disable preemption for these calls to prevent an RCU grace
5237 	 * period. This syncs the hash iteration and freeing of items
5238 	 * on the hash. rcu_read_lock is too dangerous here.
5239 	 */
5240 	preempt_disable_notrace();
5241 	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
5242 	preempt_enable_notrace();
5243 }
5244 
5245 struct ftrace_func_map {
5246 	struct ftrace_func_entry	entry;
5247 	void				*data;
5248 };
5249 
5250 /*
5251  * Note, ftrace_func_mapper is freed by free_ftrace_hash(&mapper->hash).
5252  * The hash field must be the first field.
5253  */
5254 struct ftrace_func_mapper {
5255 	struct ftrace_hash		hash;	/* Must be first! */
5256 };
5257 
5258 /**
5259  * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
5260  *
5261  * Returns: a ftrace_func_mapper descriptor that can be used to map ips to data.
5262  */
5263 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
5264 {
5265 	struct ftrace_hash *hash;
5266 
5267 	/*
5268 	 * The mapper is simply a ftrace_hash, but since the entries
5269 	 * in the hash are not ftrace_func_entry type, we define it
5270 	 * as a separate structure.
5271 	 */
5272 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5273 	return (struct ftrace_func_mapper *)hash;
5274 }
5275 
5276 /**
5277  * ftrace_func_mapper_find_ip - Find some data mapped to an ip
5278  * @mapper: The mapper that has the ip maps
5279  * @ip: the instruction pointer to find the data for
5280  *
5281  * Returns: the data mapped to @ip if found otherwise NULL. The return
5282  * is actually the address of the mapper data pointer. The address is
5283  * returned for use cases where the data is no bigger than a long, and
5284  * the user can use the data pointer as its data instead of having to
5285  * allocate more memory for the reference.
5286  */
5287 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
5288 				  unsigned long ip)
5289 {
5290 	struct ftrace_func_entry *entry;
5291 	struct ftrace_func_map *map;
5292 
5293 	entry = ftrace_lookup_ip(&mapper->hash, ip);
5294 	if (!entry)
5295 		return NULL;
5296 
5297 	map = (struct ftrace_func_map *)entry;
5298 	return &map->data;
5299 }
5300 
5301 /**
5302  * ftrace_func_mapper_add_ip - Map some data to an ip
5303  * @mapper: The mapper that has the ip maps
5304  * @ip: The instruction pointer address to map @data to
5305  * @data: The data to map to @ip
5306  *
5307  * Returns: 0 on success otherwise an error.
5308  */
5309 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
5310 			      unsigned long ip, void *data)
5311 {
5312 	struct ftrace_func_entry *entry;
5313 	struct ftrace_func_map *map;
5314 
5315 	entry = ftrace_lookup_ip(&mapper->hash, ip);
5316 	if (entry)
5317 		return -EBUSY;
5318 
5319 	map = kmalloc(sizeof(*map), GFP_KERNEL);
5320 	if (!map)
5321 		return -ENOMEM;
5322 
5323 	map->entry.ip = ip;
5324 	map->data = data;
5325 
5326 	__add_hash_entry(&mapper->hash, &map->entry);
5327 
5328 	return 0;
5329 }
5330 
5331 /**
5332  * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
5333  * @mapper: The mapper that has the ip maps
5334  * @ip: The instruction pointer address to remove the data from
5335  *
5336  * Returns: the data if it is found, otherwise NULL.
5337  * Note, if the data pointer is used as the data itself (see
5338  * ftrace_func_mapper_find_ip()), then the return value may be meaningless
5339  * if the data pointer was set to zero.
5340  */
5341 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
5342 				   unsigned long ip)
5343 {
5344 	struct ftrace_func_entry *entry;
5345 	struct ftrace_func_map *map;
5346 	void *data;
5347 
5348 	entry = ftrace_lookup_ip(&mapper->hash, ip);
5349 	if (!entry)
5350 		return NULL;
5351 
5352 	map = (struct ftrace_func_map *)entry;
5353 	data = map->data;
5354 
5355 	remove_hash_entry(&mapper->hash, entry);
5356 	kfree(entry);
5357 
5358 	return data;
5359 }
5360 
5361 /**
5362  * free_ftrace_func_mapper - free a mapping of ips and data
5363  * @mapper: The mapper that has the ip maps
5364  * @free_func: A function to be called on each data item.
5365  *
5366  * This is used to free the function mapper. The @free_func is optional
5367  * and can be used if the data needs to be freed as well.
5368  */
5369 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
5370 			     ftrace_mapper_func free_func)
5371 {
5372 	struct ftrace_func_entry *entry;
5373 	struct ftrace_func_map *map;
5374 	struct hlist_head *hhd;
5375 	int size, i;
5376 
5377 	if (!mapper)
5378 		return;
5379 
5380 	if (free_func && mapper->hash.count) {
5381 		size = 1 << mapper->hash.size_bits;
5382 		for (i = 0; i < size; i++) {
5383 			hhd = &mapper->hash.buckets[i];
5384 			hlist_for_each_entry(entry, hhd, hlist) {
5385 				map = (struct ftrace_func_map *)entry;
5386 				free_func(map);
5387 			}
5388 		}
5389 	}
5390 	/* This also frees the mapper itself */
5391 	free_ftrace_hash(&mapper->hash);
5392 }
5393 
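/*
 * Illustrative sketch (not part of this file): using the mapper API above
 * to keep a per-function hit count directly in the data pointer, as the
 * comment above ftrace_func_mapper_find_ip() suggests. my_mapper and
 * my_mapper_hit() are hypothetical; allocate_ftrace_func_mapper() is the
 * allocator defined earlier in this file.
 */
static struct ftrace_func_mapper *my_mapper;

static int my_mapper_hit(unsigned long ip)
{
	long *count;

	if (!my_mapper) {
		my_mapper = allocate_ftrace_func_mapper();
		if (!my_mapper)
			return -ENOMEM;
	}

	/* The returned address is &map->data, usable as a long in place */
	count = (long *)ftrace_func_mapper_find_ip(my_mapper, ip);
	if (!count)
		return ftrace_func_mapper_add_ip(my_mapper, ip, (void *)1L);

	(*count)++;
	return 0;
}
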
5394 static void release_probe(struct ftrace_func_probe *probe)
5395 {
5396 	struct ftrace_probe_ops *probe_ops;
5397 
5398 	guard(mutex)(&ftrace_lock);
5399 
5400 	WARN_ON(probe->ref <= 0);
5401 
5402 	/* Subtract the ref that was used to protect this instance */
5403 	probe->ref--;
5404 
5405 	if (!probe->ref) {
5406 		probe_ops = probe->probe_ops;
5407 		/*
5408 		 * Sending zero as ip tells probe_ops to free
5409 		 * the probe->data itself
5410 		 */
5411 		if (probe_ops->free)
5412 			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
5413 		list_del(&probe->list);
5414 		kfree(probe);
5415 	}
5416 }
5417 
5418 static void acquire_probe_locked(struct ftrace_func_probe *probe)
5419 {
5420 	/*
5421 	 * Add one ref to keep it from being freed when releasing the
5422 	 * ftrace_lock mutex.
5423 	 */
5424 	probe->ref++;
5425 }
5426 
5427 int
5428 register_ftrace_function_probe(char *glob, struct trace_array *tr,
5429 			       struct ftrace_probe_ops *probe_ops,
5430 			       void *data)
5431 {
5432 	struct ftrace_func_probe *probe = NULL, *iter;
5433 	struct ftrace_func_entry *entry;
5434 	struct ftrace_hash **orig_hash;
5435 	struct ftrace_hash *old_hash;
5436 	struct ftrace_hash *hash;
5437 	int count = 0;
5438 	int size;
5439 	int ret;
5440 	int i;
5441 
5442 	if (WARN_ON(!tr))
5443 		return -EINVAL;
5444 
5445 	/* We do not support '!' for function probes */
5446 	if (WARN_ON(glob[0] == '!'))
5447 		return -EINVAL;
5448 
5449 
5450 	mutex_lock(&ftrace_lock);
5451 	/* Check if the probe_ops is already registered */
5452 	list_for_each_entry(iter, &tr->func_probes, list) {
5453 		if (iter->probe_ops == probe_ops) {
5454 			probe = iter;
5455 			break;
5456 		}
5457 	}
5458 	if (!probe) {
5459 		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
5460 		if (!probe) {
5461 			mutex_unlock(&ftrace_lock);
5462 			return -ENOMEM;
5463 		}
5464 		probe->probe_ops = probe_ops;
5465 		probe->ops.func = function_trace_probe_call;
5466 		probe->tr = tr;
5467 		ftrace_ops_init(&probe->ops);
5468 		list_add(&probe->list, &tr->func_probes);
5469 	}
5470 
5471 	acquire_probe_locked(probe);
5472 
5473 	mutex_unlock(&ftrace_lock);
5474 
5475 	/*
5476 	 * Note, there's a small window here where the func_hash->filter_hash
5477 	 * may be NULL or empty. Care must be taken when reading the loop.
5478 	 */
5479 	mutex_lock(&probe->ops.func_hash->regex_lock);
5480 
5481 	orig_hash = &probe->ops.func_hash->filter_hash;
5482 	old_hash = *orig_hash;
5483 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
5484 
5485 	if (!hash) {
5486 		ret = -ENOMEM;
5487 		goto out;
5488 	}
5489 
5490 	ret = ftrace_match_records(hash, glob, strlen(glob));
5491 
5492 	/* Nothing found? */
5493 	if (!ret)
5494 		ret = -EINVAL;
5495 
5496 	if (ret < 0)
5497 		goto out;
5498 
5499 	size = 1 << hash->size_bits;
5500 	for (i = 0; i < size; i++) {
5501 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5502 			if (ftrace_lookup_ip(old_hash, entry->ip))
5503 				continue;
5504 			/*
5505 			 * The caller might want to do something special
5506 			 * for each function we find. We call the callback
5507 			 * to give the caller an opportunity to do so.
5508 			 */
5509 			if (probe_ops->init) {
5510 				ret = probe_ops->init(probe_ops, tr,
5511 						      entry->ip, data,
5512 						      &probe->data);
5513 				if (ret < 0) {
5514 					if (probe_ops->free && count)
5515 						probe_ops->free(probe_ops, tr,
5516 								0, probe->data);
5517 					probe->data = NULL;
5518 					goto out;
5519 				}
5520 			}
5521 			count++;
5522 		}
5523 	}
5524 
5525 	mutex_lock(&ftrace_lock);
5526 
5527 	if (!count) {
5528 		/* Nothing was added? */
5529 		ret = -EINVAL;
5530 		goto out_unlock;
5531 	}
5532 
5533 	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
5534 					      hash, 1);
5535 	if (ret < 0)
5536 		goto err_unlock;
5537 
5538 	/* One ref for each new function traced */
5539 	probe->ref += count;
5540 
5541 	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
5542 		ret = ftrace_startup(&probe->ops, 0);
5543 
5544  out_unlock:
5545 	mutex_unlock(&ftrace_lock);
5546 
5547 	if (!ret)
5548 		ret = count;
5549  out:
5550 	mutex_unlock(&probe->ops.func_hash->regex_lock);
5551 	free_ftrace_hash(hash);
5552 
5553 	release_probe(probe);
5554 
5555 	return ret;
5556 
5557  err_unlock:
5558 	if (!probe_ops->free || !count)
5559 		goto out_unlock;
5560 
5561 	/* Failed to do the move, need to call the free functions */
5562 	for (i = 0; i < size; i++) {
5563 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5564 			if (ftrace_lookup_ip(old_hash, entry->ip))
5565 				continue;
5566 			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5567 		}
5568 	}
5569 	goto out_unlock;
5570 }
5571 
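/*
 * Illustrative sketch (not part of this file): a minimal probe attached to
 * every function matching a glob. my_probe_func(), my_probe_ops and
 * my_probe_attach() are hypothetical; on success the registration returns
 * the number of functions the probe was attached to.
 */
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  struct trace_array *tr,
			  struct ftrace_probe_ops *ops, void *data)
{
	trace_printk("probe hit at %ps\n", (void *)ip);
}

static struct ftrace_probe_ops my_probe_ops = {
	.func	= my_probe_func,
};

static int my_probe_attach(struct trace_array *tr)
{
	return register_ftrace_function_probe("sched_*", tr,
					      &my_probe_ops, NULL);
}
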
5572 int
5573 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
5574 				      struct ftrace_probe_ops *probe_ops)
5575 {
5576 	struct ftrace_func_probe *probe = NULL, *iter;
5577 	struct ftrace_ops_hash old_hash_ops;
5578 	struct ftrace_func_entry *entry;
5579 	struct ftrace_glob func_g;
5580 	struct ftrace_hash **orig_hash;
5581 	struct ftrace_hash *old_hash;
5582 	struct ftrace_hash *hash = NULL;
5583 	struct hlist_node *tmp;
5584 	struct hlist_head hhd;
5585 	char str[KSYM_SYMBOL_LEN];
5586 	int count = 0;
5587 	int i, ret = -ENODEV;
5588 	int size;
5589 
5590 	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
5591 		func_g.search = NULL;
5592 	else {
5593 		int not;
5594 
5595 		func_g.type = filter_parse_regex(glob, strlen(glob),
5596 						 &func_g.search, &not);
5597 		func_g.len = strlen(func_g.search);
5598 
5599 		/* we do not support '!' for function probes */
5600 		if (WARN_ON(not))
5601 			return -EINVAL;
5602 	}
5603 
5604 	mutex_lock(&ftrace_lock);
5605 	/* Check if the probe_ops is already registered */
5606 	list_for_each_entry(iter, &tr->func_probes, list) {
5607 		if (iter->probe_ops == probe_ops) {
5608 			probe = iter;
5609 			break;
5610 		}
5611 	}
5612 	if (!probe)
5613 		goto err_unlock_ftrace;
5614 
5615 	ret = -EINVAL;
5616 	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
5617 		goto err_unlock_ftrace;
5618 
5619 	acquire_probe_locked(probe);
5620 
5621 	mutex_unlock(&ftrace_lock);
5622 
5623 	mutex_lock(&probe->ops.func_hash->regex_lock);
5624 
5625 	orig_hash = &probe->ops.func_hash->filter_hash;
5626 	old_hash = *orig_hash;
5627 
5628 	if (ftrace_hash_empty(old_hash))
5629 		goto out_unlock;
5630 
5631 	old_hash_ops.filter_hash = old_hash;
5632 	/* Probes only have filters */
5633 	old_hash_ops.notrace_hash = NULL;
5634 
5635 	ret = -ENOMEM;
5636 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
5637 	if (!hash)
5638 		goto out_unlock;
5639 
5640 	INIT_HLIST_HEAD(&hhd);
5641 
5642 	size = 1 << hash->size_bits;
5643 	for (i = 0; i < size; i++) {
5644 		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
5645 
5646 			if (func_g.search) {
5647 				kallsyms_lookup(entry->ip, NULL, NULL,
5648 						NULL, str);
5649 				if (!ftrace_match(str, &func_g))
5650 					continue;
5651 			}
5652 			count++;
5653 			remove_hash_entry(hash, entry);
5654 			hlist_add_head(&entry->hlist, &hhd);
5655 		}
5656 	}
5657 
5658 	/* Nothing found? */
5659 	if (!count) {
5660 		ret = -EINVAL;
5661 		goto out_unlock;
5662 	}
5663 
5664 	mutex_lock(&ftrace_lock);
5665 
5666 	WARN_ON(probe->ref < count);
5667 
5668 	probe->ref -= count;
5669 
5670 	if (ftrace_hash_empty(hash))
5671 		ftrace_shutdown(&probe->ops, 0);
5672 
5673 	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
5674 					      hash, 1);
5675 
5676 	/* still need to update the function call sites */
5677 	if (ftrace_enabled && !ftrace_hash_empty(hash))
5678 		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
5679 				       &old_hash_ops);
5680 	synchronize_rcu();
5681 
5682 	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
5683 		hlist_del(&entry->hlist);
5684 		if (probe_ops->free)
5685 			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5686 		kfree(entry);
5687 	}
5688 	mutex_unlock(&ftrace_lock);
5689 
5690  out_unlock:
5691 	mutex_unlock(&probe->ops.func_hash->regex_lock);
5692 	free_ftrace_hash(hash);
5693 
5694 	release_probe(probe);
5695 
5696 	return ret;
5697 
5698  err_unlock_ftrace:
5699 	mutex_unlock(&ftrace_lock);
5700 	return ret;
5701 }
5702 
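/*
 * Illustrative counterpart to the sketch above: detach my_probe_ops from a
 * subset of functions by glob, or from everything with a NULL or "*" glob,
 * as clear_ftrace_function_probes() below does on instance removal.
 */
static void my_probe_detach(struct trace_array *tr)
{
	unregister_ftrace_function_probe_func(NULL, tr, &my_probe_ops);
}
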
5703 void clear_ftrace_function_probes(struct trace_array *tr)
5704 {
5705 	struct ftrace_func_probe *probe, *n;
5706 
5707 	list_for_each_entry_safe(probe, n, &tr->func_probes, list)
5708 		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
5709 }
5710 
5711 static LIST_HEAD(ftrace_commands);
5712 static DEFINE_MUTEX(ftrace_cmd_mutex);
5713 
5714 /*
5715  * Currently we only register ftrace commands from __init, so mark this
5716  * __init too.
5717  */
5718 __init int register_ftrace_command(struct ftrace_func_command *cmd)
5719 {
5720 	struct ftrace_func_command *p;
5721 
5722 	guard(mutex)(&ftrace_cmd_mutex);
5723 	list_for_each_entry(p, &ftrace_commands, list) {
5724 		if (strcmp(cmd->name, p->name) == 0)
5725 			return -EBUSY;
5726 	}
5727 	list_add(&cmd->list, &ftrace_commands);
5728 
5729 	return 0;
5730 }
5731 
5732 /*
5733  * Currently we only unregister ftrace commands from __init, so mark
5734  * this __init too.
5735  */
5736 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
5737 {
5738 	struct ftrace_func_command *p, *n;
5739 
5740 	guard(mutex)(&ftrace_cmd_mutex);
5741 
5742 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
5743 		if (strcmp(cmd->name, p->name) == 0) {
5744 			list_del_init(&p->list);
5745 			return 0;
5746 		}
5747 	}
5748 
5749 	return -ENODEV;
5750 }
5751 
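/*
 * Illustrative sketch (not part of this file): the shape of a custom
 * command hooked into the "func:cmd[:param]" syntax handled below.
 * my_cmd_func() and my_cmd are hypothetical; real users such as
 * "traceoff" live in trace_functions.c.
 */
static int my_cmd_func(struct trace_array *tr, struct ftrace_hash *hash,
		       char *func, char *cmd, char *param, int enable)
{
	/* Called for: echo '<func>:mycmd[:param]' > set_ftrace_filter */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "mycmd",
	.func	= my_cmd_func,
};

static __init int my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
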
5752 static int ftrace_process_regex(struct ftrace_iterator *iter,
5753 				char *buff, int len, int enable)
5754 {
5755 	struct ftrace_hash *hash = iter->hash;
5756 	struct trace_array *tr = iter->ops->private;
5757 	char *func, *command, *next = buff;
5758 	struct ftrace_func_command *p;
5759 	int ret;
5760 
5761 	func = strsep(&next, ":");
5762 
5763 	if (!next) {
5764 		ret = ftrace_match_records(hash, func, len);
5765 		if (!ret)
5766 			ret = -EINVAL;
5767 		if (ret < 0)
5768 			return ret;
5769 		return 0;
5770 	}
5771 
5772 	/* command found */
5773 
5774 	command = strsep(&next, ":");
5775 
5776 	guard(mutex)(&ftrace_cmd_mutex);
5777 
5778 	list_for_each_entry(p, &ftrace_commands, list) {
5779 		if (strcmp(p->name, command) == 0)
5780 			return p->func(tr, hash, func, command, next, enable);
5781 	}
5782 
5783 	return -EINVAL;
5784 }
5785 
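/*
 * Illustrative user-space view of the syntax parsed above (assuming the
 * usual tracefs mount at /sys/kernel/tracing):
 *
 *	# plain glob: just filter on matching functions
 *	echo 'sched_*' > set_ftrace_filter
 *
 *	# func:command[:param] form, dispatched to a registered command
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *	echo 'write*:mod:ext4' > set_ftrace_filter
 */
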
5786 static ssize_t
5787 ftrace_regex_write(struct file *file, const char __user *ubuf,
5788 		   size_t cnt, loff_t *ppos, int enable)
5789 {
5790 	struct ftrace_iterator *iter;
5791 	struct trace_parser *parser;
5792 	ssize_t ret, read;
5793 
5794 	if (!cnt)
5795 		return 0;
5796 
5797 	if (file->f_mode & FMODE_READ) {
5798 		struct seq_file *m = file->private_data;
5799 		iter = m->private;
5800 	} else
5801 		iter = file->private_data;
5802 
5803 	if (unlikely(ftrace_disabled))
5804 		return -ENODEV;
5805 
5806 	/* iter->hash is a local copy, so we don't need regex_lock */
5807 
5808 	parser = &iter->parser;
5809 	read = trace_get_user(parser, ubuf, cnt, ppos);
5810 
5811 	if (read >= 0 && trace_parser_loaded(parser) &&
5812 	    !trace_parser_cont(parser)) {
5813 		ret = ftrace_process_regex(iter, parser->buffer,
5814 					   parser->idx, enable);
5815 		trace_parser_clear(parser);
5816 		if (ret < 0)
5817 			return ret;
5818 	}
5819 
5820 	return read;
5821 }
5822 
5823 ssize_t
5824 ftrace_filter_write(struct file *file, const char __user *ubuf,
5825 		    size_t cnt, loff_t *ppos)
5826 {
5827 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5828 }
5829 
5830 ssize_t
5831 ftrace_notrace_write(struct file *file, const char __user *ubuf,
5832 		     size_t cnt, loff_t *ppos)
5833 {
5834 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5835 }
5836 
5837 static int
5838 __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
5839 {
5840 	struct ftrace_func_entry *entry;
5841 
5842 	ip = ftrace_location(ip);
5843 	if (!ip)
5844 		return -EINVAL;
5845 
5846 	if (remove) {
5847 		entry = ftrace_lookup_ip(hash, ip);
5848 		if (!entry)
5849 			return -ENOENT;
5850 		free_hash_entry(hash, entry);
5851 		return 0;
5852 	} else if (__ftrace_lookup_ip(hash, ip) != NULL) {
5853 		/* Already exists */
5854 		return 0;
5855 	}
5856 
5857 	entry = add_hash_entry(hash, ip);
5858 	return entry ? 0 : -ENOMEM;
5859 }
5860 
5861 static int
5862 ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
5863 		  unsigned int cnt, int remove)
5864 {
5865 	unsigned int i;
5866 	int err;
5867 
5868 	for (i = 0; i < cnt; i++) {
5869 		err = __ftrace_match_addr(hash, ips[i], remove);
5870 		if (err) {
5871 			/*
5872 			 * This expects the @hash is a temporary hash and if this
5873 			 * fails the caller must free the @hash.
5874 			 */
5875 			return err;
5876 		}
5877 	}
5878 	return 0;
5879 }
5880 
5881 static int
5882 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
5883 		unsigned long *ips, unsigned int cnt,
5884 		int remove, int reset, int enable, char *mod)
5885 {
5886 	struct ftrace_hash **orig_hash;
5887 	struct ftrace_hash *hash;
5888 	int ret;
5889 
5890 	if (unlikely(ftrace_disabled))
5891 		return -ENODEV;
5892 
5893 	mutex_lock(&ops->func_hash->regex_lock);
5894 
5895 	if (enable)
5896 		orig_hash = &ops->func_hash->filter_hash;
5897 	else
5898 		orig_hash = &ops->func_hash->notrace_hash;
5899 
5900 	if (reset)
5901 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5902 	else
5903 		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5904 
5905 	if (!hash) {
5906 		ret = -ENOMEM;
5907 		goto out_regex_unlock;
5908 	}
5909 
5910 	if (buf && !match_records(hash, buf, len, mod)) {
5911 		/* If this was for a module and nothing was enabled, flag it */
5912 		if (mod)
5913 			(*orig_hash)->flags |= FTRACE_HASH_FL_MOD;
5914 
5915 		/*
5916 		 * Even if it is a mod, return error to let caller know
5917 		 * nothing was added
5918 		 */
5919 		ret = -EINVAL;
5920 		goto out_regex_unlock;
5921 	}
5922 	if (ips) {
5923 		ret = ftrace_match_addr(hash, ips, cnt, remove);
5924 		if (ret < 0)
5925 			goto out_regex_unlock;
5926 	}
5927 
5928 	mutex_lock(&ftrace_lock);
5929 	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5930 	mutex_unlock(&ftrace_lock);
5931 
5932  out_regex_unlock:
5933 	mutex_unlock(&ops->func_hash->regex_lock);
5934 
5935 	free_ftrace_hash(hash);
5936 	return ret;
5937 }
5938 
5939 static int
5940 ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
5941 		int remove, int reset, int enable)
5942 {
5943 	return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable, NULL);
5944 }
5945 
5946 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
5947 
5948 static int register_ftrace_function_nolock(struct ftrace_ops *ops);
5949 
5950 /*
5951  * If there are multiple ftrace_ops, use SAVE_REGS by default, so that the
5952  * direct call will be jumped to from ftrace_regs_caller. Only if the
5953  * architecture does not support ftrace_regs_caller but does support direct
5954  * calls, use SAVE_ARGS so that the jump is made from ftrace_caller instead.
5955  */
5956 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
5957 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
5958 #else
5959 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
5960 #endif
5961 
5962 static int check_direct_multi(struct ftrace_ops *ops)
5963 {
5964 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5965 		return -EINVAL;
5966 	if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5967 		return -EINVAL;
5968 	return 0;
5969 }
5970 
5971 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5972 {
5973 	struct ftrace_func_entry *entry, *del;
5974 	int size, i;
5975 
5976 	size = 1 << hash->size_bits;
5977 	for (i = 0; i < size; i++) {
5978 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5979 			del = __ftrace_lookup_ip(direct_functions, entry->ip);
5980 			if (del && ftrace_jmp_get(del->direct) ==
5981 				   ftrace_jmp_get(addr)) {
5982 				remove_hash_entry(direct_functions, del);
5983 				kfree(del);
5984 			}
5985 		}
5986 	}
5987 }
5988 
5989 static void register_ftrace_direct_cb(struct rcu_head *rhp)
5990 {
5991 	struct ftrace_hash *fhp = container_of(rhp, struct ftrace_hash, rcu);
5992 
5993 	free_ftrace_hash(fhp);
5994 }
5995 
5996 static void reset_direct(struct ftrace_ops *ops, unsigned long addr)
5997 {
5998 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
5999 
6000 	remove_direct_functions_hash(hash, addr);
6001 
6002 	/* clean up for a possible later register call */
6003 	ops->func = NULL;
6004 	ops->trampoline = 0;
6005 }
6006 
6007 /**
6008  * register_ftrace_direct - Call a custom trampoline directly
6009  * for multiple functions registered in @ops
6010  * @ops: The address of the struct ftrace_ops object
6011  * @addr: The address of the trampoline to call at @ops functions
6012  *
6013  * This is used to connect direct calls to @addr from the nop locations
6014  * of the functions registered in @ops (set by the ftrace_set_filter_ip()
6015  * function).
6016  *
6017  * The location that it calls (@addr) must be able to handle a direct call,
6018  * and save the parameters of the function being traced, and restore them
6019  * (or inject new ones if needed), before returning.
6020  *
6021  * Returns:
6022  *  0 on success
6023  *  -EINVAL  - The @ops object was already registered with this call or
6024  *             when there are no functions in @ops object.
6025  *  -EBUSY   - Another direct function is already attached (there can be only one)
6026  *  -ENODEV  - @ip does not point to a ftrace nop location (or not supported)
6027  *  -ENOMEM  - There was an allocation failure.
6028  */
6029 int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
6030 {
6031 	struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL;
6032 	struct ftrace_func_entry *entry, *new;
6033 	int err = -EBUSY, size, i;
6034 
6035 	if (ops->func || ops->trampoline)
6036 		return -EINVAL;
6037 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
6038 		return -EINVAL;
6039 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
6040 		return -EINVAL;
6041 
6042 	hash = ops->func_hash->filter_hash;
6043 	if (ftrace_hash_empty(hash))
6044 		return -EINVAL;
6045 
6046 	/* This is a "raw" address, and this should never happen. */
6047 	if (WARN_ON_ONCE(ftrace_is_jmp(addr)))
6048 		return -EINVAL;
6049 
6050 	mutex_lock(&direct_mutex);
6051 
6052 	if (ops->flags & FTRACE_OPS_FL_JMP)
6053 		addr = ftrace_jmp_set(addr);
6054 
6055 	/* Make sure requested entries are not already registered. */
6056 	size = 1 << hash->size_bits;
6057 	for (i = 0; i < size; i++) {
6058 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
6059 			if (ftrace_find_rec_direct(entry->ip))
6060 				goto out_unlock;
6061 		}
6062 	}
6063 
6064 	err = -ENOMEM;
6065 
6066 	/* Make a copy hash to place the new and the old entries in */
6067 	size = hash->count + direct_functions->count;
6068 	size = fls(size);
6069 	if (size > FTRACE_HASH_MAX_BITS)
6070 		size = FTRACE_HASH_MAX_BITS;
6071 	new_hash = alloc_ftrace_hash(size);
6072 	if (!new_hash)
6073 		goto out_unlock;
6074 
6075 	/* Now copy over the existing direct entries */
6076 	size = 1 << direct_functions->size_bits;
6077 	for (i = 0; i < size; i++) {
6078 		hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) {
6079 			new = add_hash_entry(new_hash, entry->ip);
6080 			if (!new)
6081 				goto out_unlock;
6082 			new->direct = entry->direct;
6083 		}
6084 	}
6085 
6086 	/* ... and add the new entries */
6087 	size = 1 << hash->size_bits;
6088 	for (i = 0; i < size; i++) {
6089 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
6090 			new = add_hash_entry(new_hash, entry->ip);
6091 			if (!new)
6092 				goto out_unlock;
6093 			/* Update both the copy and the hash entry */
6094 			new->direct = addr;
6095 			entry->direct = addr;
6096 		}
6097 	}
6098 
6099 	free_hash = direct_functions;
6100 	rcu_assign_pointer(direct_functions, new_hash);
6101 	new_hash = NULL;
6102 
6103 	ops->func = call_direct_funcs;
6104 	ops->flags |= MULTI_FLAGS;
6105 	ops->trampoline = FTRACE_REGS_ADDR;
6106 	ops->direct_call = addr;
6107 
6108 	err = register_ftrace_function_nolock(ops);
6109 	if (err)
6110 		reset_direct(ops, addr);
6111 
6112  out_unlock:
6113 	mutex_unlock(&direct_mutex);
6114 
6115 	if (free_hash && free_hash != EMPTY_HASH)
6116 		call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb);
6117 
6118 	if (new_hash)
6119 		free_ftrace_hash(new_hash);
6120 
6121 	return err;
6122 }
6123 EXPORT_SYMBOL_GPL(register_ftrace_direct);
6124 
6125 /**
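/*
 * Illustrative sketch (not part of this file), modeled on
 * samples/ftrace/ftrace-direct-multi.c: my_tramp is assumed to be an
 * architecture-specific assembly trampoline that saves and restores the
 * traced function's arguments around the custom code it runs.
 */
extern void my_tramp(void);

static struct ftrace_ops my_direct_ops;

static int my_direct_attach(unsigned long ip)
{
	int ret;

	/* Build the filter hash first; the ops must not be enabled yet */
	ret = ftrace_set_filter_ip(&my_direct_ops, ip, 0, 0);
	if (ret)
		return ret;

	return register_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp);
}
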
6126  * unregister_ftrace_direct - Remove calls to custom trampoline
6127  * previously registered by register_ftrace_direct for @ops object.
6128  * @ops: The address of the struct ftrace_ops object
6129  * @addr: The address of the direct function that is called by the @ops functions
6130  * @free_filters: Set to true to remove all filters for the ftrace_ops, false otherwise
6131  *
6132  * This is used to remove direct calls to @addr from the nop locations
6133  * of the functions registered in @ops (set by the ftrace_set_filter_ip()
6134  * function).
6135  *
6136  * Returns:
6137  *  0 on success
6138  *  -EINVAL - The @ops object was not properly registered.
6139  */
6140 int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
6141 			     bool free_filters)
6142 {
6143 	int err;
6144 
6145 	if (check_direct_multi(ops))
6146 		return -EINVAL;
6147 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6148 		return -EINVAL;
6149 
6150 	mutex_lock(&direct_mutex);
6151 	err = unregister_ftrace_function(ops);
6152 	reset_direct(ops, addr);
6153 	mutex_unlock(&direct_mutex);
6154 
6155 	if (free_filters)
6156 		ftrace_free_filter(ops);
6157 	return err;
6158 }
6159 EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
6160 
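/*
 * Illustrative teardown for the sketch above: assumes my_direct_ops and
 * my_tramp from the attach example. Passing true for @free_filters also
 * drops the filter hash so my_direct_ops can be reused or freed.
 */
static void my_direct_detach(void)
{
	unregister_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp, true);
}
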
6161 static int
6162 __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
6163 {
6164 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
6165 	struct ftrace_func_entry *entry, *iter;
6166 	static struct ftrace_ops tmp_ops = {
6167 		.func		= ftrace_stub,
6168 		.flags		= FTRACE_OPS_FL_STUB,
6169 	};
6170 	int i, size;
6171 	int err;
6172 
6173 	lockdep_assert_held_once(&direct_mutex);
6174 
6175 	/* This is a "raw" address, and this should never happen. */
6176 	if (WARN_ON_ONCE(ftrace_is_jmp(addr)))
6177 		return -EINVAL;
6178 
6179 	if (ops->flags & FTRACE_OPS_FL_JMP)
6180 		addr = ftrace_jmp_set(addr);
6181 
6182 	/* Enable the tmp_ops to have the same functions as the direct ops */
6183 	ftrace_ops_init(&tmp_ops);
6184 	tmp_ops.func_hash = ops->func_hash;
6185 	tmp_ops.direct_call = addr;
6186 
6187 	err = register_ftrace_function_nolock(&tmp_ops);
6188 	if (err)
6189 		return err;
6190 
6191 	/*
6192 	 * Call __ftrace_hash_update_ipmodify() here, so that we can call
6193 	 * ops->ops_func for the ops. This is needed because the above
6194 	 * register_ftrace_function_nolock() worked on tmp_ops.
6195 	 */
6196 	err = __ftrace_hash_update_ipmodify(ops, hash, hash, true);
6197 	if (err)
6198 		goto out;
6199 
6200 	/*
6201 	 * Now the ftrace_ops_list_func() is called to do the direct callers.
6202 	 * We can safely change the direct functions attached to each entry.
6203 	 */
6204 	mutex_lock(&ftrace_lock);
6205 
6206 	size = 1 << hash->size_bits;
6207 	for (i = 0; i < size; i++) {
6208 		hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
6209 			entry = __ftrace_lookup_ip(direct_functions, iter->ip);
6210 			if (!entry)
6211 				continue;
6212 			entry->direct = addr;
6213 		}
6214 	}
6215 	/* Prevent store tearing if a trampoline concurrently accesses the value */
6216 	WRITE_ONCE(ops->direct_call, addr);
6217 
6218 	mutex_unlock(&ftrace_lock);
6219 
6220 out:
6221 	/* Removing the tmp_ops will add the updated direct callers to the functions */
6222 	unregister_ftrace_function(&tmp_ops);
6223 
6224 	return err;
6225 }
6226 
6227 /**
6228  * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
6229  * to call something else
6230  * @ops: The address of the struct ftrace_ops object
6231  * @addr: The address of the new trampoline to call at @ops functions
6232  *
6233  * This is used to unregister the currently registered direct caller and
6234  * register a new one, @addr, on the functions registered in the @ops object.
6235  *
6236  * Note there's a window between the ftrace_shutdown and ftrace_startup calls
6237  * where there will be no callbacks called.
6238  *
6239  * Caller should already have direct_mutex locked, so we don't lock
6240  * direct_mutex here.
6241  *
6242  * Returns: zero on success. Non zero on error, which includes:
6243  *  -EINVAL - The @ops object was not properly registered.
6244  */
6245 int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
6246 {
6247 	if (check_direct_multi(ops))
6248 		return -EINVAL;
6249 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6250 		return -EINVAL;
6251 
6252 	return __modify_ftrace_direct(ops, addr);
6253 }
6254 EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock);
6255 
6256 /**
6257  * modify_ftrace_direct - Modify an existing direct 'multi' call
6258  * to call something else
6259  * @ops: The address of the struct ftrace_ops object
6260  * @addr: The address of the new trampoline to call at @ops functions
6261  *
6262  * This is used to unregister the currently registered direct caller and
6263  * register a new one, @addr, on the functions registered in the @ops object.
6264  *
6265  * Note there's a window between the ftrace_shutdown and ftrace_startup calls
6266  * where there will be no callbacks called.
6267  *
6268  * Returns: zero on success. Non zero on error, which includes:
6269  *  -EINVAL - The @ops object was not properly registered.
6270  */
6271 int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
6272 {
6273 	int err;
6274 
6275 	if (check_direct_multi(ops))
6276 		return -EINVAL;
6277 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6278 		return -EINVAL;
6279 
6280 	mutex_lock(&direct_mutex);
6281 	err = __modify_ftrace_direct(ops, addr);
6282 	mutex_unlock(&direct_mutex);
6283 	return err;
6284 }
6285 EXPORT_SYMBOL_GPL(modify_ftrace_direct);
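
/*
 * Illustrative sketch (not part of this file): retarget the live direct
 * ops from the attach example above to a second trampoline without an
 * unregister/register cycle. my_tramp2 is assumed to exist like my_tramp.
 */
extern void my_tramp2(void);

static int my_direct_switch(void)
{
	return modify_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp2);
}
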
6286 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
6287 
6288 /**
6289  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
6290  * @ops: the ops to set the filter with
6291  * @ip: the address to add to or remove from the filter.
6292  * @remove: non zero to remove the ip from the filter
6293  * @reset: non zero to reset all filters before applying this filter.
6294  *
6295  * Filters denote which functions should be enabled when tracing is enabled.
6296  * If @ip is NULL, it fails to update the filter.
6297  *
6298  * This can allocate memory which must be freed before @ops can be freed,
6299  * either by removing each filtered addr or by using
6300  * ftrace_free_filter(@ops).
6301  */
6302 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
6303 			 int remove, int reset)
6304 {
6305 	ftrace_ops_init(ops);
6306 	return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
6307 }
6308 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
6309 
6310 /**
6311  * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
6312  * @ops: the ops to set the filter with
6313  * @ips: the array of addresses to add to or remove from the filter.
6314  * @cnt: the number of addresses in @ips
6315  * @remove: non zero to remove ips from the filter
6316  * @reset: non zero to reset all filters before applying this filter.
6317  *
6318  * Filters denote which functions should be enabled when tracing is enabled.
6319  * If the @ips array or any ip specified within is NULL, it fails to update the filter.
6320  *
6321  * This can allocate memory which must be freed before @ops can be freed,
6322  * either by removing each filtered addr or by using
6323  * ftrace_free_filter(@ops).
6324  */
6325 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
6326 			  unsigned int cnt, int remove, int reset)
6327 {
6328 	ftrace_ops_init(ops);
6329 	return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
6330 }
6331 EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
6332 
6333 /**
6334  * ftrace_ops_set_global_filter - setup ops to use global filters
6335  * @ops: the ops which will use the global filters
6336  *
6337  * ftrace users who need global function trace filtering should call this.
6338  * It can set the global filter only if ops were not initialized before.
6339  */
6340 void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
6341 {
6342 	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
6343 		return;
6344 
6345 	ftrace_ops_init(ops);
6346 	ops->func_hash = &global_ops.local_hash;
6347 }
6348 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
6349 
6350 static int
6351 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
6352 		 int reset, int enable)
6353 {
6354 	char *mod = NULL, *func, *command, *next = buf;
6355 	char *tmp __free(kfree) = NULL;
6356 	struct trace_array *tr = ops->private;
6357 	int ret;
6358 
6359 	func = strsep(&next, ":");
6360 
6361 	/* This can also handle :mod: parsing */
6362 	if (next) {
6363 		if (!tr)
6364 			return -EINVAL;
6365 
6366 		command = strsep(&next, ":");
6367 		if (strcmp(command, "mod") != 0)
6368 			return -EINVAL;
6369 
6370 		mod = next;
6371 		len = command - func;
6372 		/* Save the original func as ftrace_set_hash() can modify it */
6373 		tmp = kstrdup(func, GFP_KERNEL);
6374 	}
6375 
6376 	ret = ftrace_set_hash(ops, func, len, NULL, 0, 0, reset, enable, mod);
6377 
6378 	if (tr && mod && ret < 0) {
6379 		/* Did tmp fail to allocate? */
6380 		if (!tmp)
6381 			return -ENOMEM;
6382 		ret = cache_mod(tr, tmp, mod, enable);
6383 	}
6384 
6385 	return ret;
6386 }
6387 
6388 /**
6389  * ftrace_set_filter - set a function to filter on in ftrace
6390  * @ops: the ops to set the filter with
6391  * @buf: the string that holds the function filter text.
6392  * @len: the length of the string.
6393  * @reset: non-zero to reset all filters before applying this filter.
6394  *
6395  * Filters denote which functions should be enabled when tracing is enabled.
6396  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
6397  *
6398  * This can allocate memory which must be freed before @ops can be freed,
6399  * either by removing each filtered addr or by using
6400  * ftrace_free_filter(@ops).
6401  */
6402 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
6403 		       int len, int reset)
6404 {
6405 	ftrace_ops_init(ops);
6406 	return ftrace_set_regex(ops, buf, len, reset, 1);
6407 }
6408 EXPORT_SYMBOL_GPL(ftrace_set_filter);
6409 
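/*
 * Illustrative sketch (not part of this file): a function-trace callback
 * restricted to a single function via ftrace_set_filter(). my_callback()
 * and my_ops are hypothetical; the callback signature is the ftrace_func_t
 * from <linux/ftrace.h>.
 */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op,
				struct ftrace_regs *fregs)
{
	trace_printk("%ps called from %ps\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
};

static int my_trace_schedule(void)
{
	int ret;

	ret = ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 1);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}
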
6410 /**
6411  * ftrace_set_notrace - set a function to not trace in ftrace
6412  * @ops: the ops to set the notrace filter with
6413  * @buf: the string that holds the function notrace text.
6414  * @len: the length of the string.
6415  * @reset: non-zero to reset all filters before applying this filter.
6416  *
6417  * Notrace Filters denote which functions should not be enabled when tracing
6418  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
6419  * for tracing.
6420  *
6421  * This can allocate memory which must be freed before @ops can be freed,
6422  * either by removing each filtered addr or by using
6423  * ftrace_free_filter(@ops).
6424  */
6425 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
6426 			int len, int reset)
6427 {
6428 	ftrace_ops_init(ops);
6429 	return ftrace_set_regex(ops, buf, len, reset, 0);
6430 }
6431 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
6432 /**
6433  * ftrace_set_global_filter - set a function to filter on with global tracers
6434  * @buf: the string that holds the function filter text.
6435  * @len: the length of the string.
6436  * @reset: non-zero to reset all filters before applying this filter.
6437  *
6438  * Filters denote which functions should be enabled when tracing is enabled.
6439  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
6440  */
6441 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
6442 {
6443 	ftrace_set_regex(&global_ops, buf, len, reset, 1);
6444 }
6445 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
6446 
6447 /**
6448  * ftrace_set_global_notrace - set a function to not trace with global tracers
6449  * @buf: the string that holds the function notrace text.
6450  * @len: the length of the string.
6451  * @reset: non-zero to reset all filters before applying this filter.
6452  *
6453  * Notrace Filters denote which functions should not be enabled when tracing
6454  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
6455  * for tracing.
6456  */
6457 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
6458 {
6459 	ftrace_set_regex(&global_ops, buf, len, reset, 0);
6460 }
6461 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
6462 
6463 /*
6464  * command line interface to allow users to set filters on boot up.
6465  */
6466 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
6467 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
6468 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
6469 
6470 /* Used by function selftest to not test if filter is set */
6471 bool ftrace_filter_param __initdata;
6472 
6473 static int __init set_ftrace_notrace(char *str)
6474 {
6475 	ftrace_filter_param = true;
6476 	strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
6477 	return 1;
6478 }
6479 __setup("ftrace_notrace=", set_ftrace_notrace);
6480 
6481 static int __init set_ftrace_filter(char *str)
6482 {
6483 	ftrace_filter_param = true;
6484 	strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
6485 	return 1;
6486 }
6487 __setup("ftrace_filter=", set_ftrace_filter);
6488 
6489 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6490 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
6491 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
6492 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
6493 
6494 static int __init set_graph_function(char *str)
6495 {
6496 	strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
6497 	return 1;
6498 }
6499 __setup("ftrace_graph_filter=", set_graph_function);
6500 
6501 static int __init set_graph_notrace_function(char *str)
6502 {
6503 	strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
6504 	return 1;
6505 }
6506 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
6507 
6508 static int __init set_graph_max_depth_function(char *str)
6509 {
6510 	if (!str || kstrtouint(str, 0, &fgraph_max_depth))
6511 		return 0;
6512 	return 1;
6513 }
6514 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
6515 
6516 static void __init set_ftrace_early_graph(char *buf, int enable)
6517 {
6518 	int ret;
6519 	char *func;
6520 	struct ftrace_hash *hash;
6521 
6522 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
6523 	if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
6524 		return;
6525 
6526 	while (buf) {
6527 		func = strsep(&buf, ",");
6528 		/* we allow only one expression at a time */
6529 		ret = ftrace_graph_set_hash(hash, func);
6530 		if (ret)
6531 			printk(KERN_DEBUG "ftrace: function %s not "
6532 					  "traceable\n", func);
6533 	}
6534 
6535 	if (enable)
6536 		ftrace_graph_hash = hash;
6537 	else
6538 		ftrace_graph_notrace_hash = hash;
6539 }
6540 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6541 
6542 void __init
6543 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
6544 {
6545 	char *func;
6546 
6547 	ftrace_ops_init(ops);
6548 
6549 	/* The trace_array is needed for caching module function filters */
6550 	if (!ops->private) {
6551 		struct trace_array *tr = trace_get_global_array();
6552 
6553 		ops->private = tr;
6554 		ftrace_init_trace_array(tr);
6555 	}
6556 
6557 	while (buf) {
6558 		func = strsep(&buf, ",");
6559 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
6560 	}
6561 }
6562 
6563 static void __init set_ftrace_early_filters(void)
6564 {
6565 	if (ftrace_filter_buf[0])
6566 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
6567 	if (ftrace_notrace_buf[0])
6568 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
6569 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6570 	if (ftrace_graph_buf[0])
6571 		set_ftrace_early_graph(ftrace_graph_buf, 1);
6572 	if (ftrace_graph_notrace_buf[0])
6573 		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
6574 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6575 }
6576 
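/*
 * Illustrative boot-time usage of the parameters consumed above, given as
 * comma-separated lists on the kernel command line:
 *
 *	ftrace_filter=sched_*,kthread_*
 *	ftrace_notrace=rcu_*
 *	ftrace_graph_filter=do_sys_open
 *	ftrace_graph_max_depth=5
 */
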
6577 int ftrace_regex_release(struct inode *inode, struct file *file)
6578 {
6579 	struct seq_file *m = (struct seq_file *)file->private_data;
6580 	struct ftrace_iterator *iter;
6581 	struct ftrace_hash **orig_hash;
6582 	struct trace_parser *parser;
6583 	int filter_hash;
6584 
6585 	if (file->f_mode & FMODE_READ) {
6586 		iter = m->private;
6587 		seq_release(inode, file);
6588 	} else
6589 		iter = file->private_data;
6590 
6591 	parser = &iter->parser;
6592 	if (trace_parser_loaded(parser)) {
6593 		int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
6594 
6595 		ftrace_process_regex(iter, parser->buffer,
6596 				     parser->idx, enable);
6597 	}
6598 
6599 	trace_parser_put(parser);
6600 
6601 	mutex_lock(&iter->ops->func_hash->regex_lock);
6602 
6603 	if (file->f_mode & FMODE_WRITE) {
6604 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
6605 
6606 		if (filter_hash) {
6607 			orig_hash = &iter->ops->func_hash->filter_hash;
6608 			if (iter->tr) {
6609 				if (list_empty(&iter->tr->mod_trace))
6610 					iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
6611 				else
6612 					iter->hash->flags |= FTRACE_HASH_FL_MOD;
6613 			}
6614 		} else
6615 			orig_hash = &iter->ops->func_hash->notrace_hash;
6616 
6617 		mutex_lock(&ftrace_lock);
6618 		ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
6619 						      iter->hash, filter_hash);
6620 		mutex_unlock(&ftrace_lock);
6621 	}
6622 
6623 	mutex_unlock(&iter->ops->func_hash->regex_lock);
6624 	free_ftrace_hash(iter->hash);
6625 	if (iter->tr)
6626 		trace_array_put(iter->tr);
6627 	kfree(iter);
6628 
6629 	return 0;
6630 }
6631 
6632 static const struct file_operations ftrace_avail_fops = {
6633 	.open = ftrace_avail_open,
6634 	.read = seq_read,
6635 	.llseek = seq_lseek,
6636 	.release = seq_release_private,
6637 };
6638 
6639 static const struct file_operations ftrace_enabled_fops = {
6640 	.open = ftrace_enabled_open,
6641 	.read = seq_read,
6642 	.llseek = seq_lseek,
6643 	.release = seq_release_private,
6644 };
6645 
6646 static const struct file_operations ftrace_touched_fops = {
6647 	.open = ftrace_touched_open,
6648 	.read = seq_read,
6649 	.llseek = seq_lseek,
6650 	.release = seq_release_private,
6651 };
6652 
6653 static const struct file_operations ftrace_avail_addrs_fops = {
6654 	.open = ftrace_avail_addrs_open,
6655 	.read = seq_read,
6656 	.llseek = seq_lseek,
6657 	.release = seq_release_private,
6658 };
6659 
6660 static const struct file_operations ftrace_filter_fops = {
6661 	.open = ftrace_filter_open,
6662 	.read = seq_read,
6663 	.write = ftrace_filter_write,
6664 	.llseek = tracing_lseek,
6665 	.release = ftrace_regex_release,
6666 };
6667 
6668 static const struct file_operations ftrace_notrace_fops = {
6669 	.open = ftrace_notrace_open,
6670 	.read = seq_read,
6671 	.write = ftrace_notrace_write,
6672 	.llseek = tracing_lseek,
6673 	.release = ftrace_regex_release,
6674 };
6675 
6676 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6677 
6678 static DEFINE_MUTEX(graph_lock);
6679 
6680 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
6681 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
6682 
6683 enum graph_filter_type {
6684 	GRAPH_FILTER_NOTRACE	= 0,
6685 	GRAPH_FILTER_FUNCTION,
6686 };
6687 
6688 #define FTRACE_GRAPH_EMPTY	((void *)1)
6689 
6690 struct ftrace_graph_data {
6691 	struct ftrace_hash		*hash;
6692 	struct ftrace_func_entry	*entry;
6693 	int				idx;   /* for hash table iteration */
6694 	enum graph_filter_type		type;
6695 	struct ftrace_hash		*new_hash;
6696 	const struct seq_operations	*seq_ops;
6697 	struct trace_parser		parser;
6698 };
6699 
6700 static void *
6701 __g_next(struct seq_file *m, loff_t *pos)
6702 {
6703 	struct ftrace_graph_data *fgd = m->private;
6704 	struct ftrace_func_entry *entry = fgd->entry;
6705 	struct hlist_head *head;
6706 	int i, idx = fgd->idx;
6707 
6708 	if (*pos >= fgd->hash->count)
6709 		return NULL;
6710 
6711 	if (entry) {
6712 		hlist_for_each_entry_continue(entry, hlist) {
6713 			fgd->entry = entry;
6714 			return entry;
6715 		}
6716 
6717 		idx++;
6718 	}
6719 
6720 	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
6721 		head = &fgd->hash->buckets[i];
6722 		hlist_for_each_entry(entry, head, hlist) {
6723 			fgd->entry = entry;
6724 			fgd->idx = i;
6725 			return entry;
6726 		}
6727 	}
6728 	return NULL;
6729 }
6730 
6731 static void *
6732 g_next(struct seq_file *m, void *v, loff_t *pos)
6733 {
6734 	(*pos)++;
6735 	return __g_next(m, pos);
6736 }
6737 
6738 static void *g_start(struct seq_file *m, loff_t *pos)
6739 {
6740 	struct ftrace_graph_data *fgd = m->private;
6741 
6742 	mutex_lock(&graph_lock);
6743 
6744 	if (fgd->type == GRAPH_FILTER_FUNCTION)
6745 		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6746 					lockdep_is_held(&graph_lock));
6747 	else
6748 		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6749 					lockdep_is_held(&graph_lock));
6750 
6751 	/* Nothing set, tell g_show to print that all functions are enabled */
6752 	if (ftrace_hash_empty(fgd->hash) && !*pos)
6753 		return FTRACE_GRAPH_EMPTY;
6754 
6755 	fgd->idx = 0;
6756 	fgd->entry = NULL;
6757 	return __g_next(m, pos);
6758 }
6759 
6760 static void g_stop(struct seq_file *m, void *p)
6761 {
6762 	mutex_unlock(&graph_lock);
6763 }
6764 
6765 static int g_show(struct seq_file *m, void *v)
6766 {
6767 	struct ftrace_func_entry *entry = v;
6768 
6769 	if (!entry)
6770 		return 0;
6771 
6772 	if (entry == FTRACE_GRAPH_EMPTY) {
6773 		struct ftrace_graph_data *fgd = m->private;
6774 
6775 		if (fgd->type == GRAPH_FILTER_FUNCTION)
6776 			seq_puts(m, "#### all functions enabled ####\n");
6777 		else
6778 			seq_puts(m, "#### no functions disabled ####\n");
6779 		return 0;
6780 	}
6781 
6782 	seq_printf(m, "%ps\n", (void *)entry->ip);
6783 
6784 	return 0;
6785 }
6786 
6787 static const struct seq_operations ftrace_graph_seq_ops = {
6788 	.start = g_start,
6789 	.next = g_next,
6790 	.stop = g_stop,
6791 	.show = g_show,
6792 };
6793 
6794 static int
6795 __ftrace_graph_open(struct inode *inode, struct file *file,
6796 		    struct ftrace_graph_data *fgd)
6797 {
6798 	int ret;
6799 	struct ftrace_hash *new_hash = NULL;
6800 
6801 	ret = security_locked_down(LOCKDOWN_TRACEFS);
6802 	if (ret)
6803 		return ret;
6804 
6805 	if (file->f_mode & FMODE_WRITE) {
6806 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6807 
6808 		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6809 			return -ENOMEM;
6810 
6811 		if (file->f_flags & O_TRUNC)
6812 			new_hash = alloc_ftrace_hash(size_bits);
6813 		else
6814 			new_hash = alloc_and_copy_ftrace_hash(size_bits,
6815 							      fgd->hash);
6816 		if (!new_hash) {
6817 			ret = -ENOMEM;
6818 			goto out;
6819 		}
6820 	}
6821 
6822 	if (file->f_mode & FMODE_READ) {
6823 		ret = seq_open(file, &ftrace_graph_seq_ops);
6824 		if (!ret) {
6825 			struct seq_file *m = file->private_data;
6826 			m->private = fgd;
6827 		} else {
6828 			/* Failed */
6829 			free_ftrace_hash(new_hash);
6830 			new_hash = NULL;
6831 		}
6832 	} else
6833 		file->private_data = fgd;
6834 
6835 out:
6836 	if (ret < 0 && file->f_mode & FMODE_WRITE)
6837 		trace_parser_put(&fgd->parser);
6838 
6839 	fgd->new_hash = new_hash;
6840 
6841 	/*
6842 	 * All uses of fgd->hash must be taken with the graph_lock
6843 	 * held. The graph_lock is going to be released, so force
6844 	 * fgd->hash to be reinitialized when it is taken again.
6845 	 */
6846 	fgd->hash = NULL;
6847 
6848 	return ret;
6849 }
6850 
6851 static int
6852 ftrace_graph_open(struct inode *inode, struct file *file)
6853 {
6854 	struct ftrace_graph_data *fgd;
6855 	int ret;
6856 
6857 	if (unlikely(ftrace_disabled))
6858 		return -ENODEV;
6859 
6860 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6861 	if (fgd == NULL)
6862 		return -ENOMEM;
6863 
6864 	mutex_lock(&graph_lock);
6865 
6866 	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6867 					lockdep_is_held(&graph_lock));
6868 	fgd->type = GRAPH_FILTER_FUNCTION;
6869 	fgd->seq_ops = &ftrace_graph_seq_ops;
6870 
6871 	ret = __ftrace_graph_open(inode, file, fgd);
6872 	if (ret < 0)
6873 		kfree(fgd);
6874 
6875 	mutex_unlock(&graph_lock);
6876 	return ret;
6877 }
6878 
6879 static int
6880 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6881 {
6882 	struct ftrace_graph_data *fgd;
6883 	int ret;
6884 
6885 	if (unlikely(ftrace_disabled))
6886 		return -ENODEV;
6887 
6888 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6889 	if (fgd == NULL)
6890 		return -ENOMEM;
6891 
6892 	mutex_lock(&graph_lock);
6893 
6894 	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6895 					lockdep_is_held(&graph_lock));
6896 	fgd->type = GRAPH_FILTER_NOTRACE;
6897 	fgd->seq_ops = &ftrace_graph_seq_ops;
6898 
6899 	ret = __ftrace_graph_open(inode, file, fgd);
6900 	if (ret < 0)
6901 		kfree(fgd);
6902 
6903 	mutex_unlock(&graph_lock);
6904 	return ret;
6905 }
6906 
6907 static int
6908 ftrace_graph_release(struct inode *inode, struct file *file)
6909 {
6910 	struct ftrace_graph_data *fgd;
6911 	struct ftrace_hash *old_hash, *new_hash;
6912 	struct trace_parser *parser;
6913 	int ret = 0;
6914 
6915 	if (file->f_mode & FMODE_READ) {
6916 		struct seq_file *m = file->private_data;
6917 
6918 		fgd = m->private;
6919 		seq_release(inode, file);
6920 	} else {
6921 		fgd = file->private_data;
6922 	}
6923 
6924 
6925 	if (file->f_mode & FMODE_WRITE) {
6926 
6927 		parser = &fgd->parser;
6928 
6929 		if (trace_parser_loaded((parser))) {
6930 			ret = ftrace_graph_set_hash(fgd->new_hash,
6931 						    parser->buffer);
6932 		}
6933 
6934 		trace_parser_put(parser);
6935 
6936 		new_hash = __ftrace_hash_move(fgd->new_hash);
6937 		if (!new_hash) {
6938 			ret = -ENOMEM;
6939 			goto out;
6940 		}
6941 
6942 		mutex_lock(&graph_lock);
6943 
6944 		if (fgd->type == GRAPH_FILTER_FUNCTION) {
6945 			old_hash = rcu_dereference_protected(ftrace_graph_hash,
6946 					lockdep_is_held(&graph_lock));
6947 			rcu_assign_pointer(ftrace_graph_hash, new_hash);
6948 		} else {
6949 			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6950 					lockdep_is_held(&graph_lock));
6951 			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6952 		}
6953 
6954 		mutex_unlock(&graph_lock);
6955 
6956 		/*
6957 		 * We need to do a hard force of sched synchronization.
6958 		 * This is because we use preempt_disable() to do RCU, but
6959 		 * the function tracers can be called where RCU is not watching
6960 		 * (like before user_exit()). We can not rely on the RCU
6961 		 * infrastructure to do the synchronization, thus we must do it
6962 		 * ourselves.
6963 		 */
6964 		if (old_hash != EMPTY_HASH)
6965 			synchronize_rcu_tasks_rude();
6966 
6967 		free_ftrace_hash(old_hash);
6968 	}
6969 
6970  out:
6971 	free_ftrace_hash(fgd->new_hash);
6972 	kfree(fgd);
6973 
6974 	return ret;
6975 }
6976 
6977 static int
6978 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6979 {
6980 	struct ftrace_glob func_g;
6981 	struct dyn_ftrace *rec;
6982 	struct ftrace_page *pg;
6983 	struct ftrace_func_entry *entry;
6984 	int fail = 1;
6985 	int not;
6986 
6987 	/* decode regex */
6988 	func_g.type = filter_parse_regex(buffer, strlen(buffer),
6989 					 &func_g.search, &not);
6990 
6991 	func_g.len = strlen(func_g.search);
6992 
6993 	guard(mutex)(&ftrace_lock);
6994 
6995 	if (unlikely(ftrace_disabled))
6996 		return -ENODEV;
6997 
6998 	do_for_each_ftrace_rec(pg, rec) {
6999 
7000 		if (rec->flags & FTRACE_FL_DISABLED)
7001 			continue;
7002 
7003 		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
7004 			entry = ftrace_lookup_ip(hash, rec->ip);
7005 
7006 			if (!not) {
7007 				fail = 0;
7008 
7009 				if (entry)
7010 					continue;
7011 				if (add_hash_entry(hash, rec->ip) == NULL)
7012 					return 0;
7013 			} else {
7014 				if (entry) {
7015 					free_hash_entry(hash, entry);
7016 					fail = 0;
7017 				}
7018 			}
7019 		}
7020 		cond_resched();
7021 	} while_for_each_ftrace_rec();
7022 
7023 	return fail ? -EINVAL : 0;
7024 }
7025 
7026 static ssize_t
7027 ftrace_graph_write(struct file *file, const char __user *ubuf,
7028 		   size_t cnt, loff_t *ppos)
7029 {
7030 	ssize_t read, ret = 0;
7031 	struct ftrace_graph_data *fgd = file->private_data;
7032 	struct trace_parser *parser;
7033 
7034 	if (!cnt)
7035 		return 0;
7036 
7037 	/* Read mode uses seq functions */
7038 	if (file->f_mode & FMODE_READ) {
7039 		struct seq_file *m = file->private_data;
7040 		fgd = m->private;
7041 	}
7042 
7043 	parser = &fgd->parser;
7044 
7045 	read = trace_get_user(parser, ubuf, cnt, ppos);
7046 
7047 	if (read >= 0 && trace_parser_loaded(parser) &&
7048 	    !trace_parser_cont(parser)) {
7049 
7050 		ret = ftrace_graph_set_hash(fgd->new_hash,
7051 					    parser->buffer);
7052 		trace_parser_clear(parser);
7053 	}
7054 
7055 	if (!ret)
7056 		ret = read;
7057 
7058 	return ret;
7059 }
7060 
7061 static const struct file_operations ftrace_graph_fops = {
7062 	.open		= ftrace_graph_open,
7063 	.read		= seq_read,
7064 	.write		= ftrace_graph_write,
7065 	.llseek		= tracing_lseek,
7066 	.release	= ftrace_graph_release,
7067 };
7068 
7069 static const struct file_operations ftrace_graph_notrace_fops = {
7070 	.open		= ftrace_graph_notrace_open,
7071 	.read		= seq_read,
7072 	.write		= ftrace_graph_write,
7073 	.llseek		= tracing_lseek,
7074 	.release	= ftrace_graph_release,
7075 };
7076 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
7077 
7078 void ftrace_create_filter_files(struct ftrace_ops *ops,
7079 				struct dentry *parent)
7080 {
7081 
7082 	trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
7083 			  ops, &ftrace_filter_fops);
7084 
7085 	trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
7086 			  ops, &ftrace_notrace_fops);
7087 }
7088 
7089 /*
7090  * The name "destroy_filter_files" is really a misnomer. Although
7091  * in the future, it may actually delete the files, but this is
7092  * really intended to make sure the ops passed in are disabled
7093  * and that when this function returns, the caller is free to
7094  * free the ops.
7095  *
7096  * The "destroy" name is only to match the "create" name that this
7097  * should be paired with.
7098  */
7099 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
7100 {
7101 	mutex_lock(&ftrace_lock);
7102 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
7103 		ftrace_shutdown(ops, 0);
7104 	ops->flags |= FTRACE_OPS_FL_DELETED;
7105 	ftrace_free_filter(ops);
7106 	mutex_unlock(&ftrace_lock);
7107 }
7108 
7109 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
7110 {
7111 
7112 	trace_create_file("available_filter_functions", TRACE_MODE_READ,
7113 			d_tracer, NULL, &ftrace_avail_fops);
7114 
7115 	trace_create_file("available_filter_functions_addrs", TRACE_MODE_READ,
7116 			d_tracer, NULL, &ftrace_avail_addrs_fops);
7117 
7118 	trace_create_file("enabled_functions", TRACE_MODE_READ,
7119 			d_tracer, NULL, &ftrace_enabled_fops);
7120 
7121 	trace_create_file("touched_functions", TRACE_MODE_READ,
7122 			d_tracer, NULL, &ftrace_touched_fops);
7123 
7124 	ftrace_create_filter_files(&global_ops, d_tracer);
7125 
7126 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
7127 	trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
7128 				    NULL,
7129 				    &ftrace_graph_fops);
7130 	trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
7131 				    NULL,
7132 				    &ftrace_graph_notrace_fops);
7133 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
7134 
7135 	return 0;
7136 }
7137 
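/*
 * Illustrative user-space view of the tracefs files created above
 * (assuming the usual mount at /sys/kernel/tracing):
 *
 *	cat available_filter_functions | head
 *	echo do_sys_open > set_graph_function
 *	echo kfree > set_graph_notrace
 *	echo function_graph > current_tracer
 */
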
7138 static int ftrace_cmp_ips(const void *a, const void *b)
7139 {
7140 	const unsigned long *ipa = a;
7141 	const unsigned long *ipb = b;
7142 
7143 	if (*ipa > *ipb)
7144 		return 1;
7145 	if (*ipa < *ipb)
7146 		return -1;
7147 	return 0;
7148 }
7149 
7150 #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
7151 static void test_is_sorted(unsigned long *start, unsigned long count)
7152 {
7153 	int i;
7154 
7155 	for (i = 1; i < count; i++) {
7156 		if (WARN(start[i - 1] > start[i],
7157 			 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
7158 			 (void *)start[i - 1], start[i - 1],
7159 			 (void *)start[i], start[i]))
7160 			break;
7161 	}
7162 	if (i == count)
7163 		pr_info("ftrace section at %px sorted properly\n", start);
7164 }
7165 #else
7166 static void test_is_sorted(unsigned long *start, unsigned long count)
7167 {
7168 }
7169 #endif
7170 
7171 static int ftrace_process_locs(struct module *mod,
7172 			       unsigned long *start,
7173 			       unsigned long *end)
7174 {
7175 	struct ftrace_page *pg_unuse = NULL;
7176 	struct ftrace_page *start_pg;
7177 	struct ftrace_page *pg;
7178 	struct dyn_ftrace *rec;
7179 	unsigned long skipped = 0;
7180 	unsigned long count;
7181 	unsigned long *p;
7182 	unsigned long addr;
7183 	unsigned long flags = 0; /* Shut up gcc */
7184 	unsigned long pages;
7185 	int ret = -ENOMEM;
7186 
7187 	count = end - start;
7188 
7189 	if (!count)
7190 		return 0;
7191 
7192 	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
7193 
7194 	/*
7195 	 * Sorting mcount entries in vmlinux at build time depends on
7196 	 * CONFIG_BUILDTIME_MCOUNT_SORT, while the mcount locations in
7197 	 * modules cannot be sorted at build time.
7198 	 */
7199 	if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
7200 		sort(start, count, sizeof(*start),
7201 		     ftrace_cmp_ips, NULL);
7202 	} else {
7203 		test_is_sorted(start, count);
7204 	}
7205 
7206 	start_pg = ftrace_allocate_pages(count);
7207 	if (!start_pg)
7208 		return -ENOMEM;
7209 
7210 	mutex_lock(&ftrace_lock);
7211 
7212 	/*
7213 	 * The core kernel and each module need their own pages, as
7214 	 * modules will free theirs when they are removed.
7215 	 * Force a new page to be allocated for each module.
7216 	 */
7217 	if (!mod) {
7218 		WARN_ON(ftrace_pages || ftrace_pages_start);
7219 		/* First initialization */
7220 		ftrace_pages = ftrace_pages_start = start_pg;
7221 	} else {
7222 		if (!ftrace_pages)
7223 			goto out;
7224 
7225 		if (WARN_ON(ftrace_pages->next)) {
7226 			/* Hmm, we have free pages? */
7227 			while (ftrace_pages->next)
7228 				ftrace_pages = ftrace_pages->next;
7229 		}
7230 
7231 		ftrace_pages->next = start_pg;
7232 	}
7233 
7234 	p = start;
7235 	pg = start_pg;
7236 	while (p < end) {
7237 		unsigned long end_offset;
7238 
7239 		addr = *p++;
7240 
7241 		/*
7242 		 * Some architecture linkers will pad between
7243 		 * the different mcount_loc sections of different
7244 		 * object files to satisfy alignments.
7245 		 * Skip any NULL pointers.
7246 		 */
7247 		if (!addr) {
7248 			skipped++;
7249 			continue;
7250 		}
7251 
7252 		/*
7253 		 * If this is the core kernel, make sure the address is in core
7254 		 * text or inittext: weak functions get zeroed, and KASLR can
7255 		 * move them to something other than zero, but it will never
7256 		 * move them into an area holding kernel text.
7257 		 */
7258 		if (!mod && !(is_kernel_text(addr) || is_kernel_inittext(addr))) {
7259 			skipped++;
7260 			continue;
7261 		}
7262 
7263 		addr = ftrace_call_adjust(addr);
7264 
7265 		end_offset = (pg->index+1) * sizeof(pg->records[0]);
7266 		if (end_offset > PAGE_SIZE << pg->order) {
7267 			/* We should have allocated enough */
7268 			if (WARN_ON(!pg->next))
7269 				break;
7270 			pg = pg->next;
7271 		}
7272 
7273 		rec = &pg->records[pg->index++];
7274 		rec->ip = addr;
7275 	}
7276 
7277 	if (pg->next) {
7278 		pg_unuse = pg->next;
7279 		pg->next = NULL;
7280 	}
7281 
7282 	/* Assign the last page to ftrace_pages */
7283 	ftrace_pages = pg;
7284 
7285 	/*
7286 	 * We only need to disable interrupts on start up
7287 	 * because we are modifying code that an interrupt
7288 	 * may execute, and the modification is not atomic.
7289 	 * But for modules, nothing runs the code we modify
7290 	 * until we are finished with it, and there's no
7291 	 * reason to cause large interrupt latencies while we do it.
7292 	 */
7293 	if (!mod)
7294 		local_irq_save(flags);
7295 	ftrace_update_code(mod, start_pg);
7296 	if (!mod)
7297 		local_irq_restore(flags);
7298 	ret = 0;
7299  out:
7300 	mutex_unlock(&ftrace_lock);
7301 
7302 	/* We should have used all pages unless we skipped some */
7303 	if (pg_unuse) {
7304 		unsigned long pg_remaining, remaining = 0;
7305 		unsigned long skip;
7306 
7307 		/* Count the number of entries unused and compare it to skipped. */
7308 		pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index;
7309 
7310 		if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) {
7311 
7312 			skip = skipped - pg_remaining;
7313 
7314 			for (pg = pg_unuse; pg; pg = pg->next)
7315 				remaining += 1 << pg->order;
7316 
7317 			pages -= remaining;
7318 
7319 			skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE);
7320 
7321 			/*
7322 			 * Check to see if the number of pages remaining would
7323 			 * just fit the number of entries skipped.
7324 			 */
7325 			WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped",
7326 			     remaining, skipped);
7327 		}
7328 		/* Need to synchronize with ftrace_location_range() */
7329 		synchronize_rcu();
7330 		ftrace_free_pages(pg_unuse);
7331 	}
7332 
7333 	if (!mod) {
7334 		count -= skipped;
7335 		pr_info("ftrace: allocating %ld entries in %ld pages\n",
7336 			count, pages);
7337 	}
7338 
7339 	return ret;
7340 }
7341 
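/*
 * A worked example of the skip accounting at the end of
 * ftrace_process_locs(), with made-up numbers and order-0 pages:
 * assume ENTRIES_PER_PAGE == 100 and count == 1000, so 10 pages are
 * allocated. If skipped == 250, only 750 records land in the pages:
 * seven full pages plus 50 records in the eighth, so pg->index == 50,
 * pg_remaining == 50, and two whole pages go unused. Then
 * skip == 250 - 50 == 200, DIV_ROUND_UP(200, 100) == 2 matches
 * remaining == 2, and no warning fires.
 */
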
7342 struct ftrace_mod_func {
7343 	struct list_head	list;
7344 	char			*name;
7345 	unsigned long		ip;
7346 	unsigned int		size;
7347 };
7348 
7349 struct ftrace_mod_map {
7350 	struct rcu_head		rcu;
7351 	struct list_head	list;
7352 	struct module		*mod;
7353 	unsigned long		start_addr;
7354 	unsigned long		end_addr;
7355 	struct list_head	funcs;
7356 	unsigned int		num_funcs;
7357 };
7358 
7359 static int ftrace_get_trampoline_kallsym(unsigned int symnum,
7360 					 unsigned long *value, char *type,
7361 					 char *name, char *module_name,
7362 					 int *exported)
7363 {
7364 	struct ftrace_ops *op;
7365 
7366 	list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
7367 		if (!op->trampoline || symnum--)
7368 			continue;
7369 		*value = op->trampoline;
7370 		*type = 't';
7371 		strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
7372 		strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
7373 		*exported = 0;
7374 		return 0;
7375 	}
7376 
7377 	return -ERANGE;
7378 }
7379 
7380 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
7381 /*
7382  * Check if the current ops references the given ip.
7383  *
7384  * If the ops traces all functions, then it was already accounted for.
7385  * If the ops does not trace the current record function, skip it.
7386  * If the ops ignores the function via notrace filter, skip it.
7387  */
7388 static bool
7389 ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
7390 {
7391 	/* If ops isn't enabled, ignore it */
7392 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
7393 		return false;
7394 
7395 	/* If ops traces all then it includes this function */
7396 	if (ops_traces_mod(ops))
7397 		return true;
7398 
7399 	/* The function must be in the filter */
7400 	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
7401 	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
7402 		return false;
7403 
7404 	/* If in notrace hash, we ignore it too */
7405 	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
7406 		return false;
7407 
7408 	return true;
7409 }
7410 #endif
7411 
7412 #ifdef CONFIG_MODULES
7413 
7414 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
7415 
7416 static LIST_HEAD(ftrace_mod_maps);
7417 
7418 static int referenced_filters(struct dyn_ftrace *rec)
7419 {
7420 	struct ftrace_ops *ops;
7421 	int cnt = 0;
7422 
7423 	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
7424 		if (ops_references_ip(ops, rec->ip)) {
7425 			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
7426 				continue;
7427 			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
7428 				continue;
7429 			cnt++;
7430 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
7431 				rec->flags |= FTRACE_FL_REGS;
7432 			if (cnt == 1 && ops->trampoline)
7433 				rec->flags |= FTRACE_FL_TRAMP;
7434 			else
7435 				rec->flags &= ~FTRACE_FL_TRAMP;
7436 		}
7437 	}
7438 
7439 	return cnt;
7440 }
7441 
7442 static void
7443 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
7444 {
7445 	struct ftrace_func_entry *entry;
7446 	struct dyn_ftrace *rec;
7447 	int i;
7448 
7449 	if (ftrace_hash_empty(hash))
7450 		return;
7451 
7452 	for (i = 0; i < pg->index; i++) {
7453 		rec = &pg->records[i];
7454 		entry = __ftrace_lookup_ip(hash, rec->ip);
7455 		/*
7456 		 * Do not allow this rec to match again.
7457 		 * Yeah, it may waste some memory, but will be removed
7458 		 * if/when the hash is modified again.
7459 		 */
7460 		if (entry)
7461 			entry->ip = 0;
7462 	}
7463 }
7464 
7465 /* Clear any records from hashes */
7466 static void clear_mod_from_hashes(struct ftrace_page *pg)
7467 {
7468 	struct trace_array *tr;
7469 
7470 	mutex_lock(&trace_types_lock);
7471 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7472 		if (!tr->ops || !tr->ops->func_hash)
7473 			continue;
7474 		mutex_lock(&tr->ops->func_hash->regex_lock);
7475 		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
7476 		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
7477 		mutex_unlock(&tr->ops->func_hash->regex_lock);
7478 	}
7479 	mutex_unlock(&trace_types_lock);
7480 }
7481 
7482 static void ftrace_free_mod_map(struct rcu_head *rcu)
7483 {
7484 	struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
7485 	struct ftrace_mod_func *mod_func;
7486 	struct ftrace_mod_func *n;
7487 
7488 	/* All the contents of mod_map are now not visible to readers */
7489 	list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
7490 		kfree(mod_func->name);
7491 		list_del(&mod_func->list);
7492 		kfree(mod_func);
7493 	}
7494 
7495 	kfree(mod_map);
7496 }
7497 
7498 void ftrace_release_mod(struct module *mod)
7499 {
7500 	struct ftrace_mod_map *mod_map;
7501 	struct ftrace_mod_map *n;
7502 	struct dyn_ftrace *rec;
7503 	struct ftrace_page **last_pg;
7504 	struct ftrace_page *tmp_page = NULL;
7505 	struct ftrace_page *pg;
7506 
7507 	mutex_lock(&ftrace_lock);
7508 
7509 	/*
7510 	 * To avoid a use-after-free after the module is unloaded, the
7511 	 * 'mod_map' resource needs to be released unconditionally.
7512 	 */
7513 	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
7514 		if (mod_map->mod == mod) {
7515 			list_del_rcu(&mod_map->list);
7516 			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
7517 			break;
7518 		}
7519 	}
7520 
7521 	if (ftrace_disabled)
7522 		goto out_unlock;
7523 
7524 	/*
7525 	 * Each module has its own ftrace_pages, remove
7526 	 * them from the list.
7527 	 */
7528 	last_pg = &ftrace_pages_start;
7529 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
7530 		rec = &pg->records[0];
7531 		if (within_module(rec->ip, mod)) {
7532 			/*
7533 			 * As core pages are first, the first
7534 			 * page should never be a module page.
7535 			 */
7536 			if (WARN_ON(pg == ftrace_pages_start))
7537 				goto out_unlock;
7538 
7539 			/* Check if we are deleting the last page */
7540 			if (pg == ftrace_pages)
7541 				ftrace_pages = next_to_ftrace_page(last_pg);
7542 
7543 			ftrace_update_tot_cnt -= pg->index;
7544 			*last_pg = pg->next;
7545 
7546 			pg->next = tmp_page;
7547 			tmp_page = pg;
7548 		} else
7549 			last_pg = &pg->next;
7550 	}
7551  out_unlock:
7552 	mutex_unlock(&ftrace_lock);
7553 
7554 	/* Need to synchronize with ftrace_location_range() */
7555 	if (tmp_page)
7556 		synchronize_rcu();
7557 	for (pg = tmp_page; pg; pg = tmp_page) {
7558 
7559 		/* Needs to be called outside of ftrace_lock */
7560 		clear_mod_from_hashes(pg);
7561 
7562 		if (pg->records) {
7563 			free_pages((unsigned long)pg->records, pg->order);
7564 			ftrace_number_of_pages -= 1 << pg->order;
7565 		}
7566 		tmp_page = pg->next;
7567 		kfree(pg);
7568 		ftrace_number_of_groups--;
7569 	}
7570 }
7571 
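/*
 * The mod_map teardown above follows the classic RCU pattern: readers
 * (e.g. ftrace_mod_address_lookup() below) walk ftrace_mod_maps with
 * preemption disabled, while writers unlink with list_del_rcu() and
 * defer the kfree() to ftrace_free_mod_map() via call_rcu(). A minimal
 * template of the same pattern, with placeholder names, kept out of the
 * build with #if 0:
 */
#if 0
struct example_node {
	struct list_head	list;
	struct rcu_head		rcu;
};

static void example_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_node, rcu));
}

static void example_del(struct example_node *n)
{
	list_del_rcu(&n->list);		 /* readers may still see it */
	call_rcu(&n->rcu, example_free); /* free after a grace period */
}
#endif
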
7572 void ftrace_module_enable(struct module *mod)
7573 {
7574 	struct dyn_ftrace *rec;
7575 	struct ftrace_page *pg;
7576 
7577 	mutex_lock(&ftrace_lock);
7578 
7579 	if (ftrace_disabled)
7580 		goto out_unlock;
7581 
7582 	/*
7583 	 * If the tracing is enabled, go ahead and enable the record.
7584 	 *
7585 	 * The reason not to enable the record immediately is the
7586 	 * inherent check of ftrace_make_nop/ftrace_make_call for
7587 	 * correct previous instructions.  Doing the NOP conversion
7588 	 * first puts the module into the correct state, thus
7589 	 * passing the ftrace_make_call check.
7590 	 *
7591 	 * We also delay this to after the module code already set the
7592 	 * text to read-only, as we now need to set it back to read-write
7593 	 * so that we can modify the text.
7594 	 */
7595 	if (ftrace_start_up)
7596 		ftrace_arch_code_modify_prepare();
7597 
7598 	do_for_each_ftrace_rec(pg, rec) {
7599 		int cnt;
7600 		/*
7601 		 * do_for_each_ftrace_rec() is a double loop.
7602 		 * Module text shares the pg. If a record is
7603 		 * not part of this module, then skip this pg,
7604 		 * which the "break" will do.
7605 		 */
7606 		if (!within_module(rec->ip, mod))
7607 			break;
7608 
7609 		cond_resched();
7610 
7611 		/* Weak functions should still be ignored */
7612 		if (!test_for_valid_rec(rec)) {
7613 			/* Clear all other flags. Should not be enabled anyway */
7614 			rec->flags = FTRACE_FL_DISABLED;
7615 			continue;
7616 		}
7617 
7618 		cnt = 0;
7619 
7620 		/*
7621 		 * When adding a module, we need to check if tracers are
7622 		 * currently enabled and if they are, and can trace this record,
7623 		 * we need to enable the module functions as well as update the
7624 		 * reference counts for those function records.
7625 		 */
7626 		if (ftrace_start_up)
7627 			cnt += referenced_filters(rec);
7628 
7629 		rec->flags &= ~FTRACE_FL_DISABLED;
7630 		rec->flags += cnt;
7631 
7632 		if (ftrace_start_up && cnt) {
7633 			int failed = __ftrace_replace_code(rec, 1);
7634 			if (failed) {
7635 				ftrace_bug(failed, rec);
7636 				goto out_loop;
7637 			}
7638 		}
7639 
7640 	} while_for_each_ftrace_rec();
7641 
7642  out_loop:
7643 	if (ftrace_start_up)
7644 		ftrace_arch_code_modify_post_process();
7645 
7646  out_unlock:
7647 	mutex_unlock(&ftrace_lock);
7648 
7649 	process_cached_mods(mod->name);
7650 }
7651 
7652 void ftrace_module_init(struct module *mod)
7653 {
7654 	int ret;
7655 
7656 	if (ftrace_disabled || !mod->num_ftrace_callsites)
7657 		return;
7658 
7659 	ret = ftrace_process_locs(mod, mod->ftrace_callsites,
7660 				  mod->ftrace_callsites + mod->num_ftrace_callsites);
7661 	if (ret)
7662 		pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
7663 			mod->name);
7664 }
7665 
7666 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7667 				struct dyn_ftrace *rec)
7668 {
7669 	struct ftrace_mod_func *mod_func;
7670 	unsigned long symsize;
7671 	unsigned long offset;
7672 	char str[KSYM_SYMBOL_LEN];
7673 	char *modname;
7674 	const char *ret;
7675 
7676 	ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
7677 	if (!ret)
7678 		return;
7679 
7680 	mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
7681 	if (!mod_func)
7682 		return;
7683 
7684 	mod_func->name = kstrdup(str, GFP_KERNEL);
7685 	if (!mod_func->name) {
7686 		kfree(mod_func);
7687 		return;
7688 	}
7689 
7690 	mod_func->ip = rec->ip - offset;
7691 	mod_func->size = symsize;
7692 
7693 	mod_map->num_funcs++;
7694 
7695 	list_add_rcu(&mod_func->list, &mod_map->funcs);
7696 }
7697 
7698 static struct ftrace_mod_map *
7699 allocate_ftrace_mod_map(struct module *mod,
7700 			unsigned long start, unsigned long end)
7701 {
7702 	struct ftrace_mod_map *mod_map;
7703 
7704 	if (ftrace_disabled)
7705 		return NULL;
7706 
7707 	mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
7708 	if (!mod_map)
7709 		return NULL;
7710 
7711 	mod_map->mod = mod;
7712 	mod_map->start_addr = start;
7713 	mod_map->end_addr = end;
7714 	mod_map->num_funcs = 0;
7715 
7716 	INIT_LIST_HEAD_RCU(&mod_map->funcs);
7717 
7718 	list_add_rcu(&mod_map->list, &ftrace_mod_maps);
7719 
7720 	return mod_map;
7721 }
7722 
7723 static int
7724 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
7725 			   unsigned long addr, unsigned long *size,
7726 			   unsigned long *off, char *sym)
7727 {
7728 	struct ftrace_mod_func *found_func =  NULL;
7729 	struct ftrace_mod_func *mod_func;
7730 
7731 	list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7732 		if (addr >= mod_func->ip &&
7733 		    addr < mod_func->ip + mod_func->size) {
7734 			found_func = mod_func;
7735 			break;
7736 		}
7737 	}
7738 
7739 	if (found_func) {
7740 		if (size)
7741 			*size = found_func->size;
7742 		if (off)
7743 			*off = addr - found_func->ip;
7744 		return strscpy(sym, found_func->name, KSYM_NAME_LEN);
7745 	}
7746 
7747 	return 0;
7748 }
7749 
7750 int
7751 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7752 		   unsigned long *off, char **modname, char *sym)
7753 {
7754 	struct ftrace_mod_map *mod_map;
7755 	int ret = 0;
7756 
7757 	/* mod_map is freed via call_rcu() */
7758 	preempt_disable();
7759 	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7760 		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7761 		if (ret) {
7762 			if (modname)
7763 				*modname = mod_map->mod->name;
7764 			break;
7765 		}
7766 	}
7767 	preempt_enable();
7768 
7769 	return ret;
7770 }
7771 
7772 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7773 			   char *type, char *name,
7774 			   char *module_name, int *exported)
7775 {
7776 	struct ftrace_mod_map *mod_map;
7777 	struct ftrace_mod_func *mod_func;
7778 	int ret;
7779 
7780 	preempt_disable();
7781 	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7782 
7783 		if (symnum >= mod_map->num_funcs) {
7784 			symnum -= mod_map->num_funcs;
7785 			continue;
7786 		}
7787 
7788 		list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7789 			if (symnum > 1) {
7790 				symnum--;
7791 				continue;
7792 			}
7793 
7794 			*value = mod_func->ip;
7795 			*type = 'T';
7796 			strscpy(name, mod_func->name, KSYM_NAME_LEN);
7797 			strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7798 			*exported = 1;
7799 			preempt_enable();
7800 			return 0;
7801 		}
7802 		WARN_ON(1);
7803 		break;
7804 	}
7805 	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7806 					    module_name, exported);
7807 	preempt_enable();
7808 	return ret;
7809 }
7810 
7811 #else
7812 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7813 				struct dyn_ftrace *rec) { }
7814 static inline struct ftrace_mod_map *
7815 allocate_ftrace_mod_map(struct module *mod,
7816 			unsigned long start, unsigned long end)
7817 {
7818 	return NULL;
7819 }
7820 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7821 			   char *type, char *name, char *module_name,
7822 			   int *exported)
7823 {
7824 	int ret;
7825 
7826 	preempt_disable();
7827 	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7828 					    module_name, exported);
7829 	preempt_enable();
7830 	return ret;
7831 }
7832 #endif /* CONFIG_MODULES */
7833 
7834 struct ftrace_init_func {
7835 	struct list_head list;
7836 	unsigned long ip;
7837 };
7838 
7839 /* Clear any init ips from hashes */
7840 static void
7841 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
7842 {
7843 	struct ftrace_func_entry *entry;
7844 
7845 	entry = ftrace_lookup_ip(hash, func->ip);
7846 	/*
7847 	 * Do not allow this rec to match again.
7848 	 * Yeah, it may waste some memory, but will be removed
7849 	 * if/when the hash is modified again.
7850 	 */
7851 	if (entry)
7852 		entry->ip = 0;
7853 }
7854 
7855 static void
7856 clear_func_from_hashes(struct ftrace_init_func *func)
7857 {
7858 	struct trace_array *tr;
7859 
7860 	mutex_lock(&trace_types_lock);
7861 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7862 		if (!tr->ops || !tr->ops->func_hash)
7863 			continue;
7864 		mutex_lock(&tr->ops->func_hash->regex_lock);
7865 		clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7866 		clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7867 		mutex_unlock(&tr->ops->func_hash->regex_lock);
7868 	}
7869 	mutex_unlock(&trace_types_lock);
7870 }
7871 
7872 static void add_to_clear_hash_list(struct list_head *clear_list,
7873 				   struct dyn_ftrace *rec)
7874 {
7875 	struct ftrace_init_func *func;
7876 
7877 	func = kmalloc(sizeof(*func), GFP_KERNEL);
7878 	if (!func) {
7879 		MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
7880 		return;
7881 	}
7882 
7883 	func->ip = rec->ip;
7884 	list_add(&func->list, clear_list);
7885 }
7886 
7887 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
7888 {
7889 	unsigned long start = (unsigned long)(start_ptr);
7890 	unsigned long end = (unsigned long)(end_ptr);
7891 	struct ftrace_page **last_pg = &ftrace_pages_start;
7892 	struct ftrace_page *tmp_page = NULL;
7893 	struct ftrace_page *pg;
7894 	struct dyn_ftrace *rec;
7895 	struct dyn_ftrace key;
7896 	struct ftrace_mod_map *mod_map = NULL;
7897 	struct ftrace_init_func *func, *func_next;
7898 	LIST_HEAD(clear_hash);
7899 
7900 	key.ip = start;
7901 	key.flags = end;	/* overload flags, as it is unsigned long */
7902 
7903 	mutex_lock(&ftrace_lock);
7904 
7905 	/*
7906 	 * If we are freeing module init memory, then check if
7907 	 * any tracer is active. If so, we need to save a mapping of
7908 	 * the module functions being freed with the address.
7909 	 */
7910 	if (mod && ftrace_ops_list != &ftrace_list_end)
7911 		mod_map = allocate_ftrace_mod_map(mod, start, end);
7912 
7913 	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7914 		if (end < pg->records[0].ip ||
7915 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7916 			continue;
7917  again:
7918 		rec = bsearch(&key, pg->records, pg->index,
7919 			      sizeof(struct dyn_ftrace),
7920 			      ftrace_cmp_recs);
7921 		if (!rec)
7922 			continue;
7923 
7924 		/* rec will be cleared from hashes after ftrace_lock unlock */
7925 		add_to_clear_hash_list(&clear_hash, rec);
7926 
7927 		if (mod_map)
7928 			save_ftrace_mod_rec(mod_map, rec);
7929 
7930 		pg->index--;
7931 		ftrace_update_tot_cnt--;
7932 		if (!pg->index) {
7933 			*last_pg = pg->next;
7934 			pg->next = tmp_page;
7935 			tmp_page = pg;
7936 			pg = container_of(last_pg, struct ftrace_page, next);
7937 			if (!(*last_pg))
7938 				ftrace_pages = pg;
7939 			continue;
7940 		}
7941 		memmove(rec, rec + 1,
7942 			(pg->index - (rec - pg->records)) * sizeof(*rec));
7943 		/* More than one function may be in this block */
7944 		goto again;
7945 	}
7946 	mutex_unlock(&ftrace_lock);
7947 
7948 	list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7949 		clear_func_from_hashes(func);
7950 		kfree(func);
7951 	}
7952 	/* Need to synchronize with ftrace_location_range() */
7953 	if (tmp_page) {
7954 		synchronize_rcu();
7955 		ftrace_free_pages(tmp_page);
7956 	}
7957 }
7958 
7959 void __init ftrace_free_init_mem(void)
7960 {
7961 	void *start = (void *)(&__init_begin);
7962 	void *end = (void *)(&__init_end);
7963 
7964 	ftrace_boot_snapshot();
7965 
7966 	ftrace_free_mem(NULL, start, end);
7967 }
7968 
7969 int __init __weak ftrace_dyn_arch_init(void)
7970 {
7971 	return 0;
7972 }
7973 
7974 void __init ftrace_init(void)
7975 {
7976 	extern unsigned long __start_mcount_loc[];
7977 	extern unsigned long __stop_mcount_loc[];
7978 	unsigned long count, flags;
7979 	int ret;
7980 
7981 	local_irq_save(flags);
7982 	ret = ftrace_dyn_arch_init();
7983 	local_irq_restore(flags);
7984 	if (ret)
7985 		goto failed;
7986 
7987 	count = __stop_mcount_loc - __start_mcount_loc;
7988 	if (!count) {
7989 		pr_info("ftrace: No functions to be traced?\n");
7990 		goto failed;
7991 	}
7992 
7993 	ret = ftrace_process_locs(NULL,
7994 				  __start_mcount_loc,
7995 				  __stop_mcount_loc);
7996 	if (ret) {
7997 		pr_warn("ftrace: failed to allocate entries for functions\n");
7998 		goto failed;
7999 	}
8000 
8001 	pr_info("ftrace: allocated %ld pages with %ld groups\n",
8002 		ftrace_number_of_pages, ftrace_number_of_groups);
8003 
8004 	last_ftrace_enabled = ftrace_enabled = 1;
8005 
8006 	set_ftrace_early_filters();
8007 
8008 	return;
8009  failed:
8010 	ftrace_disabled = 1;
8011 }
8012 
8013 /* Do nothing if arch does not support this */
8014 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
8015 {
8016 }
8017 
8018 static void ftrace_update_trampoline(struct ftrace_ops *ops)
8019 {
8020 	unsigned long trampoline = ops->trampoline;
8021 
8022 	arch_ftrace_update_trampoline(ops);
8023 	if (ops->trampoline && ops->trampoline != trampoline &&
8024 	    (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
8025 		/* Add to kallsyms before the perf events */
8026 		ftrace_add_trampoline_to_kallsyms(ops);
8027 		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
8028 				   ops->trampoline, ops->trampoline_size, false,
8029 				   FTRACE_TRAMPOLINE_SYM);
8030 		/*
8031 		 * Record the perf text poke event after the ksymbol register
8032 		 * event.
8033 		 */
8034 		perf_event_text_poke((void *)ops->trampoline, NULL, 0,
8035 				     (void *)ops->trampoline,
8036 				     ops->trampoline_size);
8037 	}
8038 }
8039 
8040 void ftrace_init_trace_array(struct trace_array *tr)
8041 {
8042 	if (tr->flags & TRACE_ARRAY_FL_MOD_INIT)
8043 		return;
8044 
8045 	INIT_LIST_HEAD(&tr->func_probes);
8046 	INIT_LIST_HEAD(&tr->mod_trace);
8047 	INIT_LIST_HEAD(&tr->mod_notrace);
8048 
8049 	tr->flags |= TRACE_ARRAY_FL_MOD_INIT;
8050 }
8051 #else
8052 
8053 struct ftrace_ops global_ops = {
8054 	.func			= ftrace_stub,
8055 	.flags			= FTRACE_OPS_FL_INITIALIZED |
8056 				  FTRACE_OPS_FL_PID,
8057 };
8058 
8059 static int __init ftrace_nodyn_init(void)
8060 {
8061 	ftrace_enabled = 1;
8062 	return 0;
8063 }
8064 core_initcall(ftrace_nodyn_init);
8065 
8066 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
8067 static inline void ftrace_startup_all(int command) { }
8068 
8069 static void ftrace_update_trampoline(struct ftrace_ops *ops)
8070 {
8071 }
8072 
8073 #endif /* CONFIG_DYNAMIC_FTRACE */
8074 
8075 __init void ftrace_init_global_array_ops(struct trace_array *tr)
8076 {
8077 	tr->ops = &global_ops;
8078 	if (!global_ops.private)
8079 		global_ops.private = tr;
8080 	ftrace_init_trace_array(tr);
8081 	init_array_fgraph_ops(tr, tr->ops);
8082 }
8083 
8084 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
8085 {
8086 	/* If we filter on pids, update to use the pid function */
8087 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
8088 		if (WARN_ON(tr->ops->func != ftrace_stub))
8089 			printk("ftrace ops had %pS for function\n",
8090 			       tr->ops->func);
8091 	}
8092 	tr->ops->func = func;
8093 	tr->ops->private = tr;
8094 }
8095 
8096 void ftrace_reset_array_ops(struct trace_array *tr)
8097 {
8098 	tr->ops->func = ftrace_stub;
8099 }
8100 
8101 static nokprobe_inline void
8102 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
8103 		       struct ftrace_ops *ignored, struct ftrace_regs *fregs)
8104 {
8105 	struct pt_regs *regs = ftrace_get_regs(fregs);
8106 	struct ftrace_ops *op;
8107 	int bit;
8108 
8109 	/*
8110 	 * The trace_test_and_set_recursion() call will disable preemption,
8111 	 * which is required since some of the ops may be dynamically
8112 	 * allocated; they must be freed after a synchronize_rcu().
8113 	 */
8114 	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
8115 	if (bit < 0)
8116 		return;
8117 
8118 	do_for_each_ftrace_op(op, ftrace_ops_list) {
8119 		/* Stub functions don't need to be called nor tested */
8120 		if (op->flags & FTRACE_OPS_FL_STUB)
8121 			continue;
8122 		/*
8123 		 * Check the following for each ops before calling their func:
8124 		 *  if the RCU flag is set, then rcu_is_watching() must be true
8125 		 *  and the ip must match the ops filter (ftrace_ops_test())
8126 		 *
8127 		 * If any of the above fails then the op->func() is not executed.
8128 		 */
8129 		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
8130 		    ftrace_ops_test(op, ip, regs)) {
8131 			if (FTRACE_WARN_ON(!op->func)) {
8132 				pr_warn("op=%p %pS\n", op, op);
8133 				goto out;
8134 			}
8135 			op->func(ip, parent_ip, op, fregs);
8136 		}
8137 	} while_for_each_ftrace_op(op);
8138 out:
8139 	trace_clear_recursion(bit);
8140 }
8141 
8142 /*
8143  * Some archs only support passing ip and parent_ip. Even though
8144  * the list function ignores the op parameter, we do not want any
8145  * C side effects, where a function is called without the caller
8146  * sending a third parameter.
8147  * Archs are expected to support both regs and ftrace_ops at the same time.
8148  * If they support ftrace_ops, it is assumed they support regs.
8149  * If callbacks want to use regs, they must either check for regs
8150  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
8151  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
8152  * An architecture can pass partial regs with ftrace_ops and still
8153  * set the ARCH_SUPPORTS_FTRACE_OPS.
8154  *
8155  * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
8156  * arch_ftrace_ops_list_func.
8157  */
8158 #if ARCH_SUPPORTS_FTRACE_OPS
8159 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
8160 			       struct ftrace_ops *op, struct ftrace_regs *fregs)
8161 {
8162 	kmsan_unpoison_memory(fregs, ftrace_regs_size());
8163 	__ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
8164 }
8165 #else
8166 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
8167 {
8168 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
8169 }
8170 #endif
8171 NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
8172 
8173 /*
8174  * If there's only one function registered but it does not support
8175  * recursion, or it needs RCU protection, then this function will be
8176  * called by the mcount trampoline.
8177  */
8178 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
8179 				   struct ftrace_ops *op, struct ftrace_regs *fregs)
8180 {
8181 	int bit;
8182 
8183 	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
8184 	if (bit < 0)
8185 		return;
8186 
8187 	if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
8188 		op->func(ip, parent_ip, op, fregs);
8189 
8190 	trace_clear_recursion(bit);
8191 }
8192 NOKPROBE_SYMBOL(ftrace_ops_assist_func);
8193 
8194 /**
8195  * ftrace_ops_get_func - get the function a trampoline should call
8196  * @ops: the ops to get the function for
8197  *
8198  * Normally the mcount trampoline will call the ops->func, but there
8199  * are times that it should not. For example, if the ops does not
8200  * have its own recursion protection, then it should call the
8201  * ftrace_ops_assist_func() instead.
8202  *
8203  * Returns: the function that the trampoline should call for @ops.
8204  */
8205 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
8206 {
8207 	/*
8208 	 * If the function does not handle recursion or needs to be RCU safe,
8209 	 * then we need to call the assist handler.
8210 	 */
8211 	if (ops->flags & (FTRACE_OPS_FL_RECURSION |
8212 			  FTRACE_OPS_FL_RCU))
8213 		return ftrace_ops_assist_func;
8214 
8215 	return ops->func;
8216 }
8217 
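/*
 * An illustration of the two paths above, kept out of the build with
 * #if 0. With FTRACE_OPS_FL_RECURSION set, the trampoline calls
 * ftrace_ops_assist_func(), which does the recursion and RCU checks
 * before invoking the callback. Without the flag, the callback is
 * called directly and must protect itself, e.g. with
 * ftrace_test_recursion_trylock(). All names here are placeholders.
 */
#if 0
static void careless_callback(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* recursion and RCU checks done by ftrace_ops_assist_func() */
}

static struct ftrace_ops careless_ops = {
	.func	= careless_callback,
	.flags	= FTRACE_OPS_FL_RECURSION,
};

static void careful_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;
	/* ... do the work without recursing into traced code ... */
	ftrace_test_recursion_unlock(bit);
}

static struct ftrace_ops careful_ops = {
	.func	= careful_callback,	/* called directly, no assist */
};
#endif
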
8218 static void
8219 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
8220 				     struct task_struct *prev,
8221 				     struct task_struct *next,
8222 				     unsigned int prev_state)
8223 {
8224 	struct trace_array *tr = data;
8225 	struct trace_pid_list *pid_list;
8226 	struct trace_pid_list *no_pid_list;
8227 
8228 	pid_list = rcu_dereference_sched(tr->function_pids);
8229 	no_pid_list = rcu_dereference_sched(tr->function_no_pids);
8230 
8231 	if (trace_ignore_this_task(pid_list, no_pid_list, next))
8232 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8233 			       FTRACE_PID_IGNORE);
8234 	else
8235 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8236 			       next->pid);
8237 }
8238 
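/*
 * The probe above caches the filtering verdict per CPU at every context
 * switch so that the function-entry hot path only needs a cheap
 * this_cpu_read(). A consumer looks roughly like the sketch below,
 * modeled on ftrace_pid_func() earlier in this file; treat it as an
 * approximation, not the exact code. Kept out of the build with #if 0.
 */
#if 0
static void pid_filtered_callback(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
	if (pid == FTRACE_PID_IGNORE)
		return;		/* current task is filtered out */
	/* ... trace the function ... */
}
#endif
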
8239 static void
8240 ftrace_pid_follow_sched_process_fork(void *data,
8241 				     struct task_struct *self,
8242 				     struct task_struct *task)
8243 {
8244 	struct trace_pid_list *pid_list;
8245 	struct trace_array *tr = data;
8246 
8247 	pid_list = rcu_dereference_sched(tr->function_pids);
8248 	trace_filter_add_remove_task(pid_list, self, task);
8249 
8250 	pid_list = rcu_dereference_sched(tr->function_no_pids);
8251 	trace_filter_add_remove_task(pid_list, self, task);
8252 }
8253 
8254 static void
8255 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
8256 {
8257 	struct trace_pid_list *pid_list;
8258 	struct trace_array *tr = data;
8259 
8260 	pid_list = rcu_dereference_sched(tr->function_pids);
8261 	trace_filter_add_remove_task(pid_list, NULL, task);
8262 
8263 	pid_list = rcu_dereference_sched(tr->function_no_pids);
8264 	trace_filter_add_remove_task(pid_list, NULL, task);
8265 }
8266 
8267 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
8268 {
8269 	if (enable) {
8270 		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
8271 						  tr);
8272 		register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
8273 						  tr);
8274 	} else {
8275 		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
8276 						    tr);
8277 		unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
8278 						    tr);
8279 	}
8280 }
8281 
8282 static void clear_ftrace_pids(struct trace_array *tr, int type)
8283 {
8284 	struct trace_pid_list *pid_list;
8285 	struct trace_pid_list *no_pid_list;
8286 	int cpu;
8287 
8288 	pid_list = rcu_dereference_protected(tr->function_pids,
8289 					     lockdep_is_held(&ftrace_lock));
8290 	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
8291 						lockdep_is_held(&ftrace_lock));
8292 
8293 	/* Make sure there's something to do */
8294 	if (!pid_type_enabled(type, pid_list, no_pid_list))
8295 		return;
8296 
8297 	/* See if the pids still need to be checked after this */
8298 	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
8299 		unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
8300 		for_each_possible_cpu(cpu)
8301 			per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
8302 	}
8303 
8304 	if (type & TRACE_PIDS)
8305 		rcu_assign_pointer(tr->function_pids, NULL);
8306 
8307 	if (type & TRACE_NO_PIDS)
8308 		rcu_assign_pointer(tr->function_no_pids, NULL);
8309 
8310 	/* Wait till all users are no longer using pid filtering */
8311 	synchronize_rcu();
8312 
8313 	if ((type & TRACE_PIDS) && pid_list)
8314 		trace_pid_list_free(pid_list);
8315 
8316 	if ((type & TRACE_NO_PIDS) && no_pid_list)
8317 		trace_pid_list_free(no_pid_list);
8318 }
8319 
8320 void ftrace_clear_pids(struct trace_array *tr)
8321 {
8322 	mutex_lock(&ftrace_lock);
8323 
8324 	clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
8325 
8326 	mutex_unlock(&ftrace_lock);
8327 }
8328 
8329 static void ftrace_pid_reset(struct trace_array *tr, int type)
8330 {
8331 	mutex_lock(&ftrace_lock);
8332 	clear_ftrace_pids(tr, type);
8333 
8334 	ftrace_update_pid_func();
8335 	ftrace_startup_all(0);
8336 
8337 	mutex_unlock(&ftrace_lock);
8338 }
8339 
8340 /* Greater than any max PID */
8341 #define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
8342 
8343 static void *fpid_start(struct seq_file *m, loff_t *pos)
8344 	__acquires(RCU)
8345 {
8346 	struct trace_pid_list *pid_list;
8347 	struct trace_array *tr = m->private;
8348 
8349 	mutex_lock(&ftrace_lock);
8350 	rcu_read_lock_sched();
8351 
8352 	pid_list = rcu_dereference_sched(tr->function_pids);
8353 
8354 	if (!pid_list)
8355 		return !(*pos) ? FTRACE_NO_PIDS : NULL;
8356 
8357 	return trace_pid_start(pid_list, pos);
8358 }
8359 
8360 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
8361 {
8362 	struct trace_array *tr = m->private;
8363 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
8364 
8365 	if (v == FTRACE_NO_PIDS) {
8366 		(*pos)++;
8367 		return NULL;
8368 	}
8369 	return trace_pid_next(pid_list, v, pos);
8370 }
8371 
8372 static void fpid_stop(struct seq_file *m, void *p)
8373 	__releases(RCU)
8374 {
8375 	rcu_read_unlock_sched();
8376 	mutex_unlock(&ftrace_lock);
8377 }
8378 
8379 static int fpid_show(struct seq_file *m, void *v)
8380 {
8381 	if (v == FTRACE_NO_PIDS) {
8382 		seq_puts(m, "no pid\n");
8383 		return 0;
8384 	}
8385 
8386 	return trace_pid_show(m, v);
8387 }
8388 
8389 static const struct seq_operations ftrace_pid_sops = {
8390 	.start = fpid_start,
8391 	.next = fpid_next,
8392 	.stop = fpid_stop,
8393 	.show = fpid_show,
8394 };
8395 
8396 static void *fnpid_start(struct seq_file *m, loff_t *pos)
8397 	__acquires(RCU)
8398 {
8399 	struct trace_pid_list *pid_list;
8400 	struct trace_array *tr = m->private;
8401 
8402 	mutex_lock(&ftrace_lock);
8403 	rcu_read_lock_sched();
8404 
8405 	pid_list = rcu_dereference_sched(tr->function_no_pids);
8406 
8407 	if (!pid_list)
8408 		return !(*pos) ? FTRACE_NO_PIDS : NULL;
8409 
8410 	return trace_pid_start(pid_list, pos);
8411 }
8412 
8413 static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
8414 {
8415 	struct trace_array *tr = m->private;
8416 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
8417 
8418 	if (v == FTRACE_NO_PIDS) {
8419 		(*pos)++;
8420 		return NULL;
8421 	}
8422 	return trace_pid_next(pid_list, v, pos);
8423 }
8424 
8425 static const struct seq_operations ftrace_no_pid_sops = {
8426 	.start = fnpid_start,
8427 	.next = fnpid_next,
8428 	.stop = fpid_stop,
8429 	.show = fpid_show,
8430 };
8431 
8432 static int pid_open(struct inode *inode, struct file *file, int type)
8433 {
8434 	const struct seq_operations *seq_ops;
8435 	struct trace_array *tr = inode->i_private;
8436 	struct seq_file *m;
8437 	int ret = 0;
8438 
8439 	ret = tracing_check_open_get_tr(tr);
8440 	if (ret)
8441 		return ret;
8442 
8443 	if ((file->f_mode & FMODE_WRITE) &&
8444 	    (file->f_flags & O_TRUNC))
8445 		ftrace_pid_reset(tr, type);
8446 
8447 	switch (type) {
8448 	case TRACE_PIDS:
8449 		seq_ops = &ftrace_pid_sops;
8450 		break;
8451 	case TRACE_NO_PIDS:
8452 		seq_ops = &ftrace_no_pid_sops;
8453 		break;
8454 	default:
8455 		trace_array_put(tr);
8456 		WARN_ON_ONCE(1);
8457 		return -EINVAL;
8458 	}
8459 
8460 	ret = seq_open(file, seq_ops);
8461 	if (ret < 0) {
8462 		trace_array_put(tr);
8463 	} else {
8464 		m = file->private_data;
8465 		/* copy tr over to seq ops */
8466 		m->private = tr;
8467 	}
8468 
8469 	return ret;
8470 }
8471 
8472 static int
8473 ftrace_pid_open(struct inode *inode, struct file *file)
8474 {
8475 	return pid_open(inode, file, TRACE_PIDS);
8476 }
8477 
8478 static int
8479 ftrace_no_pid_open(struct inode *inode, struct file *file)
8480 {
8481 	return pid_open(inode, file, TRACE_NO_PIDS);
8482 }
8483 
8484 static void ignore_task_cpu(void *data)
8485 {
8486 	struct trace_array *tr = data;
8487 	struct trace_pid_list *pid_list;
8488 	struct trace_pid_list *no_pid_list;
8489 
8490 	/*
8491 	 * This function is called by on_each_cpu() while
8492 	 * ftrace_lock is held.
8493 	 */
8494 	pid_list = rcu_dereference_protected(tr->function_pids,
8495 					     mutex_is_locked(&ftrace_lock));
8496 	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
8497 						mutex_is_locked(&ftrace_lock));
8498 
8499 	if (trace_ignore_this_task(pid_list, no_pid_list, current))
8500 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8501 			       FTRACE_PID_IGNORE);
8502 	else
8503 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8504 			       current->pid);
8505 }
8506 
8507 static ssize_t
8508 pid_write(struct file *filp, const char __user *ubuf,
8509 	  size_t cnt, loff_t *ppos, int type)
8510 {
8511 	struct seq_file *m = filp->private_data;
8512 	struct trace_array *tr = m->private;
8513 	struct trace_pid_list *filtered_pids;
8514 	struct trace_pid_list *other_pids;
8515 	struct trace_pid_list *pid_list;
8516 	ssize_t ret;
8517 
8518 	if (!cnt)
8519 		return 0;
8520 
8521 	guard(mutex)(&ftrace_lock);
8522 
8523 	switch (type) {
8524 	case TRACE_PIDS:
8525 		filtered_pids = rcu_dereference_protected(tr->function_pids,
8526 					     lockdep_is_held(&ftrace_lock));
8527 		other_pids = rcu_dereference_protected(tr->function_no_pids,
8528 					     lockdep_is_held(&ftrace_lock));
8529 		break;
8530 	case TRACE_NO_PIDS:
8531 		filtered_pids = rcu_dereference_protected(tr->function_no_pids,
8532 					     lockdep_is_held(&ftrace_lock));
8533 		other_pids = rcu_dereference_protected(tr->function_pids,
8534 					     lockdep_is_held(&ftrace_lock));
8535 		break;
8536 	default:
8537 		WARN_ON_ONCE(1);
8538 		return -EINVAL;
8539 	}
8540 
8541 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
8542 	if (ret < 0)
8543 		return ret;
8544 
8545 	switch (type) {
8546 	case TRACE_PIDS:
8547 		rcu_assign_pointer(tr->function_pids, pid_list);
8548 		break;
8549 	case TRACE_NO_PIDS:
8550 		rcu_assign_pointer(tr->function_no_pids, pid_list);
8551 		break;
8552 	}
8553 
8554 
8555 	if (filtered_pids) {
8556 		synchronize_rcu();
8557 		trace_pid_list_free(filtered_pids);
8558 	} else if (pid_list && !other_pids) {
8559 		/* Register a probe to set whether to ignore the tracing of a task */
8560 		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
8561 	}
8562 
8563 	/*
8564 	 * Ignoring of pids is done at task switch. But we have to
8565 	 * check for those tasks that are currently running.
8566 	 * Always do this in case a pid was appended or removed.
8567 	 */
8568 	on_each_cpu(ignore_task_cpu, tr, 1);
8569 
8570 	ftrace_update_pid_func();
8571 	ftrace_startup_all(0);
8572 
8573 	*ppos += ret;
8574 
8575 	return ret;
8576 }
8577 
8578 static ssize_t
8579 ftrace_pid_write(struct file *filp, const char __user *ubuf,
8580 		 size_t cnt, loff_t *ppos)
8581 {
8582 	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
8583 }
8584 
8585 static ssize_t
8586 ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
8587 		    size_t cnt, loff_t *ppos)
8588 {
8589 	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
8590 }
8591 
8592 static int
8593 ftrace_pid_release(struct inode *inode, struct file *file)
8594 {
8595 	struct trace_array *tr = inode->i_private;
8596 
8597 	trace_array_put(tr);
8598 
8599 	return seq_release(inode, file);
8600 }
8601 
8602 static const struct file_operations ftrace_pid_fops = {
8603 	.open		= ftrace_pid_open,
8604 	.write		= ftrace_pid_write,
8605 	.read		= seq_read,
8606 	.llseek		= tracing_lseek,
8607 	.release	= ftrace_pid_release,
8608 };
8609 
8610 static const struct file_operations ftrace_no_pid_fops = {
8611 	.open		= ftrace_no_pid_open,
8612 	.write		= ftrace_no_pid_write,
8613 	.read		= seq_read,
8614 	.llseek		= tracing_lseek,
8615 	.release	= ftrace_pid_release,
8616 };
8617 
8618 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8619 {
8620 	trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
8621 			    tr, &ftrace_pid_fops);
8622 	trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
8623 			  d_tracer, tr, &ftrace_no_pid_fops);
8624 }
8625 
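/*
 * From user space the two files created above are used like this
 * (paths assume tracefs is mounted at /sys/kernel/tracing):
 *
 *   # trace only the current shell
 *   echo $$ > /sys/kernel/tracing/set_ftrace_pid
 *
 *   # trace everything except pid 42
 *   echo 42 > /sys/kernel/tracing/set_ftrace_notrace_pid
 *
 *   # clear the filter again
 *   echo > /sys/kernel/tracing/set_ftrace_pid
 */
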
8626 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
8627 					 struct dentry *d_tracer)
8628 {
8629 	/* Only the top level directory has the dyn_tracefs and profile */
8630 	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
8631 
8632 	ftrace_init_dyn_tracefs(d_tracer);
8633 	ftrace_profile_tracefs(d_tracer);
8634 }
8635 
8636 /**
8637  * ftrace_kill - kill ftrace
8638  *
8639  * This function should be used by panic code. It stops ftrace
8640  * but in a not-so-nice way, and it is safe to call from atomic
8641  * context.
8642  */
8643 void ftrace_kill(void)
8644 {
8645 	ftrace_disabled = 1;
8646 	ftrace_enabled = 0;
8647 	ftrace_trace_function = ftrace_stub;
8648 	kprobe_ftrace_kill();
8649 }
8650 
8651 /**
8652  * ftrace_is_dead - Test if ftrace is dead or not.
8653  *
8654  * Returns: 1 if ftrace is "dead", zero otherwise.
8655  */
8656 int ftrace_is_dead(void)
8657 {
8658 	return ftrace_disabled;
8659 }
8660 
8661 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
8662 /*
8663  * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
8664  * it doesn't conflict with any direct ftrace_ops. If there is an existing
8665  * direct ftrace_ops on a kernel function being patched, call
8666  * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
8667  *
8668  * @ops:     ftrace_ops being registered.
8669  *
8670  * Returns:
8671  *         0 on success;
8672  *         Negative on failure.
8673  */
8674 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8675 {
8676 	struct ftrace_func_entry *entry;
8677 	struct ftrace_hash *hash;
8678 	struct ftrace_ops *op;
8679 	int size, i, ret;
8680 
8681 	lockdep_assert_held_once(&direct_mutex);
8682 
8683 	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8684 		return 0;
8685 
8686 	hash = ops->func_hash->filter_hash;
8687 	size = 1 << hash->size_bits;
8688 	for (i = 0; i < size; i++) {
8689 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8690 			unsigned long ip = entry->ip;
8691 			bool found_op = false;
8692 
8693 			mutex_lock(&ftrace_lock);
8694 			do_for_each_ftrace_op(op, ftrace_ops_list) {
8695 				if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8696 					continue;
8697 				if (ops_references_ip(op, ip)) {
8698 					found_op = true;
8699 					break;
8700 				}
8701 			} while_for_each_ftrace_op(op);
8702 			mutex_unlock(&ftrace_lock);
8703 
8704 			if (found_op) {
8705 				if (!op->ops_func)
8706 					return -EBUSY;
8707 
8708 				ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
8709 				if (ret)
8710 					return ret;
8711 			}
8712 		}
8713 	}
8714 
8715 	return 0;
8716 }
8717 
8718 /*
8719  * Similar to prepare_direct_functions_for_ipmodify, clean up after ops
8720  * with IPMODIFY is unregistered. The cleanup is optional for most DIRECT
8721  * ops.
8722  */
8723 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8724 {
8725 	struct ftrace_func_entry *entry;
8726 	struct ftrace_hash *hash;
8727 	struct ftrace_ops *op;
8728 	int size, i;
8729 
8730 	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8731 		return;
8732 
8733 	mutex_lock(&direct_mutex);
8734 
8735 	hash = ops->func_hash->filter_hash;
8736 	size = 1 << hash->size_bits;
8737 	for (i = 0; i < size; i++) {
8738 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8739 			unsigned long ip = entry->ip;
8740 			bool found_op = false;
8741 
8742 			mutex_lock(&ftrace_lock);
8743 			do_for_each_ftrace_op(op, ftrace_ops_list) {
8744 				if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8745 					continue;
8746 				if (ops_references_ip(op, ip)) {
8747 					found_op = true;
8748 					break;
8749 				}
8750 			} while_for_each_ftrace_op(op);
8751 			mutex_unlock(&ftrace_lock);
8752 
8753 			/* The cleanup is optional, ignore any errors */
8754 			if (found_op && op->ops_func)
8755 				op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
8756 		}
8757 	}
8758 	mutex_unlock(&direct_mutex);
8759 }
8760 
8761 #define lock_direct_mutex()	mutex_lock(&direct_mutex)
8762 #define unlock_direct_mutex()	mutex_unlock(&direct_mutex)
8763 
8764 #else  /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8765 
8766 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8767 {
8768 	return 0;
8769 }
8770 
8771 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8772 {
8773 }
8774 
8775 #define lock_direct_mutex()	do { } while (0)
8776 #define unlock_direct_mutex()	do { } while (0)
8777 
8778 #endif  /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8779 
8780 /*
8781  * Similar to register_ftrace_function, except we don't lock direct_mutex.
8782  */
8783 static int register_ftrace_function_nolock(struct ftrace_ops *ops)
8784 {
8785 	int ret;
8786 
8787 	ftrace_ops_init(ops);
8788 
8789 	mutex_lock(&ftrace_lock);
8790 
8791 	ret = ftrace_startup(ops, 0);
8792 
8793 	mutex_unlock(&ftrace_lock);
8794 
8795 	return ret;
8796 }
8797 
8798 /**
8799  * register_ftrace_function - register a function for profiling
8800  * @ops:	ops structure that holds the function for profiling.
8801  *
8802  * Register a function to be called by all functions in the
8803  * kernel.
8804  *
8805  * Note: @ops->func and all the functions it calls must be labeled
8806  *       with "notrace", otherwise it will go into a
8807  *       recursive loop.
8808  */
8809 int register_ftrace_function(struct ftrace_ops *ops)
8810 {
8811 	int ret;
8812 
8813 	lock_direct_mutex();
8814 	ret = prepare_direct_functions_for_ipmodify(ops);
8815 	if (ret < 0)
8816 		goto out_unlock;
8817 
8818 	ret = register_ftrace_function_nolock(ops);
8819 
8820 out_unlock:
8821 	unlock_direct_mutex();
8822 	return ret;
8823 }
8824 EXPORT_SYMBOL_GPL(register_ftrace_function);
8825 
8826 /**
8827  * unregister_ftrace_function - unregister a function for profiling.
8828  * @ops:	ops structure that holds the function to unregister
8829  *
8830  * Unregister a function that was added to be called by ftrace profiling.
8831  */
8832 int unregister_ftrace_function(struct ftrace_ops *ops)
8833 {
8834 	int ret;
8835 
8836 	mutex_lock(&ftrace_lock);
8837 	ret = ftrace_shutdown(ops, 0);
8838 	mutex_unlock(&ftrace_lock);
8839 
8840 	cleanup_direct_functions_after_ipmodify(ops);
8841 	return ret;
8842 }
8843 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
8844 
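/*
 * A minimal register/unregister sketch for a module, modeled on the
 * samples/ftrace/ examples and kept out of the build with #if 0. The
 * callback and ops names are placeholders; note that the filter is set
 * up before the ops are registered.
 */
#if 0
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* keep this trivial and safe to run from any traced function */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_RECURSION,
};

static int __init my_init(void)
{
	/* Trace only vfs_read() instead of every function */
	ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}
#endif
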
8845 static int symbols_cmp(const void *a, const void *b)
8846 {
8847 	const char **str_a = (const char **) a;
8848 	const char **str_b = (const char **) b;
8849 
8850 	return strcmp(*str_a, *str_b);
8851 }
8852 
8853 struct kallsyms_data {
8854 	unsigned long *addrs;
8855 	const char **syms;
8856 	size_t cnt;
8857 	size_t found;
8858 };
8859 
8860 /* This function gets called for all kernel and module symbols
8861  * and returns 1 in case we resolved all the requested symbols,
8862  * 0 otherwise.
8863  */
8864 static int kallsyms_callback(void *data, const char *name, unsigned long addr)
8865 {
8866 	struct kallsyms_data *args = data;
8867 	const char **sym;
8868 	int idx;
8869 
8870 	sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8871 	if (!sym)
8872 		return 0;
8873 
8874 	idx = sym - args->syms;
8875 	if (args->addrs[idx])
8876 		return 0;
8877 
8878 	if (!ftrace_location(addr))
8879 		return 0;
8880 
8881 	args->addrs[idx] = addr;
8882 	args->found++;
8883 	return args->found == args->cnt ? 1 : 0;
8884 }
8885 
8886 /**
8887  * ftrace_lookup_symbols - Lookup addresses for array of symbols
8888  *
8889  * @sorted_syms: array of pointers to the symbols to resolve,
8890  * must be alphabetically sorted
8891  * @cnt: number of symbols/addresses in the @sorted_syms/@addrs arrays
8892  * @addrs: array for storing resulting addresses
8893  *
8894  * This function looks up addresses for the array of symbols provided
8895  * in @sorted_syms (which must be alphabetically sorted) and stores
8896  * them in the @addrs array, which needs to be big enough to store
8897  * at least @cnt addresses.
8898  *
8899  * Returns: 0 if all provided symbols are found, -ESRCH otherwise.
8900  */
8901 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
8902 {
8903 	struct kallsyms_data args;
8904 	int found_all;
8905 
8906 	memset(addrs, 0, sizeof(*addrs) * cnt);
8907 	args.addrs = addrs;
8908 	args.syms = sorted_syms;
8909 	args.cnt = cnt;
8910 	args.found = 0;
8911 
8912 	found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
8913 	if (found_all)
8914 		return 0;
8915 	found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
8916 	return found_all ? 0 : -ESRCH;
8917 }
8918 
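/*
 * An illustrative caller, kept out of the build with #if 0, resolving a
 * pre-sorted batch of symbols the way the BPF multi-kprobe attach path
 * does. The symbol list is a placeholder.
 */
#if 0
static int resolve_examples(void)
{
	static const char *syms[] = { "schedule", "vfs_read" }; /* sorted */
	unsigned long addrs[ARRAY_SIZE(syms)];
	int err;

	err = ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
	if (err)
		return err;	/* -ESRCH: at least one symbol not found */
	/* addrs[i] now holds the resolved address of syms[i] */
	return 0;
}
#endif
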
8919 #ifdef CONFIG_SYSCTL
8920 
8921 #ifdef CONFIG_DYNAMIC_FTRACE
8922 static void ftrace_startup_sysctl(void)
8923 {
8924 	int command;
8925 
8926 	if (unlikely(ftrace_disabled))
8927 		return;
8928 
8929 	/* Force update next time */
8930 	saved_ftrace_func = NULL;
8931 	/* ftrace_start_up is true if we want ftrace running */
8932 	if (ftrace_start_up) {
8933 		command = FTRACE_UPDATE_CALLS;
8934 		if (ftrace_graph_active)
8935 			command |= FTRACE_START_FUNC_RET;
8936 		ftrace_startup_enable(command);
8937 	}
8938 }
8939 
8940 static void ftrace_shutdown_sysctl(void)
8941 {
8942 	int command;
8943 
8944 	if (unlikely(ftrace_disabled))
8945 		return;
8946 
8947 	/* ftrace_start_up is true if ftrace is running */
8948 	if (ftrace_start_up) {
8949 		command = FTRACE_DISABLE_CALLS;
8950 		if (ftrace_graph_active)
8951 			command |= FTRACE_STOP_FUNC_RET;
8952 		ftrace_run_update_code(command);
8953 	}
8954 }
8955 #else
8956 # define ftrace_startup_sysctl()       do { } while (0)
8957 # define ftrace_shutdown_sysctl()      do { } while (0)
8958 #endif /* CONFIG_DYNAMIC_FTRACE */
8959 
8960 static bool is_permanent_ops_registered(void)
8961 {
8962 	struct ftrace_ops *op;
8963 
8964 	do_for_each_ftrace_op(op, ftrace_ops_list) {
8965 		if (op->flags & FTRACE_OPS_FL_PERMANENT)
8966 			return true;
8967 	} while_for_each_ftrace_op(op);
8968 
8969 	return false;
8970 }
8971 
8972 static int
8973 ftrace_enable_sysctl(const struct ctl_table *table, int write,
8974 		     void *buffer, size_t *lenp, loff_t *ppos)
8975 {
8976 	int ret;
8977 
8978 	guard(mutex)(&ftrace_lock);
8979 
8980 	if (unlikely(ftrace_disabled))
8981 		return -ENODEV;
8982 
8983 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
8984 
8985 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
8986 		return ret;
8987 
8988 	if (ftrace_enabled) {
8989 
8990 		/* we are starting ftrace again */
8991 		if (rcu_dereference_protected(ftrace_ops_list,
8992 			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
8993 			update_ftrace_function();
8994 
8995 		ftrace_startup_sysctl();
8996 
8997 	} else {
8998 		if (is_permanent_ops_registered()) {
8999 			ftrace_enabled = true;
9000 			return -EBUSY;
9001 		}
9002 
9003 		/* stopping ftrace calls (just send to ftrace_stub) */
9004 		ftrace_trace_function = ftrace_stub;
9005 
9006 		ftrace_shutdown_sysctl();
9007 	}
9008 
9009 	last_ftrace_enabled = !!ftrace_enabled;
9010 	return 0;
9011 }
9012 
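/*
 * From user space this handler sits behind
 * /proc/sys/kernel/ftrace_enabled:
 *
 *   # turn all function tracing off, then back on
 *   sysctl kernel.ftrace_enabled=0
 *   sysctl kernel.ftrace_enabled=1
 *
 * Disabling fails with -EBUSY while an FTRACE_OPS_FL_PERMANENT user
 * (e.g. livepatch) is registered, as checked above.
 */
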
9013 static const struct ctl_table ftrace_sysctls[] = {
9014 	{
9015 		.procname       = "ftrace_enabled",
9016 		.data           = &ftrace_enabled,
9017 		.maxlen         = sizeof(int),
9018 		.mode           = 0644,
9019 		.proc_handler   = ftrace_enable_sysctl,
9020 	},
9021 };
9022 
9023 static int __init ftrace_sysctl_init(void)
9024 {
9025 	register_sysctl_init("kernel", ftrace_sysctls);
9026 	return 0;
9027 }
9028 late_initcall(ftrace_sysctl_init);
9029 #endif
9030