1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally ported from the -rt patch by:
9 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code in the latency_tracer, that is:
12 *
13 * Copyright (C) 2004-2006 Ingo Molnar
14 * Copyright (C) 2004 Nadia Yvette Chambers
15 */
16
17 #include <linux/stop_machine.h>
18 #include <linux/clocksource.h>
19 #include <linux/sched/task.h>
20 #include <linux/kallsyms.h>
21 #include <linux/security.h>
22 #include <linux/seq_file.h>
23 #include <linux/tracefs.h>
24 #include <linux/hardirq.h>
25 #include <linux/kthread.h>
26 #include <linux/uaccess.h>
27 #include <linux/bsearch.h>
28 #include <linux/module.h>
29 #include <linux/ftrace.h>
30 #include <linux/sysctl.h>
31 #include <linux/slab.h>
32 #include <linux/ctype.h>
33 #include <linux/sort.h>
34 #include <linux/list.h>
35 #include <linux/hash.h>
36 #include <linux/rcupdate.h>
37 #include <linux/kprobes.h>
38
39 #include <trace/events/sched.h>
40
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43
44 #include "ftrace_internal.h"
45 #include "trace_output.h"
46 #include "trace_stat.h"
47
48 /* Flags that do not get reset */
49 #define FTRACE_NOCLEAR_FLAGS (FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
50 FTRACE_FL_MODIFIED)
51
52 #define FTRACE_INVALID_FUNCTION "__ftrace_invalid_address__"
53
54 #define FTRACE_WARN_ON(cond) \
55 ({ \
56 int ___r = cond; \
57 if (WARN_ON(___r)) \
58 ftrace_kill(); \
59 ___r; \
60 })
61
62 #define FTRACE_WARN_ON_ONCE(cond) \
63 ({ \
64 int ___r = cond; \
65 if (WARN_ON_ONCE(___r)) \
66 ftrace_kill(); \
67 ___r; \
68 })
69
70 /* hash bits for specific function selection */
71 #define FTRACE_HASH_DEFAULT_BITS 10
72 #define FTRACE_HASH_MAX_BITS 12
73
74 #ifdef CONFIG_DYNAMIC_FTRACE
75 #define INIT_OPS_HASH(opsname) \
76 .func_hash = &opsname.local_hash, \
77 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), \
78 .subop_list = LIST_HEAD_INIT(opsname.subop_list),
79 #else
80 #define INIT_OPS_HASH(opsname)
81 #endif
82
83 enum {
84 FTRACE_MODIFY_ENABLE_FL = (1 << 0),
85 FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1),
86 };
87
88 struct ftrace_ops ftrace_list_end __read_mostly = {
89 .func = ftrace_stub,
90 .flags = FTRACE_OPS_FL_STUB,
91 INIT_OPS_HASH(ftrace_list_end)
92 };
93
94 /* ftrace_enabled is a method to turn ftrace on or off */
95 int ftrace_enabled __read_mostly;
96 static int __maybe_unused last_ftrace_enabled;
97
98 /* Current function tracing op */
99 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
100 /* What to set function_trace_op to */
101 static struct ftrace_ops *set_function_trace_op;
102
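/* True if @ops does pid filtering and its trace_array has a (no)pid filter set */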
103 bool ftrace_pids_enabled(struct ftrace_ops *ops)
104 {
105 struct trace_array *tr;
106
107 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
108 return false;
109
110 tr = ops->private;
111
112 return tr->function_pids != NULL || tr->function_no_pids != NULL;
113 }
114
115 static void ftrace_update_trampoline(struct ftrace_ops *ops);
116
117 /*
118 * ftrace_disabled is set when an anomaly is discovered.
119 * ftrace_disabled is much stronger than ftrace_enabled.
120 */
121 static int ftrace_disabled __read_mostly;
122
123 DEFINE_MUTEX(ftrace_lock);
124
125 struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = (struct ftrace_ops __rcu *)&ftrace_list_end;
126 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
127 struct ftrace_ops global_ops;
128
129 /* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
130 void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
131 struct ftrace_ops *op, struct ftrace_regs *fregs);
132
133 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
134 /*
135 * Stub used to invoke the list ops without requiring a separate trampoline.
136 */
137 const struct ftrace_ops ftrace_list_ops = {
138 .func = ftrace_ops_list_func,
139 .flags = FTRACE_OPS_FL_STUB,
140 };
141
142 static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
143 struct ftrace_ops *op,
144 struct ftrace_regs *fregs)
145 {
146 /* do nothing */
147 }
148
149 /*
150 * Stub used when a call site is disabled. May be called transiently by threads
151 * which have made it into ftrace_caller but haven't yet recovered the ops at
152 * the point the call site is disabled.
153 */
154 const struct ftrace_ops ftrace_nop_ops = {
155 .func = ftrace_ops_nop_func,
156 .flags = FTRACE_OPS_FL_STUB,
157 };
158 #endif
159
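/* One-time init of the parts of @ops used by dynamic ftrace (hash lock, subop list, func_hash) */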
160 static inline void ftrace_ops_init(struct ftrace_ops *ops)
161 {
162 #ifdef CONFIG_DYNAMIC_FTRACE
163 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
164 mutex_init(&ops->local_hash.regex_lock);
165 INIT_LIST_HEAD(&ops->subop_list);
166 ops->func_hash = &ops->local_hash;
167 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
168 }
169 #endif
170 }
171
172 /* Call this function for when a callback filters on set_ftrace_pid */
173 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
174 struct ftrace_ops *op, struct ftrace_regs *fregs)
175 {
176 struct trace_array *tr = op->private;
177 int pid;
178
179 if (tr) {
180 pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
181 if (pid == FTRACE_PID_IGNORE)
182 return;
183 if (pid != FTRACE_PID_TRACE &&
184 pid != current->pid)
185 return;
186 }
187
188 op->saved_func(ip, parent_ip, op, fregs);
189 }
190
191 static void ftrace_sync_ipi(void *data)
192 {
193 /* Probably not needed, but do it anyway */
194 smp_rmb();
195 }
196
197 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
198 {
199 /*
200 * If this is a dynamic or RCU ops, or we force list func,
201 * then it needs to call the list anyway.
202 */
203 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
204 FTRACE_FORCE_LIST_FUNC)
205 return ftrace_ops_list_func;
206
207 return ftrace_ops_get_func(ops);
208 }
209
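/*
 * Recompute which callback and ftrace_ops the arch trampoline should use,
 * based on the current ftrace_ops_list. Expects ftrace_lock to be held.
 */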
210 static void update_ftrace_function(void)
211 {
212 ftrace_func_t func;
213
214 /*
215 * Prepare the ftrace_ops that the arch callback will use.
216 * If there's only one ftrace_ops registered, the ftrace_ops_list
217 * will point to the ops we want.
218 */
219 set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
220 lockdep_is_held(&ftrace_lock));
221
222 /* If there's no ftrace_ops registered, just call the stub function */
223 if (set_function_trace_op == &ftrace_list_end) {
224 func = ftrace_stub;
225
226 /*
227 * If we are at the end of the list and this ops is
228 * recursion safe and not dynamic and the arch supports passing ops,
229 * then have the mcount trampoline call the function directly.
230 */
231 } else if (rcu_dereference_protected(ftrace_ops_list->next,
232 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
233 func = ftrace_ops_get_list_func(ftrace_ops_list);
234
235 } else {
236 /* Just use the default ftrace_ops */
237 set_function_trace_op = &ftrace_list_end;
238 func = ftrace_ops_list_func;
239 }
240
241 /* If there's no change, then do nothing more here */
242 if (ftrace_trace_function == func)
243 return;
244
245 /*
246 * If we are using the list function, it doesn't care
247 * about the function_trace_ops.
248 */
249 if (func == ftrace_ops_list_func) {
250 ftrace_trace_function = func;
251 /*
252 * Don't even bother setting function_trace_ops,
253 * it would be racy to do so anyway.
254 */
255 return;
256 }
257
258 #ifndef CONFIG_DYNAMIC_FTRACE
259 /*
260 * For static tracing, we need to be a bit more careful.
261 * The function change takes effect immediately. Thus,
262 * we need to coordinate the setting of the function_trace_ops
263 * with the setting of the ftrace_trace_function.
264 *
265 * Set the function to the list ops, which will call the
266 * function we want, albeit indirectly, but it handles the
267 * ftrace_ops and doesn't depend on function_trace_op.
268 */
269 ftrace_trace_function = ftrace_ops_list_func;
270 /*
271 * Make sure all CPUs see this. Yes this is slow, but static
272 * tracing is slow and nasty to have enabled.
273 */
274 synchronize_rcu_tasks_rude();
275 /* Now all cpus are using the list ops. */
276 function_trace_op = set_function_trace_op;
277 /* Make sure the function_trace_op is visible on all CPUs */
278 smp_wmb();
279 /* Nasty way to force a rmb on all cpus */
280 smp_call_function(ftrace_sync_ipi, NULL, 1);
281 /* OK, we are all set to update the ftrace_trace_function now! */
282 #endif /* !CONFIG_DYNAMIC_FTRACE */
283
284 ftrace_trace_function = func;
285 }
286
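/* Insert @ops at the head of the RCU-protected @list */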
287 static void add_ftrace_ops(struct ftrace_ops __rcu **list,
288 struct ftrace_ops *ops)
289 {
290 rcu_assign_pointer(ops->next, *list);
291
292 /*
293 * We are entering ops into the list but another
294 * CPU might be walking that list. We need to make sure
295 * the ops->next pointer is valid before another CPU sees
296 * the ops pointer included into the list.
297 */
298 rcu_assign_pointer(*list, ops);
299 }
300
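/* Unlink @ops from the RCU-protected @list; returns 0 on success, -1 if @ops was not found */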
301 static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
302 struct ftrace_ops *ops)
303 {
304 struct ftrace_ops **p;
305
306 /*
307 * If we are removing the last function, then simply point
308 * to the ftrace_stub.
309 */
310 if (rcu_dereference_protected(*list,
311 lockdep_is_held(&ftrace_lock)) == ops &&
312 rcu_dereference_protected(ops->next,
313 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
314 rcu_assign_pointer(*list, &ftrace_list_end);
315 return 0;
316 }
317
318 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
319 if (*p == ops)
320 break;
321
322 if (*p != ops)
323 return -1;
324
325 *p = (*p)->next;
326 return 0;
327 }
328
329 static void ftrace_update_trampoline(struct ftrace_ops *ops);
330
331 int __register_ftrace_function(struct ftrace_ops *ops)
332 {
333 if (ops->flags & FTRACE_OPS_FL_DELETED)
334 return -EINVAL;
335
336 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
337 return -EBUSY;
338
339 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
340 /*
341 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
342 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
343 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
344 */
345 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
346 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
347 return -EINVAL;
348
349 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
350 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
351 #endif
352 if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
353 return -EBUSY;
354
355 if (!is_kernel_core_data((unsigned long)ops))
356 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
357
358 add_ftrace_ops(&ftrace_ops_list, ops);
359
360 /* Always save the function, and reset at unregistering */
361 ops->saved_func = ops->func;
362
363 if (ftrace_pids_enabled(ops))
364 ops->func = ftrace_pid_func;
365
366 ftrace_update_trampoline(ops);
367
368 if (ftrace_enabled)
369 update_ftrace_function();
370
371 return 0;
372 }
373
374 int __unregister_ftrace_function(struct ftrace_ops *ops)
375 {
376 int ret;
377
378 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
379 return -EBUSY;
380
381 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
382
383 if (ret < 0)
384 return ret;
385
386 if (ftrace_enabled)
387 update_ftrace_function();
388
389 ops->func = ops->saved_func;
390
391 return 0;
392 }
393
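/*
 * Called when the pid filters change: switch each FTRACE_OPS_FL_PID ops
 * between ftrace_pid_func and its saved callback, and update its trampoline.
 */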
394 static void ftrace_update_pid_func(void)
395 {
396 struct ftrace_ops *op;
397
398 /* Only do something if we are tracing something */
399 if (ftrace_trace_function == ftrace_stub)
400 return;
401
402 do_for_each_ftrace_op(op, ftrace_ops_list) {
403 if (op->flags & FTRACE_OPS_FL_PID) {
404 op->func = ftrace_pids_enabled(op) ?
405 ftrace_pid_func : op->saved_func;
406 ftrace_update_trampoline(op);
407 }
408 } while_for_each_ftrace_op(op);
409
410 fgraph_update_pid_func();
411
412 update_ftrace_function();
413 }
414
415 #ifdef CONFIG_FUNCTION_PROFILER
416 struct ftrace_profile {
417 struct hlist_node node;
418 unsigned long ip;
419 unsigned long counter;
420 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
421 unsigned long long time;
422 unsigned long long time_squared;
423 #endif
424 };
425
426 struct ftrace_profile_page {
427 struct ftrace_profile_page *next;
428 unsigned long index;
429 struct ftrace_profile records[];
430 };
431
432 struct ftrace_profile_stat {
433 atomic_t disabled;
434 struct hlist_head *hash;
435 struct ftrace_profile_page *pages;
436 struct ftrace_profile_page *start;
437 struct tracer_stat stat;
438 };
439
440 #define PROFILE_RECORDS_SIZE \
441 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
442
443 #define PROFILES_PER_PAGE \
444 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
445
446 static int ftrace_profile_enabled __read_mostly;
447
448 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
449 static DEFINE_MUTEX(ftrace_profile_lock);
450
451 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
452
453 #define FTRACE_PROFILE_HASH_BITS 10
454 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
455
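/* stat_next iterator: advance to the next profile record, crossing pages as needed */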
456 static void *
457 function_stat_next(void *v, int idx)
458 {
459 struct ftrace_profile *rec = v;
460 struct ftrace_profile_page *pg;
461
462 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
463
464 again:
465 if (idx != 0)
466 rec++;
467
468 if ((void *)rec >= (void *)&pg->records[pg->index]) {
469 pg = pg->next;
470 if (!pg)
471 return NULL;
472 rec = &pg->records[0];
473 if (!rec->counter)
474 goto again;
475 }
476
477 return rec;
478 }
479
480 static void *function_stat_start(struct tracer_stat *trace)
481 {
482 struct ftrace_profile_stat *stat =
483 container_of(trace, struct ftrace_profile_stat, stat);
484
485 if (!stat || !stat->start)
486 return NULL;
487
488 return function_stat_next(&stat->start->records[0], 0);
489 }
490
491 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
492 /* function graph compares on total time */
493 static int function_stat_cmp(const void *p1, const void *p2)
494 {
495 const struct ftrace_profile *a = p1;
496 const struct ftrace_profile *b = p2;
497
498 if (a->time < b->time)
499 return -1;
500 if (a->time > b->time)
501 return 1;
502 else
503 return 0;
504 }
505 #else
506 /* not function graph compares against hits */
507 static int function_stat_cmp(const void *p1, const void *p2)
508 {
509 const struct ftrace_profile *a = p1;
510 const struct ftrace_profile *b = p2;
511
512 if (a->counter < b->counter)
513 return -1;
514 if (a->counter > b->counter)
515 return 1;
516 else
517 return 0;
518 }
519 #endif
520
521 static int function_stat_headers(struct seq_file *m)
522 {
523 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
524 seq_puts(m, " Function "
525 "Hit Time Avg s^2\n"
526 " -------- "
527 "--- ---- --- ---\n");
528 #else
529 seq_puts(m, " Function Hit\n"
530 " -------- ---\n");
531 #endif
532 return 0;
533 }
534
535 static int function_stat_show(struct seq_file *m, void *v)
536 {
537 struct ftrace_profile *rec = v;
538 char str[KSYM_SYMBOL_LEN];
539 int ret = 0;
540 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
541 static struct trace_seq s;
542 unsigned long long avg;
543 unsigned long long stddev;
544 #endif
545 mutex_lock(&ftrace_profile_lock);
546
547 /* we raced with function_profile_reset() */
548 if (unlikely(rec->counter == 0)) {
549 ret = -EBUSY;
550 goto out;
551 }
552
553 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
554 avg = div64_ul(rec->time, rec->counter);
555 if (tracing_thresh && (avg < tracing_thresh))
556 goto out;
557 #endif
558
559 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
560 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
561
562 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
563 seq_puts(m, " ");
564
565 /* Sample standard deviation (s^2) */
566 if (rec->counter <= 1)
567 stddev = 0;
568 else {
569 /*
570 * Apply Welford's method:
571 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
572 */
573 stddev = rec->counter * rec->time_squared -
574 rec->time * rec->time;
575
576 /*
577 * Divide only by 1000 for ns^2 -> us^2 conversion.
578 * trace_print_graph_duration will divide 1000 again.
579 */
580 stddev = div64_ul(stddev,
581 rec->counter * (rec->counter - 1) * 1000);
582 }
583
584 trace_seq_init(&s);
585 trace_print_graph_duration(rec->time, &s);
586 trace_seq_puts(&s, " ");
587 trace_print_graph_duration(avg, &s);
588 trace_seq_puts(&s, " ");
589 trace_print_graph_duration(stddev, &s);
590 trace_print_seq(m, &s);
591 #endif
592 seq_putc(m, '\n');
593 out:
594 mutex_unlock(&ftrace_profile_lock);
595
596 return ret;
597 }
598
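/* Reset one CPU's profile data: zero every record page and clear the hash table */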
599 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
600 {
601 struct ftrace_profile_page *pg;
602
603 pg = stat->pages = stat->start;
604
605 while (pg) {
606 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
607 pg->index = 0;
608 pg = pg->next;
609 }
610
611 memset(stat->hash, 0,
612 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
613 }
614
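/*
 * Preallocate the per-CPU pages that hold profile records, sized to cover
 * every function ftrace knows about (or a 20K estimate without dynamic ftrace).
 */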
615 static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
616 {
617 struct ftrace_profile_page *pg;
618 int functions;
619 int pages;
620 int i;
621
622 /* If we already allocated, do nothing */
623 if (stat->pages)
624 return 0;
625
626 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
627 if (!stat->pages)
628 return -ENOMEM;
629
630 #ifdef CONFIG_DYNAMIC_FTRACE
631 functions = ftrace_update_tot_cnt;
632 #else
633 /*
634 * We do not know the number of functions that exist because
635 * dynamic tracing is what counts them. From past experience,
636 * there are around 20K functions. That should be more than enough.
637 * It is highly unlikely we will execute every function in
638 * the kernel.
639 */
640 functions = 20000;
641 #endif
642
643 pg = stat->start = stat->pages;
644
645 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
646
647 for (i = 1; i < pages; i++) {
648 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
649 if (!pg->next)
650 goto out_free;
651 pg = pg->next;
652 }
653
654 return 0;
655
656 out_free:
657 pg = stat->start;
658 while (pg) {
659 unsigned long tmp = (unsigned long)pg;
660
661 pg = pg->next;
662 free_page(tmp);
663 }
664
665 stat->pages = NULL;
666 stat->start = NULL;
667
668 return -ENOMEM;
669 }
670
671 static int ftrace_profile_init_cpu(int cpu)
672 {
673 struct ftrace_profile_stat *stat;
674 int size;
675
676 stat = &per_cpu(ftrace_profile_stats, cpu);
677
678 if (stat->hash) {
679 /* If the profile is already created, simply reset it */
680 ftrace_profile_reset(stat);
681 return 0;
682 }
683
684 /*
685 * We are profiling all functions, but usually only a few thousand
686 * functions are hit. We'll make a hash of 1024 items.
687 */
688 size = FTRACE_PROFILE_HASH_SIZE;
689
690 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
691
692 if (!stat->hash)
693 return -ENOMEM;
694
695 /* Preallocate the function profiling pages */
696 if (ftrace_profile_pages_init(stat) < 0) {
697 kfree(stat->hash);
698 stat->hash = NULL;
699 return -ENOMEM;
700 }
701
702 return 0;
703 }
704
705 static int ftrace_profile_init(void)
706 {
707 int cpu;
708 int ret = 0;
709
710 for_each_possible_cpu(cpu) {
711 ret = ftrace_profile_init_cpu(cpu);
712 if (ret)
713 break;
714 }
715
716 return ret;
717 }
718
719 /* interrupts must be disabled */
720 static struct ftrace_profile *
721 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
722 {
723 struct ftrace_profile *rec;
724 struct hlist_head *hhd;
725 unsigned long key;
726
727 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
728 hhd = &stat->hash[key];
729
730 if (hlist_empty(hhd))
731 return NULL;
732
733 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
734 if (rec->ip == ip)
735 return rec;
736 }
737
738 return NULL;
739 }
740
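/* Hash the record's ip and link it into the per-CPU profile hash table */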
741 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
742 struct ftrace_profile *rec)
743 {
744 unsigned long key;
745
746 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
747 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
748 }
749
750 /*
751 * The memory is already allocated; this simply finds a new record to use.
752 */
753 static struct ftrace_profile *
754 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
755 {
756 struct ftrace_profile *rec = NULL;
757
758 /* prevent recursion (from NMIs) */
759 if (atomic_inc_return(&stat->disabled) != 1)
760 goto out;
761
762 /*
763 * Try to find the function again since an NMI
764 * could have added it
765 */
766 rec = ftrace_find_profiled_func(stat, ip);
767 if (rec)
768 goto out;
769
770 if (stat->pages->index == PROFILES_PER_PAGE) {
771 if (!stat->pages->next)
772 goto out;
773 stat->pages = stat->pages->next;
774 }
775
776 rec = &stat->pages->records[stat->pages->index++];
777 rec->ip = ip;
778 ftrace_add_profile(stat, rec);
779
780 out:
781 atomic_dec(&stat->disabled);
782
783 return rec;
784 }
785
786 static void
787 function_profile_call(unsigned long ip, unsigned long parent_ip,
788 struct ftrace_ops *ops, struct ftrace_regs *fregs)
789 {
790 struct ftrace_profile_stat *stat;
791 struct ftrace_profile *rec;
792 unsigned long flags;
793
794 if (!ftrace_profile_enabled)
795 return;
796
797 local_irq_save(flags);
798
799 stat = this_cpu_ptr(&ftrace_profile_stats);
800 if (!stat->hash || !ftrace_profile_enabled)
801 goto out;
802
803 rec = ftrace_find_profiled_func(stat, ip);
804 if (!rec) {
805 rec = ftrace_profile_alloc(stat, ip);
806 if (!rec)
807 goto out;
808 }
809
810 rec->counter++;
811 out:
812 local_irq_restore(flags);
813 }
814
815 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
816 static bool fgraph_graph_time = true;
817
818 void ftrace_graph_graph_time_control(bool enable)
819 {
820 fgraph_graph_time = enable;
821 }
822
823 struct profile_fgraph_data {
824 unsigned long long calltime;
825 unsigned long long subtime;
826 unsigned long long sleeptime;
827 };
828
829 static int profile_graph_entry(struct ftrace_graph_ent *trace,
830 struct fgraph_ops *gops)
831 {
832 struct profile_fgraph_data *profile_data;
833
834 function_profile_call(trace->func, 0, NULL, NULL);
835
836 /* If function graph is shutting down, ret_stack can be NULL */
837 if (!current->ret_stack)
838 return 0;
839
840 profile_data = fgraph_reserve_data(gops->idx, sizeof(*profile_data));
841 if (!profile_data)
842 return 0;
843
844 profile_data->subtime = 0;
845 profile_data->sleeptime = current->ftrace_sleeptime;
846 profile_data->calltime = trace_clock_local();
847
848 return 1;
849 }
850
851 static void profile_graph_return(struct ftrace_graph_ret *trace,
852 struct fgraph_ops *gops)
853 {
854 struct profile_fgraph_data *profile_data;
855 struct ftrace_profile_stat *stat;
856 unsigned long long calltime;
857 unsigned long long rettime = trace_clock_local();
858 struct ftrace_profile *rec;
859 unsigned long flags;
860 int size;
861
862 local_irq_save(flags);
863 stat = this_cpu_ptr(&ftrace_profile_stats);
864 if (!stat->hash || !ftrace_profile_enabled)
865 goto out;
866
867 profile_data = fgraph_retrieve_data(gops->idx, &size);
868
869 /* If the calltime was zeroed, ignore it */
870 if (!profile_data || !profile_data->calltime)
871 goto out;
872
873 calltime = rettime - profile_data->calltime;
874
875 if (!fgraph_sleep_time) {
876 if (current->ftrace_sleeptime)
877 calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
878 }
879
880 if (!fgraph_graph_time) {
881 struct profile_fgraph_data *parent_data;
882
883 /* Append this call time to the parent time to subtract */
884 parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);
885 if (parent_data)
886 parent_data->subtime += calltime;
887
888 if (profile_data->subtime && profile_data->subtime < calltime)
889 calltime -= profile_data->subtime;
890 else
891 calltime = 0;
892 }
893
894 rec = ftrace_find_profiled_func(stat, trace->func);
895 if (rec) {
896 rec->time += calltime;
897 rec->time_squared += calltime * calltime;
898 }
899
900 out:
901 local_irq_restore(flags);
902 }
903
904 static struct fgraph_ops fprofiler_ops = {
905 .ops = {
906 .flags = FTRACE_OPS_FL_INITIALIZED,
907 INIT_OPS_HASH(fprofiler_ops.ops)
908 },
909 .entryfunc = &profile_graph_entry,
910 .retfunc = &profile_graph_return,
911 };
912
913 static int register_ftrace_profiler(void)
914 {
915 return register_ftrace_graph(&fprofiler_ops);
916 }
917
918 static void unregister_ftrace_profiler(void)
919 {
920 unregister_ftrace_graph(&fprofiler_ops);
921 }
922 #else
923 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
924 .func = function_profile_call,
925 .flags = FTRACE_OPS_FL_INITIALIZED,
926 INIT_OPS_HASH(ftrace_profile_ops)
927 };
928
929 static int register_ftrace_profiler(void)
930 {
931 return register_ftrace_function(&ftrace_profile_ops);
932 }
933
934 static void unregister_ftrace_profiler(void)
935 {
936 unregister_ftrace_function(&ftrace_profile_ops);
937 }
938 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
939
940 static ssize_t
941 ftrace_profile_write(struct file *filp, const char __user *ubuf,
942 size_t cnt, loff_t *ppos)
943 {
944 unsigned long val;
945 int ret;
946
947 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
948 if (ret)
949 return ret;
950
951 val = !!val;
952
953 mutex_lock(&ftrace_profile_lock);
954 if (ftrace_profile_enabled ^ val) {
955 if (val) {
956 ret = ftrace_profile_init();
957 if (ret < 0) {
958 cnt = ret;
959 goto out;
960 }
961
962 ret = register_ftrace_profiler();
963 if (ret < 0) {
964 cnt = ret;
965 goto out;
966 }
967 ftrace_profile_enabled = 1;
968 } else {
969 ftrace_profile_enabled = 0;
970 /*
971 * unregister_ftrace_profiler calls stop_machine
972 * so this acts like a synchronize_rcu.
973 */
974 unregister_ftrace_profiler();
975 }
976 }
977 out:
978 mutex_unlock(&ftrace_profile_lock);
979
980 *ppos += cnt;
981
982 return cnt;
983 }
984
985 static ssize_t
986 ftrace_profile_read(struct file *filp, char __user *ubuf,
987 size_t cnt, loff_t *ppos)
988 {
989 char buf[64]; /* big enough to hold a number */
990 int r;
991
992 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
993 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
994 }
995
996 static const struct file_operations ftrace_profile_fops = {
997 .open = tracing_open_generic,
998 .read = ftrace_profile_read,
999 .write = ftrace_profile_write,
1000 .llseek = default_llseek,
1001 };
1002
1003 /* used to initialize the real stat files */
1004 static struct tracer_stat function_stats __initdata = {
1005 .name = "functions",
1006 .stat_start = function_stat_start,
1007 .stat_next = function_stat_next,
1008 .stat_cmp = function_stat_cmp,
1009 .stat_headers = function_stat_headers,
1010 .stat_show = function_stat_show
1011 };
1012
1013 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1014 {
1015 struct ftrace_profile_stat *stat;
1016 char *name;
1017 int ret;
1018 int cpu;
1019
1020 for_each_possible_cpu(cpu) {
1021 stat = &per_cpu(ftrace_profile_stats, cpu);
1022
1023 name = kasprintf(GFP_KERNEL, "function%d", cpu);
1024 if (!name) {
1025 /*
1026 * The files created are permanent; even if something goes wrong,
1027 * we still do not free the memory.
1028 */
1029 WARN(1,
1030 "Could not allocate stat file for cpu %d\n",
1031 cpu);
1032 return;
1033 }
1034 stat->stat = function_stats;
1035 stat->stat.name = name;
1036 ret = register_stat_tracer(&stat->stat);
1037 if (ret) {
1038 WARN(1,
1039 "Could not register function stat for cpu %d\n",
1040 cpu);
1041 kfree(name);
1042 return;
1043 }
1044 }
1045
1046 trace_create_file("function_profile_enabled",
1047 TRACE_MODE_WRITE, d_tracer, NULL,
1048 &ftrace_profile_fops);
1049 }
1050
1051 #else /* CONFIG_FUNCTION_PROFILER */
1052 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1053 {
1054 }
1055 #endif /* CONFIG_FUNCTION_PROFILER */
1056
1057 #ifdef CONFIG_DYNAMIC_FTRACE
1058
1059 static struct ftrace_ops *removed_ops;
1060
1061 /*
1062 * Set when doing a global update, like enabling all recs or disabling them.
1063 * It is not set when just updating a single ftrace_ops.
1064 */
1065 static bool update_all_ops;
1066
1067 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1068 # error Dynamic ftrace depends on MCOUNT_RECORD
1069 #endif
1070
1071 struct ftrace_func_probe {
1072 struct ftrace_probe_ops *probe_ops;
1073 struct ftrace_ops ops;
1074 struct trace_array *tr;
1075 struct list_head list;
1076 void *data;
1077 int ref;
1078 };
1079
1080 /*
1081 * We make these constant because no one should touch them,
1082 * but they are used as the default "empty hash", to avoid allocating
1083 * it all the time. These are in a read only section such that if
1084 * anyone does try to modify it, it will cause an exception.
1085 */
1086 static const struct hlist_head empty_buckets[1];
1087 static const struct ftrace_hash empty_hash = {
1088 .buckets = (struct hlist_head *)empty_buckets,
1089 };
1090 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1091
1092 struct ftrace_ops global_ops = {
1093 .func = ftrace_stub,
1094 .local_hash.notrace_hash = EMPTY_HASH,
1095 .local_hash.filter_hash = EMPTY_HASH,
1096 INIT_OPS_HASH(global_ops)
1097 .flags = FTRACE_OPS_FL_INITIALIZED |
1098 FTRACE_OPS_FL_PID,
1099 };
1100
1101 /*
1102 * Used by the stack unwinder to know about dynamic ftrace trampolines.
1103 */
1104 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1105 {
1106 struct ftrace_ops *op = NULL;
1107
1108 /*
1109 * Some of the ops may be dynamically allocated,
1110 * they are freed after a synchronize_rcu().
1111 */
1112 preempt_disable_notrace();
1113
1114 do_for_each_ftrace_op(op, ftrace_ops_list) {
1115 /*
1116 * This is to check for dynamically allocated trampolines.
1117 * Trampolines that are in kernel text will have
1118 * core_kernel_text() return true.
1119 */
1120 if (op->trampoline && op->trampoline_size)
1121 if (addr >= op->trampoline &&
1122 addr < op->trampoline + op->trampoline_size) {
1123 preempt_enable_notrace();
1124 return op;
1125 }
1126 } while_for_each_ftrace_op(op);
1127 preempt_enable_notrace();
1128
1129 return NULL;
1130 }
1131
1132 /*
1133 * This is used by __kernel_text_address() to return true if the
1134 * address is on a dynamically allocated trampoline that would
1135 * not return true for either core_kernel_text() or
1136 * is_module_text_address().
1137 */
1138 bool is_ftrace_trampoline(unsigned long addr)
1139 {
1140 return ftrace_ops_trampoline(addr) != NULL;
1141 }
1142
1143 struct ftrace_page {
1144 struct ftrace_page *next;
1145 struct dyn_ftrace *records;
1146 int index;
1147 int order;
1148 };
1149
1150 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1151 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1152
1153 static struct ftrace_page *ftrace_pages_start;
1154 static struct ftrace_page *ftrace_pages;
1155
1156 static __always_inline unsigned long
1157 ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1158 {
1159 if (hash->size_bits > 0)
1160 return hash_long(ip, hash->size_bits);
1161
1162 return 0;
1163 }
1164
1165 /* Only use this function if ftrace_hash_empty() has already been tested */
1166 static __always_inline struct ftrace_func_entry *
1167 __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1168 {
1169 unsigned long key;
1170 struct ftrace_func_entry *entry;
1171 struct hlist_head *hhd;
1172
1173 key = ftrace_hash_key(hash, ip);
1174 hhd = &hash->buckets[key];
1175
1176 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1177 if (entry->ip == ip)
1178 return entry;
1179 }
1180 return NULL;
1181 }
1182
1183 /**
1184 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1185 * @hash: The hash to look at
1186 * @ip: The instruction pointer to test
1187 *
1188 * Search a given @hash to see if a given instruction pointer (@ip)
1189 * exists in it.
1190 *
1191 * Returns: the entry that holds the @ip if found. NULL otherwise.
1192 */
1193 struct ftrace_func_entry *
1194 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1195 {
1196 if (ftrace_hash_empty(hash))
1197 return NULL;
1198
1199 return __ftrace_lookup_ip(hash, ip);
1200 }
1201
1202 static void __add_hash_entry(struct ftrace_hash *hash,
1203 struct ftrace_func_entry *entry)
1204 {
1205 struct hlist_head *hhd;
1206 unsigned long key;
1207
1208 key = ftrace_hash_key(hash, entry->ip);
1209 hhd = &hash->buckets[key];
1210 hlist_add_head(&entry->hlist, hhd);
1211 hash->count++;
1212 }
1213
1214 static struct ftrace_func_entry *
1215 add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1216 {
1217 struct ftrace_func_entry *entry;
1218
1219 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1220 if (!entry)
1221 return NULL;
1222
1223 entry->ip = ip;
1224 __add_hash_entry(hash, entry);
1225
1226 return entry;
1227 }
1228
1229 static void
1230 free_hash_entry(struct ftrace_hash *hash,
1231 struct ftrace_func_entry *entry)
1232 {
1233 hlist_del(&entry->hlist);
1234 kfree(entry);
1235 hash->count--;
1236 }
1237
1238 static void
1239 remove_hash_entry(struct ftrace_hash *hash,
1240 struct ftrace_func_entry *entry)
1241 {
1242 hlist_del_rcu(&entry->hlist);
1243 hash->count--;
1244 }
1245
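/* Remove and free every entry in @hash; the bucket array itself is kept */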
1246 static void ftrace_hash_clear(struct ftrace_hash *hash)
1247 {
1248 struct hlist_head *hhd;
1249 struct hlist_node *tn;
1250 struct ftrace_func_entry *entry;
1251 int size = 1 << hash->size_bits;
1252 int i;
1253
1254 if (!hash->count)
1255 return;
1256
1257 for (i = 0; i < size; i++) {
1258 hhd = &hash->buckets[i];
1259 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1260 free_hash_entry(hash, entry);
1261 }
1262 FTRACE_WARN_ON(hash->count);
1263 }
1264
1265 static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1266 {
1267 list_del(&ftrace_mod->list);
1268 kfree(ftrace_mod->module);
1269 kfree(ftrace_mod->func);
1270 kfree(ftrace_mod);
1271 }
1272
1273 static void clear_ftrace_mod_list(struct list_head *head)
1274 {
1275 struct ftrace_mod_load *p, *n;
1276
1277 /* stack tracer isn't supported yet */
1278 if (!head)
1279 return;
1280
1281 mutex_lock(&ftrace_lock);
1282 list_for_each_entry_safe(p, n, head, list)
1283 free_ftrace_mod(p);
1284 mutex_unlock(&ftrace_lock);
1285 }
1286
1287 static void free_ftrace_hash(struct ftrace_hash *hash)
1288 {
1289 if (!hash || hash == EMPTY_HASH)
1290 return;
1291 ftrace_hash_clear(hash);
1292 kfree(hash->buckets);
1293 kfree(hash);
1294 }
1295
1296 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1297 {
1298 struct ftrace_hash *hash;
1299
1300 hash = container_of(rcu, struct ftrace_hash, rcu);
1301 free_ftrace_hash(hash);
1302 }
1303
1304 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1305 {
1306 if (!hash || hash == EMPTY_HASH)
1307 return;
1308 call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1309 }
1310
1311 /**
1312 * ftrace_free_filter - remove all filters for an ftrace_ops
1313 * @ops: the ops to remove the filters from
1314 */
1315 void ftrace_free_filter(struct ftrace_ops *ops)
1316 {
1317 ftrace_ops_init(ops);
1318 free_ftrace_hash(ops->func_hash->filter_hash);
1319 free_ftrace_hash(ops->func_hash->notrace_hash);
1320 }
1321 EXPORT_SYMBOL_GPL(ftrace_free_filter);
1322
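/* Allocate an empty ftrace_hash with (1 << size_bits) buckets */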
1323 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1324 {
1325 struct ftrace_hash *hash;
1326 int size;
1327
1328 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1329 if (!hash)
1330 return NULL;
1331
1332 size = 1 << size_bits;
1333 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1334
1335 if (!hash->buckets) {
1336 kfree(hash);
1337 return NULL;
1338 }
1339
1340 hash->size_bits = size_bits;
1341
1342 return hash;
1343 }
1344
1345 /* Used to save filters on functions for modules not loaded yet */
1346 static int ftrace_add_mod(struct trace_array *tr,
1347 const char *func, const char *module,
1348 int enable)
1349 {
1350 struct ftrace_mod_load *ftrace_mod;
1351 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1352
1353 ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1354 if (!ftrace_mod)
1355 return -ENOMEM;
1356
1357 INIT_LIST_HEAD(&ftrace_mod->list);
1358 ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1359 ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1360 ftrace_mod->enable = enable;
1361
1362 if (!ftrace_mod->func || !ftrace_mod->module)
1363 goto out_free;
1364
1365 list_add(&ftrace_mod->list, mod_head);
1366
1367 return 0;
1368
1369 out_free:
1370 free_ftrace_mod(ftrace_mod);
1371
1372 return -ENOMEM;
1373 }
1374
1375 static struct ftrace_hash *
1376 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1377 {
1378 struct ftrace_func_entry *entry;
1379 struct ftrace_hash *new_hash;
1380 int size;
1381 int i;
1382
1383 new_hash = alloc_ftrace_hash(size_bits);
1384 if (!new_hash)
1385 return NULL;
1386
1387 if (hash)
1388 new_hash->flags = hash->flags;
1389
1390 /* Empty hash? */
1391 if (ftrace_hash_empty(hash))
1392 return new_hash;
1393
1394 size = 1 << hash->size_bits;
1395 for (i = 0; i < size; i++) {
1396 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1397 if (add_hash_entry(new_hash, entry->ip) == NULL)
1398 goto free_hash;
1399 }
1400 }
1401
1402 FTRACE_WARN_ON(new_hash->count != hash->count);
1403
1404 return new_hash;
1405
1406 free_hash:
1407 free_ftrace_hash(new_hash);
1408 return NULL;
1409 }
1410
1411 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops);
1412 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops);
1413
1414 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1415 struct ftrace_hash *new_hash);
1416
1417 /*
1418 * Allocate a new hash, remove the entries from @src and move them to the new hash.
1419 * On success, the @src hash will be empty and should be freed.
1420 */
1421 static struct ftrace_hash *__move_hash(struct ftrace_hash *src, int size)
1422 {
1423 struct ftrace_func_entry *entry;
1424 struct ftrace_hash *new_hash;
1425 struct hlist_head *hhd;
1426 struct hlist_node *tn;
1427 int bits = 0;
1428 int i;
1429
1430 /*
1431 * Use around half the size (max bit of it), but
1432 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
1433 */
1434 bits = fls(size / 2);
1435
1436 /* Don't allocate too much */
1437 if (bits > FTRACE_HASH_MAX_BITS)
1438 bits = FTRACE_HASH_MAX_BITS;
1439
1440 new_hash = alloc_ftrace_hash(bits);
1441 if (!new_hash)
1442 return NULL;
1443
1444 new_hash->flags = src->flags;
1445
1446 size = 1 << src->size_bits;
1447 for (i = 0; i < size; i++) {
1448 hhd = &src->buckets[i];
1449 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1450 remove_hash_entry(src, entry);
1451 __add_hash_entry(new_hash, entry);
1452 }
1453 }
1454 return new_hash;
1455 }
1456
1457 /* Move the @src entries to a newly allocated hash */
1458 static struct ftrace_hash *
1459 __ftrace_hash_move(struct ftrace_hash *src)
1460 {
1461 int size = src->count;
1462
1463 /*
1464 * If the new source is empty, just return the empty_hash.
1465 */
1466 if (ftrace_hash_empty(src))
1467 return EMPTY_HASH;
1468
1469 return __move_hash(src, size);
1470 }
1471
1472 /**
1473 * ftrace_hash_move - move a new hash to a filter and do updates
1474 * @ops: The ops with the hash that @dst points to
1475 * @enable: True if for the filter hash, false for the notrace hash
1476 * @dst: Points to the @ops hash that should be updated
1477 * @src: The hash to update @dst with
1478 *
1479 * This is called when an ftrace_ops hash is being updated and the
1480 * the kernel needs to reflect this. Note, this only updates the kernel
1481 * function callbacks if the @ops is enabled (not to be confused with
1482 * @enable above). If the @ops is enabled, its hash determines what
1483 * callbacks get called. This function gets called when the @ops hash
1484 * is updated and it requires new callbacks.
1485 *
1486 * On success the elements of @src are moved to @dst, and @dst is updated
1487 * properly, as well as the functions determined by the @ops hashes
1488 * are now calling the @ops callback function.
1489 *
1490 * Regardless of return type, @src should be freed with free_ftrace_hash().
1491 */
1492 static int
1493 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1494 struct ftrace_hash **dst, struct ftrace_hash *src)
1495 {
1496 struct ftrace_hash *new_hash;
1497 int ret;
1498
1499 /* Reject setting notrace hash on IPMODIFY ftrace_ops */
1500 if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1501 return -EINVAL;
1502
1503 new_hash = __ftrace_hash_move(src);
1504 if (!new_hash)
1505 return -ENOMEM;
1506
1507 /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1508 if (enable) {
1509 /* IPMODIFY should be updated only when filter_hash updating */
1510 ret = ftrace_hash_ipmodify_update(ops, new_hash);
1511 if (ret < 0) {
1512 free_ftrace_hash(new_hash);
1513 return ret;
1514 }
1515 }
1516
1517 /*
1518 * Remove the current set, update the hash and add
1519 * them back.
1520 */
1521 ftrace_hash_rec_disable_modify(ops);
1522
1523 rcu_assign_pointer(*dst, new_hash);
1524
1525 ftrace_hash_rec_enable_modify(ops);
1526
1527 return 0;
1528 }
1529
1530 static bool hash_contains_ip(unsigned long ip,
1531 struct ftrace_ops_hash *hash)
1532 {
1533 /*
1534 * The function record is a match if it exists in the filter
1535 * hash and not in the notrace hash. Note, an empty hash is
1536 * considered a match for the filter hash, but an empty
1537 * notrace hash is considered not in the notrace hash.
1538 */
1539 return (ftrace_hash_empty(hash->filter_hash) ||
1540 __ftrace_lookup_ip(hash->filter_hash, ip)) &&
1541 (ftrace_hash_empty(hash->notrace_hash) ||
1542 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1543 }
1544
1545 /*
1546 * Test the hashes for this ops to see if we want to call
1547 * the ops->func or not.
1548 *
1549 * It's a match if the ip is in the ops->filter_hash or
1550 * the filter_hash does not exist or is empty,
1551 * AND
1552 * the ip is not in the ops->notrace_hash.
1553 *
1554 * This needs to be called with preemption disabled as
1555 * the hashes are freed with call_rcu().
1556 */
1557 int
1558 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1559 {
1560 struct ftrace_ops_hash hash;
1561 int ret;
1562
1563 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1564 /*
1565 * There's a small race when adding ops that the ftrace handler
1566 * that wants regs, may be called without them. We can not
1567 * allow that handler to be called if regs is NULL.
1568 */
1569 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1570 return 0;
1571 #endif
1572
1573 rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1574 rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1575
1576 if (hash_contains_ip(ip, &hash))
1577 ret = 1;
1578 else
1579 ret = 0;
1580
1581 return ret;
1582 }
1583
1584 /*
1585 * This is a double for loop. Do not use 'break' to break out of the loop;
1586 * you must use a goto.
1587 */
1588 #define do_for_each_ftrace_rec(pg, rec) \
1589 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1590 int _____i; \
1591 for (_____i = 0; _____i < pg->index; _____i++) { \
1592 rec = &pg->records[_____i];
1593
1594 #define while_for_each_ftrace_rec() \
1595 } \
1596 }
1597
1598
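/*
 * Comparison callback for bsearch(): the key carries an address range
 * (ip = start, flags overloaded as end) that is matched against a record's
 * mcount/fentry instruction range.
 */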
1599 static int ftrace_cmp_recs(const void *a, const void *b)
1600 {
1601 const struct dyn_ftrace *key = a;
1602 const struct dyn_ftrace *rec = b;
1603
1604 if (key->flags < rec->ip)
1605 return -1;
1606 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1607 return 1;
1608 return 0;
1609 }
1610
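/* Search the ftrace record pages for a record whose call site falls within [start, end] */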
1611 static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
1612 {
1613 struct ftrace_page *pg;
1614 struct dyn_ftrace *rec = NULL;
1615 struct dyn_ftrace key;
1616
1617 key.ip = start;
1618 key.flags = end; /* overload flags, as it is unsigned long */
1619
1620 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1621 if (pg->index == 0 ||
1622 end < pg->records[0].ip ||
1623 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1624 continue;
1625 rec = bsearch(&key, pg->records, pg->index,
1626 sizeof(struct dyn_ftrace),
1627 ftrace_cmp_recs);
1628 if (rec)
1629 break;
1630 }
1631 return rec;
1632 }
1633
1634 /**
1635 * ftrace_location_range - return the first address of a traced location
1636 * if it touches the given ip range
1637 * @start: start of range to search.
1638 * @end: end of range to search (inclusive). @end points to the last byte
1639 * to check.
1640 *
1641 * Returns: rec->ip if the related ftrace location is at least partly within
1642 * the given address range. That is, the first address of the instruction
1643 * that is either a NOP or call to the function tracer. It checks the ftrace
1644 * internal tables to determine if the address belongs or not.
1645 */
1646 unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1647 {
1648 struct dyn_ftrace *rec;
1649 unsigned long ip = 0;
1650
1651 rcu_read_lock();
1652 rec = lookup_rec(start, end);
1653 if (rec)
1654 ip = rec->ip;
1655 rcu_read_unlock();
1656
1657 return ip;
1658 }
1659
1660 /**
1661 * ftrace_location - return the ftrace location
1662 * @ip: the instruction pointer to check
1663 *
1664 * Returns:
1665 * * If @ip matches the ftrace location, return @ip.
1666 * * If @ip matches sym+0, return sym's ftrace location.
1667 * * Otherwise, return 0.
1668 */
1669 unsigned long ftrace_location(unsigned long ip)
1670 {
1671 unsigned long loc;
1672 unsigned long offset;
1673 unsigned long size;
1674
1675 loc = ftrace_location_range(ip, ip);
1676 if (!loc) {
1677 if (!kallsyms_lookup_size_offset(ip, &size, &offset))
1678 goto out;
1679
1680 /* map sym+0 to __fentry__ */
1681 if (!offset)
1682 loc = ftrace_location_range(ip, ip + size - 1);
1683 }
1684
1685 out:
1686 return loc;
1687 }
1688
1689 /**
1690 * ftrace_text_reserved - return true if range contains an ftrace location
1691 * @start: start of range to search
1692 * @end: end of range to search (inclusive). @end points to the last byte to check.
1693 *
1694 * Returns: 1 if the range from @start to @end contains an ftrace location.
1695 * That is, the instruction that is either a NOP or call to
1696 * the function tracer. It checks the ftrace internal tables to
1697 * determine if the address belongs or not.
1698 */
1699 int ftrace_text_reserved(const void *start, const void *end)
1700 {
1701 unsigned long ret;
1702
1703 ret = ftrace_location_range((unsigned long)start,
1704 (unsigned long)end);
1705
1706 return (int)!!ret;
1707 }
1708
1709 /* Test if ops registered to this rec needs regs */
1710 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1711 {
1712 struct ftrace_ops *ops;
1713 bool keep_regs = false;
1714
1715 for (ops = ftrace_ops_list;
1716 ops != &ftrace_list_end; ops = ops->next) {
1717 /* pass rec in as regs to have non-NULL val */
1718 if (ftrace_ops_test(ops, rec->ip, rec)) {
1719 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1720 keep_regs = true;
1721 break;
1722 }
1723 }
1724 }
1725
1726 return keep_regs;
1727 }
1728
1729 static struct ftrace_ops *
1730 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1731 static struct ftrace_ops *
1732 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
1733 static struct ftrace_ops *
1734 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1735
1736 static bool skip_record(struct dyn_ftrace *rec)
1737 {
1738 /*
1739 * At boot up, weak functions are marked as disabled. Function tracing
1740 * can be enabled before they are, and they still need to be disabled now.
1741 * If the record is disabled, still continue if it is marked as already
1742 * enabled (this is needed to keep the accounting working).
1743 */
1744 return rec->flags & FTRACE_FL_DISABLED &&
1745 !(rec->flags & FTRACE_FL_ENABLED);
1746 }
1747
1748 /*
1749 * This is the main engine to the ftrace updates to the dyn_ftrace records.
1750 *
1751 * It will iterate through all the available ftrace functions
1752 * (the ones that ftrace can have callbacks to) and set the flags
1753 * in the associated dyn_ftrace records.
1754 *
1755 * @inc: If true, the functions associated to @ops are added to
1756 * the dyn_ftrace records, otherwise they are removed.
1757 */
1758 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1759 bool inc)
1760 {
1761 struct ftrace_hash *hash;
1762 struct ftrace_hash *notrace_hash;
1763 struct ftrace_page *pg;
1764 struct dyn_ftrace *rec;
1765 bool update = false;
1766 int count = 0;
1767 int all = false;
1768
1769 /* Only update if the ops has been registered */
1770 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1771 return false;
1772
1773 /*
1774 * If the count is zero, we update all records.
1775 * Otherwise we just update the items in the hash.
1776 */
1777 hash = ops->func_hash->filter_hash;
1778 notrace_hash = ops->func_hash->notrace_hash;
1779 if (ftrace_hash_empty(hash))
1780 all = true;
1781
1782 do_for_each_ftrace_rec(pg, rec) {
1783 int in_notrace_hash = 0;
1784 int in_hash = 0;
1785 int match = 0;
1786
1787 if (skip_record(rec))
1788 continue;
1789
1790 if (all) {
1791 /*
1792 * Only the filter_hash affects all records.
1793 * Update if the record is not in the notrace hash.
1794 */
1795 if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip))
1796 match = 1;
1797 } else {
1798 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1799 in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip);
1800
1801 /*
1802 * We want to match all functions that are in the hash but
1803 * not in the other hash.
1804 */
1805 if (in_hash && !in_notrace_hash)
1806 match = 1;
1807 }
1808 if (!match)
1809 continue;
1810
1811 if (inc) {
1812 rec->flags++;
1813 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1814 return false;
1815
1816 if (ops->flags & FTRACE_OPS_FL_DIRECT)
1817 rec->flags |= FTRACE_FL_DIRECT;
1818
1819 /*
1820 * If there's only a single callback registered to a
1821 * function, and the ops has a trampoline registered
1822 * for it, then we can call it directly.
1823 */
1824 if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1825 rec->flags |= FTRACE_FL_TRAMP;
1826 else
1827 /*
1828 * If we are adding another function callback
1829 * to this function, and the previous had a
1830 * custom trampoline in use, then we need to go
1831 * back to the default trampoline.
1832 */
1833 rec->flags &= ~FTRACE_FL_TRAMP;
1834
1835 /*
1836 * If any ops wants regs saved for this function
1837 * then all ops will get saved regs.
1838 */
1839 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1840 rec->flags |= FTRACE_FL_REGS;
1841 } else {
1842 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1843 return false;
1844 rec->flags--;
1845
1846 /*
1847 * Only the internal direct_ops should have the
1848 * DIRECT flag set. Thus, if it is removing a
1849 * function, then that function should no longer
1850 * be direct.
1851 */
1852 if (ops->flags & FTRACE_OPS_FL_DIRECT)
1853 rec->flags &= ~FTRACE_FL_DIRECT;
1854
1855 /*
1856 * If the rec had REGS enabled and the ops that is
1857 * being removed had REGS set, then see if there is
1858 * still any ops for this record that wants regs.
1859 * If not, we can stop recording them.
1860 */
1861 if (ftrace_rec_count(rec) > 0 &&
1862 rec->flags & FTRACE_FL_REGS &&
1863 ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1864 if (!test_rec_ops_needs_regs(rec))
1865 rec->flags &= ~FTRACE_FL_REGS;
1866 }
1867
1868 /*
1869 * The TRAMP needs to be set only if rec count
1870 * is decremented to one, and the ops that is
1871 * left has a trampoline. As TRAMP can only be
1872 * enabled if there is only a single ops attached
1873 * to it.
1874 */
1875 if (ftrace_rec_count(rec) == 1 &&
1876 ftrace_find_tramp_ops_any_other(rec, ops))
1877 rec->flags |= FTRACE_FL_TRAMP;
1878 else
1879 rec->flags &= ~FTRACE_FL_TRAMP;
1880
1881 /*
1882 * flags will be cleared in ftrace_check_record()
1883 * if rec count is zero.
1884 */
1885 }
1886
1887 /*
1888 * If the rec has a single associated ops, and ops->func can be
1889 * called directly, allow the call site to call via the ops.
1890 */
1891 if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
1892 ftrace_rec_count(rec) == 1 &&
1893 ftrace_ops_get_func(ops) == ops->func)
1894 rec->flags |= FTRACE_FL_CALL_OPS;
1895 else
1896 rec->flags &= ~FTRACE_FL_CALL_OPS;
1897
1898 count++;
1899
1900 /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1901 update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
1902
1903 /* Shortcut, if we handled all records, we are done. */
1904 if (!all && count == hash->count)
1905 return update;
1906 } while_for_each_ftrace_rec();
1907
1908 return update;
1909 }
1910
1911 /*
1912 * This is called when an ops is removed from tracing. It will decrement
1913 * the counters of the dyn_ftrace records for all the functions that
1914 * the @ops attached to.
1915 */
1916 static bool ftrace_hash_rec_disable(struct ftrace_ops *ops)
1917 {
1918 return __ftrace_hash_rec_update(ops, false);
1919 }
1920
1921 /*
1922 * This is called when an ops is added to tracing. It will increment
1923 * the counters of the dyn_ftrace records for all the functions that
1924 * the @ops attached to.
1925 */
1926 static bool ftrace_hash_rec_enable(struct ftrace_ops *ops)
1927 {
1928 return __ftrace_hash_rec_update(ops, true);
1929 }
1930
1931 /*
1932 * This function will update what functions @ops traces when its filter
1933 * changes.
1934 *
1935 * The @inc states if the @ops callbacks are going to be added or removed.
1936 * When one of the @ops hashes is updated to a "new_hash", the dyn_ftrace
1937 * records are updated via:
1938 *
1939 * ftrace_hash_rec_disable_modify(ops);
1940 * ops->hash = new_hash
1941 * ftrace_hash_rec_enable_modify(ops);
1942 *
1943 * Where the @ops is removed from all the records it is tracing using
1944 * its old hash. The @ops hash is updated to the new hash, and then
1945 * the @ops is added back to the records so that it is tracing all
1946 * the new functions.
1947 */
1948 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, bool inc)
1949 {
1950 struct ftrace_ops *op;
1951
1952 __ftrace_hash_rec_update(ops, inc);
1953
1954 if (ops->func_hash != &global_ops.local_hash)
1955 return;
1956
1957 /*
1958 * If the ops shares the global_ops hash, then we need to update
1959 * all ops that are enabled and use this hash.
1960 */
1961 do_for_each_ftrace_op(op, ftrace_ops_list) {
1962 /* Already done */
1963 if (op == ops)
1964 continue;
1965 if (op->func_hash == &global_ops.local_hash)
1966 __ftrace_hash_rec_update(op, inc);
1967 } while_for_each_ftrace_op(op);
1968 }
1969
1970 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops)
1971 {
1972 ftrace_hash_rec_update_modify(ops, false);
1973 }
1974
1975 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops)
1976 {
1977 ftrace_hash_rec_update_modify(ops, true);
1978 }
1979
1980 /*
1981 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1982 * or no update is needed, -EBUSY if it detects a conflict of the flag
1983 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1984 * Note that old_hash and new_hash have the following meanings:
1985 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1986 * - If the hash is EMPTY_HASH, it hits nothing
1987 * - Anything else hits the recs which match the hash entries.
1988 *
1989 * DIRECT ops does not have IPMODIFY flag, but we still need to check it
1990 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
1991 * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
1992 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
1993 * the return value to the caller and eventually to the owner of the DIRECT
1994 * ops.
1995 */
1996 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1997 struct ftrace_hash *old_hash,
1998 struct ftrace_hash *new_hash)
1999 {
2000 struct ftrace_page *pg;
2001 struct dyn_ftrace *rec, *end = NULL;
2002 int in_old, in_new;
2003 bool is_ipmodify, is_direct;
2004
2005 /* Only update if the ops has been registered */
2006 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2007 return 0;
2008
2009 is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
2010 is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;
2011
2012 /* neither IPMODIFY nor DIRECT, skip */
2013 if (!is_ipmodify && !is_direct)
2014 return 0;
2015
2016 if (WARN_ON_ONCE(is_ipmodify && is_direct))
2017 return 0;
2018
2019 /*
2020 * Since the IPMODIFY and DIRECT are very address sensitive
2021 * actions, we do not allow ftrace_ops to set all functions to a new
2022 * hash.
2023 */
2024 if (!new_hash || !old_hash)
2025 return -EINVAL;
2026
2027 /* Update rec->flags */
2028 do_for_each_ftrace_rec(pg, rec) {
2029
2030 if (rec->flags & FTRACE_FL_DISABLED)
2031 continue;
2032
2033 /* We need to update only differences of filter_hash */
2034 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
2035 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
2036 if (in_old == in_new)
2037 continue;
2038
2039 if (in_new) {
2040 if (rec->flags & FTRACE_FL_IPMODIFY) {
2041 int ret;
2042
2043 /* Cannot have two ipmodify on same rec */
2044 if (is_ipmodify)
2045 goto rollback;
2046
2047 FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
2048
2049 /*
2050 * Another ops with IPMODIFY is already
2051 * attached. We are now attaching a direct
2052 * ops. Run SHARE_IPMODIFY_SELF, to check
2053 * whether sharing is supported.
2054 */
2055 if (!ops->ops_func)
2056 return -EBUSY;
2057 ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
2058 if (ret)
2059 return ret;
2060 } else if (is_ipmodify) {
2061 rec->flags |= FTRACE_FL_IPMODIFY;
2062 }
2063 } else if (is_ipmodify) {
2064 rec->flags &= ~FTRACE_FL_IPMODIFY;
2065 }
2066 } while_for_each_ftrace_rec();
2067
2068 return 0;
2069
2070 rollback:
2071 end = rec;
2072
2073 /* Roll back what we did above */
2074 do_for_each_ftrace_rec(pg, rec) {
2075
2076 if (rec->flags & FTRACE_FL_DISABLED)
2077 continue;
2078
2079 if (rec == end)
2080 goto err_out;
2081
2082 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
2083 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
2084 if (in_old == in_new)
2085 continue;
2086
2087 if (in_new)
2088 rec->flags &= ~FTRACE_FL_IPMODIFY;
2089 else
2090 rec->flags |= FTRACE_FL_IPMODIFY;
2091 } while_for_each_ftrace_rec();
2092
2093 err_out:
2094 return -EBUSY;
2095 }
2096
2097 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
2098 {
2099 struct ftrace_hash *hash = ops->func_hash->filter_hash;
2100
2101 if (ftrace_hash_empty(hash))
2102 hash = NULL;
2103
2104 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
2105 }
2106
2107 /* Disabling always succeeds */
2108 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
2109 {
2110 struct ftrace_hash *hash = ops->func_hash->filter_hash;
2111
2112 if (ftrace_hash_empty(hash))
2113 hash = NULL;
2114
2115 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
2116 }
2117
2118 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
2119 struct ftrace_hash *new_hash)
2120 {
2121 struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
2122
2123 if (ftrace_hash_empty(old_hash))
2124 old_hash = NULL;
2125
2126 if (ftrace_hash_empty(new_hash))
2127 new_hash = NULL;
2128
2129 return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
2130 }
2131
2132 static void print_ip_ins(const char *fmt, const unsigned char *p)
2133 {
2134 char ins[MCOUNT_INSN_SIZE];
2135
2136 if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
2137 printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
2138 return;
2139 }
2140
2141 printk(KERN_CONT "%s", fmt);
2142 pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
2143 }
2144
2145 enum ftrace_bug_type ftrace_bug_type;
2146 const void *ftrace_expected;
2147
2148 static void print_bug_type(void)
2149 {
2150 switch (ftrace_bug_type) {
2151 case FTRACE_BUG_UNKNOWN:
2152 break;
2153 case FTRACE_BUG_INIT:
2154 pr_info("Initializing ftrace call sites\n");
2155 break;
2156 case FTRACE_BUG_NOP:
2157 pr_info("Setting ftrace call site to NOP\n");
2158 break;
2159 case FTRACE_BUG_CALL:
2160 pr_info("Setting ftrace call site to call ftrace function\n");
2161 break;
2162 case FTRACE_BUG_UPDATE:
2163 pr_info("Updating ftrace call site to call a different ftrace function\n");
2164 break;
2165 }
2166 }
2167
2168 /**
2169 * ftrace_bug - report and shutdown function tracer
2170 * @failed: The failed type (EFAULT, EINVAL, EPERM)
2171 * @rec: The record that failed
2172 *
2173 * The arch code that enables or disables the function tracing
2174 * can call ftrace_bug() when it has detected a problem in
2175 * modifying the code. @failed should be one of:
2176 * EFAULT - if the problem happens on reading the @ip address
2177 * EINVAL - if what is read at @ip is not what was expected
2178 * EPERM - if the problem happens on writing to the @ip address
2179 */
2180 void ftrace_bug(int failed, struct dyn_ftrace *rec)
2181 {
2182 unsigned long ip = rec ? rec->ip : 0;
2183
2184 pr_info("------------[ ftrace bug ]------------\n");
2185
2186 switch (failed) {
2187 case -EFAULT:
2188 pr_info("ftrace faulted on modifying ");
2189 print_ip_sym(KERN_INFO, ip);
2190 break;
2191 case -EINVAL:
2192 pr_info("ftrace failed to modify ");
2193 print_ip_sym(KERN_INFO, ip);
2194 print_ip_ins(" actual: ", (unsigned char *)ip);
2195 pr_cont("\n");
2196 if (ftrace_expected) {
2197 print_ip_ins(" expected: ", ftrace_expected);
2198 pr_cont("\n");
2199 }
2200 break;
2201 case -EPERM:
2202 pr_info("ftrace faulted on writing ");
2203 print_ip_sym(KERN_INFO, ip);
2204 break;
2205 default:
2206 pr_info("ftrace faulted on unknown error ");
2207 print_ip_sym(KERN_INFO, ip);
2208 }
2209 print_bug_type();
2210 if (rec) {
2211 struct ftrace_ops *ops = NULL;
2212
2213 pr_info("ftrace record flags: %lx\n", rec->flags);
2214 pr_cont(" (%ld)%s%s", ftrace_rec_count(rec),
2215 rec->flags & FTRACE_FL_REGS ? " R" : " ",
2216 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ");
2217 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2218 ops = ftrace_find_tramp_ops_any(rec);
2219 if (ops) {
2220 do {
2221 pr_cont("\ttramp: %pS (%pS)",
2222 (void *)ops->trampoline,
2223 (void *)ops->func);
2224 ops = ftrace_find_tramp_ops_next(rec, ops);
2225 } while (ops);
2226 } else
2227 pr_cont("\ttramp: ERROR!");
2228
2229 }
2230 ip = ftrace_get_addr_curr(rec);
2231 pr_cont("\n expected tramp: %lx\n", ip);
2232 }
2233
2234 FTRACE_WARN_ON_ONCE(1);
2235 }
2236
2237 static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2238 {
2239 unsigned long flag = 0UL;
2240
2241 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2242
2243 if (skip_record(rec))
2244 return FTRACE_UPDATE_IGNORE;
2245
2246 /*
2247 * If we are updating calls:
2248 *
2249 * If the record has a ref count, then we need to enable it
2250 * because someone is using it.
2251 *
2252 * Otherwise we make sure it's disabled.
2253 *
2254 * If we are disabling calls, then disable all records that
2255 * are enabled.
2256 */
2257 if (enable && ftrace_rec_count(rec))
2258 flag = FTRACE_FL_ENABLED;
2259
2260 /*
2261 * If enabling and the REGS flag does not match the REGS_EN, or
2262 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2263 * this record. Set flags to fail the compare against ENABLED.
2264 * Same for direct calls.
2265 */
2266 if (flag) {
2267 if (!(rec->flags & FTRACE_FL_REGS) !=
2268 !(rec->flags & FTRACE_FL_REGS_EN))
2269 flag |= FTRACE_FL_REGS;
2270
2271 if (!(rec->flags & FTRACE_FL_TRAMP) !=
2272 !(rec->flags & FTRACE_FL_TRAMP_EN))
2273 flag |= FTRACE_FL_TRAMP;
2274
2275 /*
2276 * Direct calls are special, as the count matters.
2277 * We must flag the record for an update if the
2278 * DIRECT and DIRECT_EN flags do not match, but only
2279 * if the count is 1. That's because, if the
2280 * count is anything other than one, we do not
2281 * want the direct call enabled (it will be done via the
2282 * direct helper). But if DIRECT_EN is set and
2283 * the count is not one, we need to clear it.
2284 *
2285 */
2286 if (ftrace_rec_count(rec) == 1) {
2287 if (!(rec->flags & FTRACE_FL_DIRECT) !=
2288 !(rec->flags & FTRACE_FL_DIRECT_EN))
2289 flag |= FTRACE_FL_DIRECT;
2290 } else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2291 flag |= FTRACE_FL_DIRECT;
2292 }
2293
2294 /*
2295 * Ops calls are special, as count matters.
2296 * As with direct calls, they must only be enabled when count
2297 * is one, otherwise they'll be handled via the list ops.
2298 */
2299 if (ftrace_rec_count(rec) == 1) {
2300 if (!(rec->flags & FTRACE_FL_CALL_OPS) !=
2301 !(rec->flags & FTRACE_FL_CALL_OPS_EN))
2302 flag |= FTRACE_FL_CALL_OPS;
2303 } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
2304 flag |= FTRACE_FL_CALL_OPS;
2305 }
2306 }
2307
2308 /* If the state of this record hasn't changed, then do nothing */
2309 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2310 return FTRACE_UPDATE_IGNORE;
2311
2312 if (flag) {
2313 /* Save off if rec is being enabled (for return value) */
2314 flag ^= rec->flags & FTRACE_FL_ENABLED;
2315
2316 if (update) {
2317 rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED;
2318 if (flag & FTRACE_FL_REGS) {
2319 if (rec->flags & FTRACE_FL_REGS)
2320 rec->flags |= FTRACE_FL_REGS_EN;
2321 else
2322 rec->flags &= ~FTRACE_FL_REGS_EN;
2323 }
2324 if (flag & FTRACE_FL_TRAMP) {
2325 if (rec->flags & FTRACE_FL_TRAMP)
2326 rec->flags |= FTRACE_FL_TRAMP_EN;
2327 else
2328 rec->flags &= ~FTRACE_FL_TRAMP_EN;
2329 }
2330
2331 /* Keep track of anything that modifies the function */
2332 if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY))
2333 rec->flags |= FTRACE_FL_MODIFIED;
2334
2335 if (flag & FTRACE_FL_DIRECT) {
2336 /*
2337 * If there's only one user (direct_ops helper)
2338 * then we can call the direct function
2339 * directly (no ftrace trampoline).
2340 */
2341 if (ftrace_rec_count(rec) == 1) {
2342 if (rec->flags & FTRACE_FL_DIRECT)
2343 rec->flags |= FTRACE_FL_DIRECT_EN;
2344 else
2345 rec->flags &= ~FTRACE_FL_DIRECT_EN;
2346 } else {
2347 /*
2348 * Can only call directly if there's
2349 * only one callback to the function.
2350 */
2351 rec->flags &= ~FTRACE_FL_DIRECT_EN;
2352 }
2353 }
2354
2355 if (flag & FTRACE_FL_CALL_OPS) {
2356 if (ftrace_rec_count(rec) == 1) {
2357 if (rec->flags & FTRACE_FL_CALL_OPS)
2358 rec->flags |= FTRACE_FL_CALL_OPS_EN;
2359 else
2360 rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
2361 } else {
2362 /*
2363 * Can only call directly if there's
2364 * only one set of associated ops.
2365 */
2366 rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
2367 }
2368 }
2369 }
2370
2371 /*
2372 * If this record is being updated from a nop, then
2373 * return UPDATE_MAKE_CALL.
2374 * Otherwise,
2375 * return UPDATE_MODIFY_CALL to tell the caller to convert
2376 * from a save-regs function to a non-save-regs function, or
2377 * vice versa, or from a trampoline call.
2378 */
2379 if (flag & FTRACE_FL_ENABLED) {
2380 ftrace_bug_type = FTRACE_BUG_CALL;
2381 return FTRACE_UPDATE_MAKE_CALL;
2382 }
2383
2384 ftrace_bug_type = FTRACE_BUG_UPDATE;
2385 return FTRACE_UPDATE_MODIFY_CALL;
2386 }
2387
2388 if (update) {
2389 /* If there's no more users, clear all flags */
2390 if (!ftrace_rec_count(rec))
2391 rec->flags &= FTRACE_NOCLEAR_FLAGS;
2392 else
2393 /*
2394 * Just disable the record, but keep the ops TRAMP
2395 * and REGS states. The _EN flags must be disabled though.
2396 */
2397 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2398 FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN |
2399 FTRACE_FL_CALL_OPS_EN);
2400 }
2401
2402 ftrace_bug_type = FTRACE_BUG_NOP;
2403 return FTRACE_UPDATE_MAKE_NOP;
2404 }
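/*
 * A worked example of the logic above (illustrative only): suppose a rec
 * is already ENABLED with one non-regs caller, and a second caller that
 * wants regs saved is added, so FTRACE_FL_REGS is now set but
 * FTRACE_FL_REGS_EN is not.  With enable and update true, flag becomes
 * ENABLED | REGS, which differs from the rec's ENABLED state, so the
 * record is not ignored.  The xor against the already-set ENABLED bit
 * leaves only REGS in flag, and the function returns
 * FTRACE_UPDATE_MODIFY_CALL: the call site must be converted to the
 * regs-saving caller.  Had the rec not been enabled at all, ENABLED would
 * remain in flag and the result would be FTRACE_UPDATE_MAKE_CALL instead.
 */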
2405
2406 /**
2407 * ftrace_update_record - set a record that now is tracing or not
2408 * @rec: the record to update
2409 * @enable: set to true if the record is tracing, false to force disable
2410 *
2411 * The records that represent all functions that can be traced need
2412 * to be updated when tracing has been enabled.
2413 */
2414 int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2415 {
2416 return ftrace_check_record(rec, enable, true);
2417 }
2418
2419 /**
2420 * ftrace_test_record - check if the record has been enabled or not
2421 * @rec: the record to test
2422 * @enable: set to true to check if enabled, false if it is disabled
2423 *
2424 * The arch code may need to test if a record is already set to
2425 * tracing to determine how to modify the function code that it
2426 * represents.
2427 */
2428 int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2429 {
2430 return ftrace_check_record(rec, enable, false);
2431 }
2432
2433 static struct ftrace_ops *
2434 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2435 {
2436 struct ftrace_ops *op;
2437 unsigned long ip = rec->ip;
2438
2439 do_for_each_ftrace_op(op, ftrace_ops_list) {
2440
2441 if (!op->trampoline)
2442 continue;
2443
2444 if (hash_contains_ip(ip, op->func_hash))
2445 return op;
2446 } while_for_each_ftrace_op(op);
2447
2448 return NULL;
2449 }
2450
2451 static struct ftrace_ops *
2452 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2453 {
2454 struct ftrace_ops *op;
2455 unsigned long ip = rec->ip;
2456
2457 do_for_each_ftrace_op(op, ftrace_ops_list) {
2458
2459 if (op == op_exclude || !op->trampoline)
2460 continue;
2461
2462 if (hash_contains_ip(ip, op->func_hash))
2463 return op;
2464 } while_for_each_ftrace_op(op);
2465
2466 return NULL;
2467 }
2468
2469 static struct ftrace_ops *
2470 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2471 struct ftrace_ops *op)
2472 {
2473 unsigned long ip = rec->ip;
2474
2475 while_for_each_ftrace_op(op) {
2476
2477 if (!op->trampoline)
2478 continue;
2479
2480 if (hash_contains_ip(ip, op->func_hash))
2481 return op;
2482 }
2483
2484 return NULL;
2485 }
2486
2487 static struct ftrace_ops *
2488 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2489 {
2490 struct ftrace_ops *op;
2491 unsigned long ip = rec->ip;
2492
2493 /*
2494 * Need to check removed ops first.
2495 * If they are being removed, and this rec has a tramp,
2496 * and this rec is in the ops list, then it would be the
2497 * one with the tramp.
2498 */
2499 if (removed_ops) {
2500 if (hash_contains_ip(ip, &removed_ops->old_hash))
2501 return removed_ops;
2502 }
2503
2504 /*
2505 * Need to find the current trampoline for a rec.
2506 * Now, a trampoline is only attached to a rec if there
2507 * was a single 'ops' attached to it. But this can be called
2508 * when we are adding another op to the rec or removing the
2509 * current one. Thus, if the op is being added, we can
2510 * ignore it because it hasn't attached itself to the rec
2511 * yet.
2512 *
2513 * If an ops is being modified (hooking to different functions)
2514 * then we don't care about the new functions that are being
2515 * added, just the old ones (that are probably being removed).
2516 *
2517 * If we are adding an ops to a function that already is using
2518 * a trampoline, it needs to be removed (trampolines are only
2519 * for a single connected ops), then an ops that is not being
2520 * modified also needs to be checked.
2521 */
2522 do_for_each_ftrace_op(op, ftrace_ops_list) {
2523
2524 if (!op->trampoline)
2525 continue;
2526
2527 /*
2528 * If the ops is being added, it hasn't gotten to
2529 * the point to be removed from this tree yet.
2530 */
2531 if (op->flags & FTRACE_OPS_FL_ADDING)
2532 continue;
2533
2534
2535 /*
2536 * If the ops is being modified and is in the old
2537 * hash, then it is probably being removed from this
2538 * function.
2539 */
2540 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2541 hash_contains_ip(ip, &op->old_hash))
2542 return op;
2543 /*
2544 * If the ops is not being added or modified, and it's
2545 * in its normal filter hash, then this must be the one
2546 * we want!
2547 */
2548 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2549 hash_contains_ip(ip, op->func_hash))
2550 return op;
2551
2552 } while_for_each_ftrace_op(op);
2553
2554 return NULL;
2555 }
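/*
 * Illustrative example of the search above: ops A has a trampoline and is
 * the only ops attached to function X, so X's rec has TRAMP_EN set.  Now
 * ops B is being registered on X.  While B is being added it carries
 * FTRACE_OPS_FL_ADDING and is skipped here, so the lookup still resolves
 * to A (via A's normal filter hash, since A is not being modified), which
 * is the ops whose trampoline is currently wired into X's call site.
 */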
2556
2557 static struct ftrace_ops *
2558 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2559 {
2560 struct ftrace_ops *op;
2561 unsigned long ip = rec->ip;
2562
2563 do_for_each_ftrace_op(op, ftrace_ops_list) {
2564 /* pass rec in as regs to have non-NULL val */
2565 if (hash_contains_ip(ip, op->func_hash))
2566 return op;
2567 } while_for_each_ftrace_op(op);
2568
2569 return NULL;
2570 }
2571
2572 struct ftrace_ops *
2573 ftrace_find_unique_ops(struct dyn_ftrace *rec)
2574 {
2575 struct ftrace_ops *op, *found = NULL;
2576 unsigned long ip = rec->ip;
2577
2578 do_for_each_ftrace_op(op, ftrace_ops_list) {
2579
2580 if (hash_contains_ip(ip, op->func_hash)) {
2581 if (found)
2582 return NULL;
2583 found = op;
2584 }
2585
2586 } while_for_each_ftrace_op(op);
2587
2588 return found;
2589 }
2590
2591 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2592 /* Protected by rcu_tasks for reading, and direct_mutex for writing */
2593 static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
2594 static DEFINE_MUTEX(direct_mutex);
2595
2596 /*
2597 * Search the direct_functions hash to see if the given instruction pointer
2598 * has a direct caller attached to it.
2599 */
2600 unsigned long ftrace_find_rec_direct(unsigned long ip)
2601 {
2602 struct ftrace_func_entry *entry;
2603
2604 entry = __ftrace_lookup_ip(direct_functions, ip);
2605 if (!entry)
2606 return 0;
2607
2608 return entry->direct;
2609 }
2610
2611 static void call_direct_funcs(unsigned long ip, unsigned long pip,
2612 struct ftrace_ops *ops, struct ftrace_regs *fregs)
2613 {
2614 unsigned long addr = READ_ONCE(ops->direct_call);
2615
2616 if (!addr)
2617 return;
2618
2619 arch_ftrace_set_direct_caller(fregs, addr);
2620 }
2621 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
2622
2623 /**
2624 * ftrace_get_addr_new - Get the call address to set to
2625 * @rec: The ftrace record descriptor
2626 *
2627 * If the record has the FTRACE_FL_REGS set, that means that it
2628 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2629 * is not set, then it wants to convert to the normal callback.
2630 *
2631 * Returns: the address of the trampoline to set to
2632 */
2633 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2634 {
2635 struct ftrace_ops *ops;
2636 unsigned long addr;
2637
2638 if ((rec->flags & FTRACE_FL_DIRECT) &&
2639 (ftrace_rec_count(rec) == 1)) {
2640 addr = ftrace_find_rec_direct(rec->ip);
2641 if (addr)
2642 return addr;
2643 WARN_ON_ONCE(1);
2644 }
2645
2646 /* Trampolines take precedence over regs */
2647 if (rec->flags & FTRACE_FL_TRAMP) {
2648 ops = ftrace_find_tramp_ops_new(rec);
2649 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2650 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2651 (void *)rec->ip, (void *)rec->ip, rec->flags);
2652 /* Ftrace is shutting down, return anything */
2653 return (unsigned long)FTRACE_ADDR;
2654 }
2655 return ops->trampoline;
2656 }
2657
2658 if (rec->flags & FTRACE_FL_REGS)
2659 return (unsigned long)FTRACE_REGS_ADDR;
2660 else
2661 return (unsigned long)FTRACE_ADDR;
2662 }
2663
2664 /**
2665 * ftrace_get_addr_curr - Get the call address that is already there
2666 * @rec: The ftrace record descriptor
2667 *
2668 * The FTRACE_FL_REGS_EN is set when the record already points to
2669 * a function that saves all the regs. Basically the '_EN' version
2670 * represents the current state of the function.
2671 *
2672 * Returns: the address of the trampoline that is currently being called
2673 */
2674 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2675 {
2676 struct ftrace_ops *ops;
2677 unsigned long addr;
2678
2679 /* Direct calls take precedence over trampolines */
2680 if (rec->flags & FTRACE_FL_DIRECT_EN) {
2681 addr = ftrace_find_rec_direct(rec->ip);
2682 if (addr)
2683 return addr;
2684 WARN_ON_ONCE(1);
2685 }
2686
2687 /* Trampolines take precedence over regs */
2688 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2689 ops = ftrace_find_tramp_ops_curr(rec);
2690 if (FTRACE_WARN_ON(!ops)) {
2691 pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2692 (void *)rec->ip, (void *)rec->ip);
2693 /* Ftrace is shutting down, return anything */
2694 return (unsigned long)FTRACE_ADDR;
2695 }
2696 return ops->trampoline;
2697 }
2698
2699 if (rec->flags & FTRACE_FL_REGS_EN)
2700 return (unsigned long)FTRACE_REGS_ADDR;
2701 else
2702 return (unsigned long)FTRACE_ADDR;
2703 }
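/*
 * Summary of the two helpers above: ftrace_get_addr_new() and
 * ftrace_get_addr_curr() apply the same precedence, just against the
 * wanted flags vs. the currently enabled (_EN) flags:
 *
 *	DIRECT/DIRECT_EN -> the registered direct caller address
 *	TRAMP/TRAMP_EN	 -> the trampoline of the single attached ops
 *	REGS/REGS_EN	 -> FTRACE_REGS_ADDR (the regs-saving caller)
 *	otherwise	 -> FTRACE_ADDR (the normal ftrace caller)
 */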
2704
2705 static int
2706 __ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2707 {
2708 unsigned long ftrace_old_addr;
2709 unsigned long ftrace_addr;
2710 int ret;
2711
2712 ftrace_addr = ftrace_get_addr_new(rec);
2713
2714 /* This needs to be done before we call ftrace_update_record */
2715 ftrace_old_addr = ftrace_get_addr_curr(rec);
2716
2717 ret = ftrace_update_record(rec, enable);
2718
2719 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2720
2721 switch (ret) {
2722 case FTRACE_UPDATE_IGNORE:
2723 return 0;
2724
2725 case FTRACE_UPDATE_MAKE_CALL:
2726 ftrace_bug_type = FTRACE_BUG_CALL;
2727 return ftrace_make_call(rec, ftrace_addr);
2728
2729 case FTRACE_UPDATE_MAKE_NOP:
2730 ftrace_bug_type = FTRACE_BUG_NOP;
2731 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2732
2733 case FTRACE_UPDATE_MODIFY_CALL:
2734 ftrace_bug_type = FTRACE_BUG_UPDATE;
2735 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2736 }
2737
2738 return -1; /* unknown ftrace bug */
2739 }
2740
2741 void __weak ftrace_replace_code(int mod_flags)
2742 {
2743 struct dyn_ftrace *rec;
2744 struct ftrace_page *pg;
2745 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2746 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2747 int failed;
2748
2749 if (unlikely(ftrace_disabled))
2750 return;
2751
2752 do_for_each_ftrace_rec(pg, rec) {
2753
2754 if (skip_record(rec))
2755 continue;
2756
2757 failed = __ftrace_replace_code(rec, enable);
2758 if (failed) {
2759 ftrace_bug(failed, rec);
2760 /* Stop processing */
2761 return;
2762 }
2763 if (schedulable)
2764 cond_resched();
2765 } while_for_each_ftrace_rec();
2766 }
2767
2768 struct ftrace_rec_iter {
2769 struct ftrace_page *pg;
2770 int index;
2771 };
2772
2773 /**
2774 * ftrace_rec_iter_start - start up iterating over traced functions
2775 *
2776 * Returns: an iterator handle that is used to iterate over all
2777 * the records that represent address locations where functions
2778 * are traced.
2779 *
2780 * May return NULL if no records are available.
2781 */
2782 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2783 {
2784 /*
2785 * We only use a single iterator.
2786 * Protected by the ftrace_lock mutex.
2787 */
2788 static struct ftrace_rec_iter ftrace_rec_iter;
2789 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2790
2791 iter->pg = ftrace_pages_start;
2792 iter->index = 0;
2793
2794 /* Could have empty pages */
2795 while (iter->pg && !iter->pg->index)
2796 iter->pg = iter->pg->next;
2797
2798 if (!iter->pg)
2799 return NULL;
2800
2801 return iter;
2802 }
2803
2804 /**
2805 * ftrace_rec_iter_next - get the next record to process.
2806 * @iter: The handle to the iterator.
2807 *
2808 * Returns: the next iterator after the given iterator @iter.
2809 */
2810 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2811 {
2812 iter->index++;
2813
2814 if (iter->index >= iter->pg->index) {
2815 iter->pg = iter->pg->next;
2816 iter->index = 0;
2817
2818 /* Could have empty pages */
2819 while (iter->pg && !iter->pg->index)
2820 iter->pg = iter->pg->next;
2821 }
2822
2823 if (!iter->pg)
2824 return NULL;
2825
2826 return iter;
2827 }
2828
2829 /**
2830 * ftrace_rec_iter_record - get the record at the iterator location
2831 * @iter: The current iterator location
2832 *
2833 * Returns: the record that the current @iter is at.
2834 */
2835 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2836 {
2837 return &iter->pg->records[iter->index];
2838 }
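/*
 * Typical use of the iterator by arch code, as a sketch (the caller is
 * expected to hold ftrace_lock, see ftrace_rec_iter_start() above):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch or inspect rec->ip ...
 *	}
 */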
2839
2840 static int
2841 ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2842 {
2843 int ret;
2844
2845 if (unlikely(ftrace_disabled))
2846 return 0;
2847
2848 ret = ftrace_init_nop(mod, rec);
2849 if (ret) {
2850 ftrace_bug_type = FTRACE_BUG_INIT;
2851 ftrace_bug(ret, rec);
2852 return 0;
2853 }
2854 return 1;
2855 }
2856
2857 /*
2858 * archs can override this function if they must do something
2859 * before the modifying code is performed.
2860 */
2861 void __weak ftrace_arch_code_modify_prepare(void)
2862 {
2863 }
2864
2865 /*
2866 * archs can override this function if they must do something
2867 * after the modifying code is performed.
2868 */
2869 void __weak ftrace_arch_code_modify_post_process(void)
2870 {
2871 }
2872
2873 static int update_ftrace_func(ftrace_func_t func)
2874 {
2875 static ftrace_func_t save_func;
2876
2877 /* Avoid updating if it hasn't changed */
2878 if (func == save_func)
2879 return 0;
2880
2881 save_func = func;
2882
2883 return ftrace_update_ftrace_func(func);
2884 }
2885
2886 void ftrace_modify_all_code(int command)
2887 {
2888 int update = command & FTRACE_UPDATE_TRACE_FUNC;
2889 int mod_flags = 0;
2890 int err = 0;
2891
2892 if (command & FTRACE_MAY_SLEEP)
2893 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2894
2895 /*
2896 * If the ftrace_caller calls a ftrace_ops func directly,
2897 * we need to make sure that it only traces functions it
2898 * expects to trace. When doing the switch of functions,
2899 * we need to update to the ftrace_ops_list_func first
2900 * before the transition between old and new calls are set,
2901 * as the ftrace_ops_list_func will check the ops hashes
2902 * to make sure the ops have the right functions
2903 * traced.
2904 */
2905 if (update) {
2906 err = update_ftrace_func(ftrace_ops_list_func);
2907 if (FTRACE_WARN_ON(err))
2908 return;
2909 }
2910
2911 if (command & FTRACE_UPDATE_CALLS)
2912 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2913 else if (command & FTRACE_DISABLE_CALLS)
2914 ftrace_replace_code(mod_flags);
2915
2916 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2917 function_trace_op = set_function_trace_op;
2918 smp_wmb();
2919 /* If irqs are disabled, we are in stop machine */
2920 if (!irqs_disabled())
2921 smp_call_function(ftrace_sync_ipi, NULL, 1);
2922 err = update_ftrace_func(ftrace_trace_function);
2923 if (FTRACE_WARN_ON(err))
2924 return;
2925 }
2926
2927 if (command & FTRACE_START_FUNC_RET)
2928 err = ftrace_enable_ftrace_graph_caller();
2929 else if (command & FTRACE_STOP_FUNC_RET)
2930 err = ftrace_disable_ftrace_graph_caller();
2931 FTRACE_WARN_ON(err);
2932 }
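/*
 * A sketch of the common path (illustrative): when the first ops is
 * registered, ftrace_startup() typically ends up down here with a command
 * of FTRACE_UPDATE_CALLS | FTRACE_UPDATE_TRACE_FUNC.  The list function is
 * installed first, the call sites are then patched to calls, and only
 * after that is the final trace function written into the ftrace call
 * site (unless it is the list function, which is already in place).
 */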
2933
2934 static int __ftrace_modify_code(void *data)
2935 {
2936 int *command = data;
2937
2938 ftrace_modify_all_code(*command);
2939
2940 return 0;
2941 }
2942
2943 /**
2944 * ftrace_run_stop_machine - go back to the stop machine method
2945 * @command: The command to tell ftrace what to do
2946 *
2947 * If an arch needs to fall back to the stop machine method, then
2948 * it can call this function.
2949 */
2950 void ftrace_run_stop_machine(int command)
2951 {
2952 stop_machine(__ftrace_modify_code, &command, NULL);
2953 }
2954
2955 /**
2956 * arch_ftrace_update_code - modify the code to trace or not trace
2957 * @command: The command that needs to be done
2958 *
2959 * Archs can override this function if they do not need to
2960 * run stop_machine() to modify code.
2961 */
2962 void __weak arch_ftrace_update_code(int command)
2963 {
2964 ftrace_run_stop_machine(command);
2965 }
2966
2967 static void ftrace_run_update_code(int command)
2968 {
2969 ftrace_arch_code_modify_prepare();
2970
2971 /*
2972 * By default we use stop_machine() to modify the code.
2973 * But archs can do whatever they want as long as it
2974 * is safe. The stop_machine() is the safest, but also
2975 * produces the most overhead.
2976 */
2977 arch_ftrace_update_code(command);
2978
2979 ftrace_arch_code_modify_post_process();
2980 }
2981
2982 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2983 struct ftrace_ops_hash *old_hash)
2984 {
2985 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2986 ops->old_hash.filter_hash = old_hash->filter_hash;
2987 ops->old_hash.notrace_hash = old_hash->notrace_hash;
2988 ftrace_run_update_code(command);
2989 ops->old_hash.filter_hash = NULL;
2990 ops->old_hash.notrace_hash = NULL;
2991 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2992 }
2993
2994 static ftrace_func_t saved_ftrace_func;
2995 static int ftrace_start_up;
2996
2997 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2998 {
2999 }
3000
3001 /* List of trace_ops that have allocated trampolines */
3002 static LIST_HEAD(ftrace_ops_trampoline_list);
3003
3004 static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
3005 {
3006 lockdep_assert_held(&ftrace_lock);
3007 list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
3008 }
3009
3010 static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
3011 {
3012 lockdep_assert_held(&ftrace_lock);
3013 list_del_rcu(&ops->list);
3014 synchronize_rcu();
3015 }
3016
3017 /*
3018 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
3019 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
3020 * not a module.
3021 */
3022 #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
3023 #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
3024
3025 static void ftrace_trampoline_free(struct ftrace_ops *ops)
3026 {
3027 if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
3028 ops->trampoline) {
3029 /*
3030 * Record the text poke event before the ksymbol unregister
3031 * event.
3032 */
3033 perf_event_text_poke((void *)ops->trampoline,
3034 (void *)ops->trampoline,
3035 ops->trampoline_size, NULL, 0);
3036 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
3037 ops->trampoline, ops->trampoline_size,
3038 true, FTRACE_TRAMPOLINE_SYM);
3039 /* Remove from kallsyms after the perf events */
3040 ftrace_remove_trampoline_from_kallsyms(ops);
3041 }
3042
3043 arch_ftrace_trampoline_free(ops);
3044 }
3045
3046 static void ftrace_startup_enable(int command)
3047 {
3048 if (saved_ftrace_func != ftrace_trace_function) {
3049 saved_ftrace_func = ftrace_trace_function;
3050 command |= FTRACE_UPDATE_TRACE_FUNC;
3051 }
3052
3053 if (!command || !ftrace_enabled)
3054 return;
3055
3056 ftrace_run_update_code(command);
3057 }
3058
3059 static void ftrace_startup_all(int command)
3060 {
3061 update_all_ops = true;
3062 ftrace_startup_enable(command);
3063 update_all_ops = false;
3064 }
3065
3066 int ftrace_startup(struct ftrace_ops *ops, int command)
3067 {
3068 int ret;
3069
3070 if (unlikely(ftrace_disabled))
3071 return -ENODEV;
3072
3073 ret = __register_ftrace_function(ops);
3074 if (ret)
3075 return ret;
3076
3077 ftrace_start_up++;
3078
3079 /*
3080 * Note that ftrace probes use this to start up
3081 * and modify functions it will probe. But we still
3082 * set the ADDING flag for modification, as probes
3083 * do not have trampolines. If they add them in the
3084 * future, then the probes will need to distinguish
3085 * between adding and updating probes.
3086 */
3087 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
3088
3089 ret = ftrace_hash_ipmodify_enable(ops);
3090 if (ret < 0) {
3091 /* Rollback registration process */
3092 __unregister_ftrace_function(ops);
3093 ftrace_start_up--;
3094 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3095 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
3096 ftrace_trampoline_free(ops);
3097 return ret;
3098 }
3099
3100 if (ftrace_hash_rec_enable(ops))
3101 command |= FTRACE_UPDATE_CALLS;
3102
3103 ftrace_startup_enable(command);
3104
3105 /*
3106 * If ftrace is in an undefined state, we just remove the ops from the list
3107 * to prevent a NULL pointer dereference, instead of totally rolling it back and
3108 * freeing the trampoline, because those actions could cause further damage.
3109 */
3110 if (unlikely(ftrace_disabled)) {
3111 __unregister_ftrace_function(ops);
3112 return -ENODEV;
3113 }
3114
3115 ops->flags &= ~FTRACE_OPS_FL_ADDING;
3116
3117 return 0;
3118 }
3119
3120 int ftrace_shutdown(struct ftrace_ops *ops, int command)
3121 {
3122 int ret;
3123
3124 if (unlikely(ftrace_disabled))
3125 return -ENODEV;
3126
3127 ret = __unregister_ftrace_function(ops);
3128 if (ret)
3129 return ret;
3130
3131 ftrace_start_up--;
3132 /*
3133 * Just warn in case of unbalance; no need to kill ftrace, it's not
3134 * critical, but the ftrace_call callers may never be nopped again after
3135 * further ftrace uses.
3136 */
3137 WARN_ON_ONCE(ftrace_start_up < 0);
3138
3139 /* Disabling ipmodify never fails */
3140 ftrace_hash_ipmodify_disable(ops);
3141
3142 if (ftrace_hash_rec_disable(ops))
3143 command |= FTRACE_UPDATE_CALLS;
3144
3145 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3146
3147 if (saved_ftrace_func != ftrace_trace_function) {
3148 saved_ftrace_func = ftrace_trace_function;
3149 command |= FTRACE_UPDATE_TRACE_FUNC;
3150 }
3151
3152 if (!command || !ftrace_enabled)
3153 goto out;
3154
3155 /*
3156 * If the ops uses a trampoline, then it needs to be
3157 * tested first on update.
3158 */
3159 ops->flags |= FTRACE_OPS_FL_REMOVING;
3160 removed_ops = ops;
3161
3162 /* The trampoline logic checks the old hashes */
3163 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
3164 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
3165
3166 ftrace_run_update_code(command);
3167
3168 /*
3169 * If there's no more ops registered with ftrace, run a
3170 * sanity check to make sure all rec flags are cleared.
3171 */
3172 if (rcu_dereference_protected(ftrace_ops_list,
3173 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
3174 struct ftrace_page *pg;
3175 struct dyn_ftrace *rec;
3176
3177 do_for_each_ftrace_rec(pg, rec) {
3178 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS))
3179 pr_warn(" %pS flags:%lx\n",
3180 (void *)rec->ip, rec->flags);
3181 } while_for_each_ftrace_rec();
3182 }
3183
3184 ops->old_hash.filter_hash = NULL;
3185 ops->old_hash.notrace_hash = NULL;
3186
3187 removed_ops = NULL;
3188 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
3189
3190 out:
3191 /*
3192 * Dynamic ops may be freed, we must make sure that all
3193 * callers are done before leaving this function.
3194 */
3195 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
3196 /*
3197 * We need to do a hard force of sched synchronization.
3198 * This is because we use preempt_disable() to do RCU, but
3199 * the function tracers can be called where RCU is not watching
3200 * (like before user_exit()). We can not rely on the RCU
3201 * infrastructure to do the synchronization, thus we must do it
3202 * ourselves.
3203 */
3204 synchronize_rcu_tasks_rude();
3205
3206 /*
3207 * When the kernel is preemptive, tasks can be preempted
3208 * while on a ftrace trampoline. Just scheduling a task on
3209 * a CPU is not good enough to flush them. Calling
3210 * synchronize_rcu_tasks() will wait for those tasks to
3211 * execute and either schedule voluntarily or enter user space.
3212 */
3213 synchronize_rcu_tasks();
3214
3215 ftrace_trampoline_free(ops);
3216 }
3217
3218 return 0;
3219 }
3220
3221 /* Simply make a copy of @src and return it */
3222 static struct ftrace_hash *copy_hash(struct ftrace_hash *src)
3223 {
3224 if (ftrace_hash_empty(src))
3225 return EMPTY_HASH;
3226
3227 return alloc_and_copy_ftrace_hash(src->size_bits, src);
3228 }
3229
3230 /*
3231 * Append @new_hash entries to @hash:
3232 *
3233 * If @hash is the EMPTY_HASH then it traces all functions and nothing
3234 * needs to be done.
3235 *
3236 * If @new_hash is the EMPTY_HASH, then make *hash the EMPTY_HASH so
3237 * that it traces everything.
3238 *
3239 * Otherwise, go through all of @new_hash and add anything that @hash
3240 * doesn't already have, to @hash.
3241 *
3242 * The filter_hash updates use just the append_hash() function;
3243 * the notrace_hash updates do not.
3244 */
3245 static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash)
3246 {
3247 struct ftrace_func_entry *entry;
3248 int size;
3249 int i;
3250
3251 /* An empty hash does everything */
3252 if (ftrace_hash_empty(*hash))
3253 return 0;
3254
3255 /* If new_hash has everything make hash have everything */
3256 if (ftrace_hash_empty(new_hash)) {
3257 free_ftrace_hash(*hash);
3258 *hash = EMPTY_HASH;
3259 return 0;
3260 }
3261
3262 size = 1 << new_hash->size_bits;
3263 for (i = 0; i < size; i++) {
3264 hlist_for_each_entry(entry, &new_hash->buckets[i], hlist) {
3265 /* Only add if not already in hash */
3266 if (!__ftrace_lookup_ip(*hash, entry->ip) &&
3267 add_hash_entry(*hash, entry->ip) == NULL)
3268 return -ENOMEM;
3269 }
3270 }
3271 return 0;
3272 }
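/*
 * Worked example (illustrative function names): if *hash contains
 * { kfree, kmalloc } and @new_hash contains { kmalloc, schedule }, the
 * result is { kfree, kmalloc, schedule }.  If either hash is the
 * EMPTY_HASH, the result is the EMPTY_HASH, which for a filter hash
 * means "trace everything".
 */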
3273
3274 /*
3275 * Add to @hash only those that are in both @new_hash1 and @new_hash2
3276 *
3277 * The notrace_hash updates use just the intersect_hash() function;
3278 * the filter_hash updates do not.
3279 */
3280 static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash1,
3281 struct ftrace_hash *new_hash2)
3282 {
3283 struct ftrace_func_entry *entry;
3284 int size;
3285 int i;
3286
3287 /*
3288 * If new_hash1 or new_hash2 is the EMPTY_HASH then make the hash
3289 * empty as well, since an empty hash for notrace means none are notraced.
3290 */
3291 if (ftrace_hash_empty(new_hash1) || ftrace_hash_empty(new_hash2)) {
3292 free_ftrace_hash(*hash);
3293 *hash = EMPTY_HASH;
3294 return 0;
3295 }
3296
3297 size = 1 << new_hash1->size_bits;
3298 for (i = 0; i < size; i++) {
3299 hlist_for_each_entry(entry, &new_hash1->buckets[i], hlist) {
3300 /* Only add if in both @new_hash1 and @new_hash2 */
3301 if (__ftrace_lookup_ip(new_hash2, entry->ip) &&
3302 add_hash_entry(*hash, entry->ip) == NULL)
3303 return -ENOMEM;
3304 }
3305 }
3306 /* If nothing intersects, make it the empty set */
3307 if (ftrace_hash_empty(*hash)) {
3308 free_ftrace_hash(*hash);
3309 *hash = EMPTY_HASH;
3310 }
3311 return 0;
3312 }
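/*
 * Worked example (illustrative function names): intersecting
 * { kfree, kmalloc } with { kmalloc, schedule } leaves { kmalloc } in
 * @hash.  If either input is the EMPTY_HASH, or nothing intersects, the
 * result is the EMPTY_HASH, which for a notrace hash means nothing is
 * notraced.
 */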
3313
3314 /* Return a new hash that has a union of all @ops->filter_hash entries */
3315 static struct ftrace_hash *append_hashes(struct ftrace_ops *ops)
3316 {
3317 struct ftrace_hash *new_hash;
3318 struct ftrace_ops *subops;
3319 int ret;
3320
3321 new_hash = alloc_ftrace_hash(ops->func_hash->filter_hash->size_bits);
3322 if (!new_hash)
3323 return NULL;
3324
3325 list_for_each_entry(subops, &ops->subop_list, list) {
3326 ret = append_hash(&new_hash, subops->func_hash->filter_hash);
3327 if (ret < 0) {
3328 free_ftrace_hash(new_hash);
3329 return NULL;
3330 }
3331 /* Nothing more to do if new_hash is empty */
3332 if (ftrace_hash_empty(new_hash))
3333 break;
3334 }
3335 return new_hash;
3336 }
3337
3338 /* Make @ops trace everything except what all its subops do not trace */
3339 static struct ftrace_hash *intersect_hashes(struct ftrace_ops *ops)
3340 {
3341 struct ftrace_hash *new_hash = NULL;
3342 struct ftrace_ops *subops;
3343 int size_bits;
3344 int ret;
3345
3346 list_for_each_entry(subops, &ops->subop_list, list) {
3347 struct ftrace_hash *next_hash;
3348
3349 if (!new_hash) {
3350 size_bits = subops->func_hash->notrace_hash->size_bits;
3351 new_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->notrace_hash);
3352 if (!new_hash)
3353 return NULL;
3354 continue;
3355 }
3356 size_bits = new_hash->size_bits;
3357 next_hash = new_hash;
3358 new_hash = alloc_ftrace_hash(size_bits);
3359 ret = intersect_hash(&new_hash, next_hash, subops->func_hash->notrace_hash);
3360 free_ftrace_hash(next_hash);
3361 if (ret < 0) {
3362 free_ftrace_hash(new_hash);
3363 return NULL;
3364 }
3365 /* Nothing more to do if new_hash is empty */
3366 if (ftrace_hash_empty(new_hash))
3367 break;
3368 }
3369 return new_hash;
3370 }
3371
3372 static bool ops_equal(struct ftrace_hash *A, struct ftrace_hash *B)
3373 {
3374 struct ftrace_func_entry *entry;
3375 int size;
3376 int i;
3377
3378 if (ftrace_hash_empty(A))
3379 return ftrace_hash_empty(B);
3380
3381 if (ftrace_hash_empty(B))
3382 return ftrace_hash_empty(A);
3383
3384 if (A->count != B->count)
3385 return false;
3386
3387 size = 1 << A->size_bits;
3388 for (i = 0; i < size; i++) {
3389 hlist_for_each_entry(entry, &A->buckets[i], hlist) {
3390 if (!__ftrace_lookup_ip(B, entry->ip))
3391 return false;
3392 }
3393 }
3394
3395 return true;
3396 }
3397
3398 static void ftrace_ops_update_code(struct ftrace_ops *ops,
3399 struct ftrace_ops_hash *old_hash);
3400
3401 static int __ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3402 struct ftrace_hash **orig_hash,
3403 struct ftrace_hash *hash,
3404 int enable)
3405 {
3406 struct ftrace_ops_hash old_hash_ops;
3407 struct ftrace_hash *old_hash;
3408 int ret;
3409
3410 old_hash = *orig_hash;
3411 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3412 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3413 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3414 if (!ret) {
3415 ftrace_ops_update_code(ops, &old_hash_ops);
3416 free_ftrace_hash_rcu(old_hash);
3417 }
3418 return ret;
3419 }
3420
3421 static int ftrace_update_ops(struct ftrace_ops *ops, struct ftrace_hash *filter_hash,
3422 struct ftrace_hash *notrace_hash)
3423 {
3424 int ret;
3425
3426 if (!ops_equal(filter_hash, ops->func_hash->filter_hash)) {
3427 ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->filter_hash,
3428 filter_hash, 1);
3429 if (ret < 0)
3430 return ret;
3431 }
3432
3433 if (!ops_equal(notrace_hash, ops->func_hash->notrace_hash)) {
3434 ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->notrace_hash,
3435 notrace_hash, 0);
3436 if (ret < 0)
3437 return ret;
3438 }
3439
3440 return 0;
3441 }
3442
3443 /**
3444 * ftrace_startup_subops - enable tracing for subops of an ops
3445 * @ops: Manager ops (used to pick all the functions of its subops)
3446 * @subops: A new ops to add to @ops
3447 * @command: Extra commands to use to enable tracing
3448 *
3449 * The @ops is a manager @ops that has the filter that includes all the functions
3450 * that its list of subops are tracing. Adding a new @subops will add the
3451 * functions of @subops to @ops.
3452 */
3453 int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
3454 {
3455 struct ftrace_hash *filter_hash;
3456 struct ftrace_hash *notrace_hash;
3457 struct ftrace_hash *save_filter_hash;
3458 struct ftrace_hash *save_notrace_hash;
3459 int size_bits;
3460 int ret;
3461
3462 if (unlikely(ftrace_disabled))
3463 return -ENODEV;
3464
3465 ftrace_ops_init(ops);
3466 ftrace_ops_init(subops);
3467
3468 if (WARN_ON_ONCE(subops->flags & FTRACE_OPS_FL_ENABLED))
3469 return -EBUSY;
3470
3471 /* Make everything canonical (Just in case!) */
3472 if (!ops->func_hash->filter_hash)
3473 ops->func_hash->filter_hash = EMPTY_HASH;
3474 if (!ops->func_hash->notrace_hash)
3475 ops->func_hash->notrace_hash = EMPTY_HASH;
3476 if (!subops->func_hash->filter_hash)
3477 subops->func_hash->filter_hash = EMPTY_HASH;
3478 if (!subops->func_hash->notrace_hash)
3479 subops->func_hash->notrace_hash = EMPTY_HASH;
3480
3481 /* For the first subops to ops just enable it normally */
3482 if (list_empty(&ops->subop_list)) {
3483 /* Just use the subops hashes */
3484 filter_hash = copy_hash(subops->func_hash->filter_hash);
3485 notrace_hash = copy_hash(subops->func_hash->notrace_hash);
3486 if (!filter_hash || !notrace_hash) {
3487 free_ftrace_hash(filter_hash);
3488 free_ftrace_hash(notrace_hash);
3489 return -ENOMEM;
3490 }
3491
3492 save_filter_hash = ops->func_hash->filter_hash;
3493 save_notrace_hash = ops->func_hash->notrace_hash;
3494
3495 ops->func_hash->filter_hash = filter_hash;
3496 ops->func_hash->notrace_hash = notrace_hash;
3497 list_add(&subops->list, &ops->subop_list);
3498 ret = ftrace_startup(ops, command);
3499 if (ret < 0) {
3500 list_del(&subops->list);
3501 ops->func_hash->filter_hash = save_filter_hash;
3502 ops->func_hash->notrace_hash = save_notrace_hash;
3503 free_ftrace_hash(filter_hash);
3504 free_ftrace_hash(notrace_hash);
3505 } else {
3506 free_ftrace_hash(save_filter_hash);
3507 free_ftrace_hash(save_notrace_hash);
3508 subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
3509 subops->managed = ops;
3510 }
3511 return ret;
3512 }
3513
3514 /*
3515 * Here there's already something attached. Here are the rules:
3516 * o If either filter_hash is empty then the final stays empty
3517 * o Otherwise, the final is a superset of both hashes
3518 * o If either notrace_hash is empty then the final stays empty
3519 * o Otherwise, the final is an intersection between the hashes
3520 */
3521 if (ftrace_hash_empty(ops->func_hash->filter_hash) ||
3522 ftrace_hash_empty(subops->func_hash->filter_hash)) {
3523 filter_hash = EMPTY_HASH;
3524 } else {
3525 size_bits = max(ops->func_hash->filter_hash->size_bits,
3526 subops->func_hash->filter_hash->size_bits);
3527 filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash);
3528 if (!filter_hash)
3529 return -ENOMEM;
3530 ret = append_hash(&filter_hash, subops->func_hash->filter_hash);
3531 if (ret < 0) {
3532 free_ftrace_hash(filter_hash);
3533 return ret;
3534 }
3535 }
3536
3537 if (ftrace_hash_empty(ops->func_hash->notrace_hash) ||
3538 ftrace_hash_empty(subops->func_hash->notrace_hash)) {
3539 notrace_hash = EMPTY_HASH;
3540 } else {
3541 size_bits = max(ops->func_hash->filter_hash->size_bits,
3542 subops->func_hash->filter_hash->size_bits);
3543 notrace_hash = alloc_ftrace_hash(size_bits);
3544 if (!notrace_hash) {
3545 free_ftrace_hash(filter_hash);
3546 return -ENOMEM;
3547 }
3548
3549 ret = intersect_hash(&notrace_hash, ops->func_hash->filter_hash,
3550 subops->func_hash->filter_hash);
3551 if (ret < 0) {
3552 free_ftrace_hash(filter_hash);
3553 free_ftrace_hash(notrace_hash);
3554 return ret;
3555 }
3556 }
3557
3558 list_add(&subops->list, &ops->subop_list);
3559
3560 ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
3561 free_ftrace_hash(filter_hash);
3562 free_ftrace_hash(notrace_hash);
3563 if (ret < 0) {
3564 list_del(&subops->list);
3565 } else {
3566 subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
3567 subops->managed = ops;
3568 }
3569 return ret;
3570 }
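/*
 * Example of the rules listed in ftrace_startup_subops() above
 * (illustrative function names): if the manager ops already filters on
 * { schedule } and the new subops filters on { kfree }, the manager's
 * filter hash becomes { schedule, kfree }.  For notrace, only functions
 * that both the manager and the new subops want ignored survive the
 * intersection, so a notrace hash of { kfree } on one side and an empty
 * notrace hash on the other leaves the manager's notrace hash empty
 * (nothing is excluded).
 */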
3571
3572 /**
3573 * ftrace_shutdown_subops - Remove a subops from a manager ops
3574 * @ops: A manager ops to remove @subops from
3575 * @subops: The subops to remove from @ops
3576 * @command: Any extra command flags to add to modifying the text
3577 *
3578 * Removes the functions being traced by the @subops from @ops. Note, it
3579 * will not affect functions that are being traced by other subops that
3580 * still exist in @ops.
3581 *
3582 * If the last subops is removed from @ops, then @ops is shutdown normally.
3583 */
3584 int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
3585 {
3586 struct ftrace_hash *filter_hash;
3587 struct ftrace_hash *notrace_hash;
3588 int ret;
3589
3590 if (unlikely(ftrace_disabled))
3591 return -ENODEV;
3592
3593 if (WARN_ON_ONCE(!(subops->flags & FTRACE_OPS_FL_ENABLED)))
3594 return -EINVAL;
3595
3596 list_del(&subops->list);
3597
3598 if (list_empty(&ops->subop_list)) {
3599 /* Last one, just disable the current ops */
3600
3601 ret = ftrace_shutdown(ops, command);
3602 if (ret < 0) {
3603 list_add(&subops->list, &ops->subop_list);
3604 return ret;
3605 }
3606
3607 subops->flags &= ~FTRACE_OPS_FL_ENABLED;
3608
3609 free_ftrace_hash(ops->func_hash->filter_hash);
3610 free_ftrace_hash(ops->func_hash->notrace_hash);
3611 ops->func_hash->filter_hash = EMPTY_HASH;
3612 ops->func_hash->notrace_hash = EMPTY_HASH;
3613 subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
3614 subops->managed = NULL;
3615
3616 return 0;
3617 }
3618
3619 /* Rebuild the hashes without subops */
3620 filter_hash = append_hashes(ops);
3621 notrace_hash = intersect_hashes(ops);
3622 if (!filter_hash || !notrace_hash) {
3623 free_ftrace_hash(filter_hash);
3624 free_ftrace_hash(notrace_hash);
3625 list_add(&subops->list, &ops->subop_list);
3626 return -ENOMEM;
3627 }
3628
3629 ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
3630 if (ret < 0) {
3631 list_add(&subops->list, &ops->subop_list);
3632 } else {
3633 subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
3634 subops->managed = NULL;
3635 }
3636 free_ftrace_hash(filter_hash);
3637 free_ftrace_hash(notrace_hash);
3638 return ret;
3639 }
3640
3641 static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
3642 struct ftrace_hash **orig_subhash,
3643 struct ftrace_hash *hash,
3644 int enable)
3645 {
3646 struct ftrace_ops *ops = subops->managed;
3647 struct ftrace_hash **orig_hash;
3648 struct ftrace_hash *save_hash;
3649 struct ftrace_hash *new_hash;
3650 int ret;
3651
3652 /* Manager ops can not be subops (yet) */
3653 if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP))
3654 return -EINVAL;
3655
3656 /* Move the new hash over to the subops hash */
3657 save_hash = *orig_subhash;
3658 *orig_subhash = __ftrace_hash_move(hash);
3659 if (!*orig_subhash) {
3660 *orig_subhash = save_hash;
3661 return -ENOMEM;
3662 }
3663
3664 /* Create a new_hash to hold the ops new functions */
3665 if (enable) {
3666 orig_hash = &ops->func_hash->filter_hash;
3667 new_hash = append_hashes(ops);
3668 } else {
3669 orig_hash = &ops->func_hash->notrace_hash;
3670 new_hash = intersect_hashes(ops);
3671 }
3672
3673 /* Move the hash over to the new hash */
3674 ret = __ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable);
3675
3676 free_ftrace_hash(new_hash);
3677
3678 if (ret) {
3679 /* Put back the original hash */
3680 free_ftrace_hash_rcu(*orig_subhash);
3681 *orig_subhash = save_hash;
3682 } else {
3683 free_ftrace_hash_rcu(save_hash);
3684 }
3685 return ret;
3686 }
3687
3688
3689 u64 ftrace_update_time;
3690 u64 ftrace_total_mod_time;
3691 unsigned long ftrace_update_tot_cnt;
3692 unsigned long ftrace_number_of_pages;
3693 unsigned long ftrace_number_of_groups;
3694
3695 static inline int ops_traces_mod(struct ftrace_ops *ops)
3696 {
3697 /*
3698 * Filter_hash being empty will default to tracing the module.
3699 * But notrace hash requires a test of individual module functions.
3700 */
3701 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3702 ftrace_hash_empty(ops->func_hash->notrace_hash);
3703 }
3704
3705 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3706 {
3707 bool init_nop = ftrace_need_init_nop();
3708 struct ftrace_page *pg;
3709 struct dyn_ftrace *p;
3710 u64 start, stop, update_time;
3711 unsigned long update_cnt = 0;
3712 unsigned long rec_flags = 0;
3713 int i;
3714
3715 start = ftrace_now(raw_smp_processor_id());
3716
3717 /*
3718 * When a module is loaded, this function is called to convert
3719 * the calls to mcount in its text to nops, and also to create
3720 * an entry in the ftrace data. Now, if ftrace is activated
3721 * after this call, but before the module sets its text to
3722 * read-only, the modification of enabling ftrace can fail if
3723 * the read-only is done while ftrace is converting the calls.
3724 * To prevent this, the module's records are set as disabled
3725 * and will be enabled after the call to set the module's text
3726 * to read-only.
3727 */
3728 if (mod)
3729 rec_flags |= FTRACE_FL_DISABLED;
3730
3731 for (pg = new_pgs; pg; pg = pg->next) {
3732
3733 for (i = 0; i < pg->index; i++) {
3734
3735 /* If something went wrong, bail without enabling anything */
3736 if (unlikely(ftrace_disabled))
3737 return -1;
3738
3739 p = &pg->records[i];
3740 p->flags = rec_flags;
3741
3742 /*
3743 * Do the initial record conversion from mcount jump
3744 * to the NOP instructions.
3745 */
3746 if (init_nop && !ftrace_nop_initialize(mod, p))
3747 break;
3748
3749 update_cnt++;
3750 }
3751 }
3752
3753 stop = ftrace_now(raw_smp_processor_id());
3754 update_time = stop - start;
3755 if (mod)
3756 ftrace_total_mod_time += update_time;
3757 else
3758 ftrace_update_time = update_time;
3759 ftrace_update_tot_cnt += update_cnt;
3760
3761 return 0;
3762 }
3763
3764 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3765 {
3766 int order;
3767 int pages;
3768 int cnt;
3769
3770 if (WARN_ON(!count))
3771 return -EINVAL;
3772
3773 /* We want to fill as much as possible, with no empty pages */
3774 pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3775 order = fls(pages) - 1;
3776
3777 again:
3778 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3779
3780 if (!pg->records) {
3781 /* if we can't allocate this size, try something smaller */
3782 if (!order)
3783 return -ENOMEM;
3784 order--;
3785 goto again;
3786 }
3787
3788 ftrace_number_of_pages += 1 << order;
3789 ftrace_number_of_groups++;
3790
3791 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3792 pg->order = order;
3793
3794 if (cnt > count)
3795 cnt = count;
3796
3797 return cnt;
3798 }
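/*
 * A rough sketch of the sizing math above (illustrative only; the byte
 * sizes are assumptions, not taken from this file):
 *
 *	Assume PAGE_SIZE = 4096 and sizeof(struct dyn_ftrace) = 32, so
 *	ENTRIES_PER_PAGE = 128.  Then for count = 1000:
 *
 *		pages = DIV_ROUND_UP(1000, 128);	pages = 8
 *		order = fls(8) - 1;			order = 3 (8 pages)
 *		cnt   = (4096 << 3) / 32;		cnt = 1024 records
 *
 *	cnt > count, so this single group covers the request.  If the
 *	order-3 allocation fails, the retry loop drops to order 2 and
 *	ftrace_allocate_pages() below makes up the shortfall with more
 *	groups.
 */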
3799
3800 static void ftrace_free_pages(struct ftrace_page *pages)
3801 {
3802 struct ftrace_page *pg = pages;
3803
3804 while (pg) {
3805 if (pg->records) {
3806 free_pages((unsigned long)pg->records, pg->order);
3807 ftrace_number_of_pages -= 1 << pg->order;
3808 }
3809 pages = pg->next;
3810 kfree(pg);
3811 pg = pages;
3812 ftrace_number_of_groups--;
3813 }
3814 }
3815
3816 static struct ftrace_page *
3817 ftrace_allocate_pages(unsigned long num_to_init)
3818 {
3819 struct ftrace_page *start_pg;
3820 struct ftrace_page *pg;
3821 int cnt;
3822
3823 if (!num_to_init)
3824 return NULL;
3825
3826 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3827 if (!pg)
3828 return NULL;
3829
3830 /*
3831 * Try to allocate as much as possible in one contiguous
3832 * location that fills in all of the space. We want to
3833 * waste as little space as possible.
3834 */
3835 for (;;) {
3836 cnt = ftrace_allocate_records(pg, num_to_init);
3837 if (cnt < 0)
3838 goto free_pages;
3839
3840 num_to_init -= cnt;
3841 if (!num_to_init)
3842 break;
3843
3844 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3845 if (!pg->next)
3846 goto free_pages;
3847
3848 pg = pg->next;
3849 }
3850
3851 return start_pg;
3852
3853 free_pages:
3854 ftrace_free_pages(start_pg);
3855 pr_info("ftrace: FAILED to allocate memory for functions\n");
3856 return NULL;
3857 }
3858
3859 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3860
3861 struct ftrace_iterator {
3862 loff_t pos;
3863 loff_t func_pos;
3864 loff_t mod_pos;
3865 struct ftrace_page *pg;
3866 struct dyn_ftrace *func;
3867 struct ftrace_func_probe *probe;
3868 struct ftrace_func_entry *probe_entry;
3869 struct trace_parser parser;
3870 struct ftrace_hash *hash;
3871 struct ftrace_ops *ops;
3872 struct trace_array *tr;
3873 struct list_head *mod_list;
3874 int pidx;
3875 int idx;
3876 unsigned flags;
3877 };
3878
3879 static void *
3880 t_probe_next(struct seq_file *m, loff_t *pos)
3881 {
3882 struct ftrace_iterator *iter = m->private;
3883 struct trace_array *tr = iter->ops->private;
3884 struct list_head *func_probes;
3885 struct ftrace_hash *hash;
3886 struct list_head *next;
3887 struct hlist_node *hnd = NULL;
3888 struct hlist_head *hhd;
3889 int size;
3890
3891 (*pos)++;
3892 iter->pos = *pos;
3893
3894 if (!tr)
3895 return NULL;
3896
3897 func_probes = &tr->func_probes;
3898 if (list_empty(func_probes))
3899 return NULL;
3900
3901 if (!iter->probe) {
3902 next = func_probes->next;
3903 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3904 }
3905
3906 if (iter->probe_entry)
3907 hnd = &iter->probe_entry->hlist;
3908
3909 hash = iter->probe->ops.func_hash->filter_hash;
3910
3911 /*
3912 * A probe being registered may temporarily have an empty hash
3913 * and it's at the end of the func_probes list.
3914 */
3915 if (!hash || hash == EMPTY_HASH)
3916 return NULL;
3917
3918 size = 1 << hash->size_bits;
3919
3920 retry:
3921 if (iter->pidx >= size) {
3922 if (iter->probe->list.next == func_probes)
3923 return NULL;
3924 next = iter->probe->list.next;
3925 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3926 hash = iter->probe->ops.func_hash->filter_hash;
3927 size = 1 << hash->size_bits;
3928 iter->pidx = 0;
3929 }
3930
3931 hhd = &hash->buckets[iter->pidx];
3932
3933 if (hlist_empty(hhd)) {
3934 iter->pidx++;
3935 hnd = NULL;
3936 goto retry;
3937 }
3938
3939 if (!hnd)
3940 hnd = hhd->first;
3941 else {
3942 hnd = hnd->next;
3943 if (!hnd) {
3944 iter->pidx++;
3945 goto retry;
3946 }
3947 }
3948
3949 if (WARN_ON_ONCE(!hnd))
3950 return NULL;
3951
3952 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3953
3954 return iter;
3955 }
3956
3957 static void *t_probe_start(struct seq_file *m, loff_t *pos)
3958 {
3959 struct ftrace_iterator *iter = m->private;
3960 void *p = NULL;
3961 loff_t l;
3962
3963 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3964 return NULL;
3965
3966 if (iter->mod_pos > *pos)
3967 return NULL;
3968
3969 iter->probe = NULL;
3970 iter->probe_entry = NULL;
3971 iter->pidx = 0;
3972 for (l = 0; l <= (*pos - iter->mod_pos); ) {
3973 p = t_probe_next(m, &l);
3974 if (!p)
3975 break;
3976 }
3977 if (!p)
3978 return NULL;
3979
3980 /* Only set this if we have an item */
3981 iter->flags |= FTRACE_ITER_PROBE;
3982
3983 return iter;
3984 }
3985
3986 static int
3987 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3988 {
3989 struct ftrace_func_entry *probe_entry;
3990 struct ftrace_probe_ops *probe_ops;
3991 struct ftrace_func_probe *probe;
3992
3993 probe = iter->probe;
3994 probe_entry = iter->probe_entry;
3995
3996 if (WARN_ON_ONCE(!probe || !probe_entry))
3997 return -EIO;
3998
3999 probe_ops = probe->probe_ops;
4000
4001 if (probe_ops->print)
4002 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
4003
4004 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
4005 (void *)probe_ops->func);
4006
4007 return 0;
4008 }
4009
4010 static void *
4011 t_mod_next(struct seq_file *m, loff_t *pos)
4012 {
4013 struct ftrace_iterator *iter = m->private;
4014 struct trace_array *tr = iter->tr;
4015
4016 (*pos)++;
4017 iter->pos = *pos;
4018
4019 iter->mod_list = iter->mod_list->next;
4020
4021 if (iter->mod_list == &tr->mod_trace ||
4022 iter->mod_list == &tr->mod_notrace) {
4023 iter->flags &= ~FTRACE_ITER_MOD;
4024 return NULL;
4025 }
4026
4027 iter->mod_pos = *pos;
4028
4029 return iter;
4030 }
4031
4032 static void *t_mod_start(struct seq_file *m, loff_t *pos)
4033 {
4034 struct ftrace_iterator *iter = m->private;
4035 void *p = NULL;
4036 loff_t l;
4037
4038 if (iter->func_pos > *pos)
4039 return NULL;
4040
4041 iter->mod_pos = iter->func_pos;
4042
4043 /* probes are only available if tr is set */
4044 if (!iter->tr)
4045 return NULL;
4046
4047 for (l = 0; l <= (*pos - iter->func_pos); ) {
4048 p = t_mod_next(m, &l);
4049 if (!p)
4050 break;
4051 }
4052 if (!p) {
4053 iter->flags &= ~FTRACE_ITER_MOD;
4054 return t_probe_start(m, pos);
4055 }
4056
4057 /* Only set this if we have an item */
4058 iter->flags |= FTRACE_ITER_MOD;
4059
4060 return iter;
4061 }
4062
4063 static int
4064 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
4065 {
4066 struct ftrace_mod_load *ftrace_mod;
4067 struct trace_array *tr = iter->tr;
4068
4069 if (WARN_ON_ONCE(!iter->mod_list) ||
4070 iter->mod_list == &tr->mod_trace ||
4071 iter->mod_list == &tr->mod_notrace)
4072 return -EIO;
4073
4074 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
4075
4076 if (ftrace_mod->func)
4077 seq_printf(m, "%s", ftrace_mod->func);
4078 else
4079 seq_putc(m, '*');
4080
4081 seq_printf(m, ":mod:%s\n", ftrace_mod->module);
4082
4083 return 0;
4084 }
4085
4086 static void *
4087 t_func_next(struct seq_file *m, loff_t *pos)
4088 {
4089 struct ftrace_iterator *iter = m->private;
4090 struct dyn_ftrace *rec = NULL;
4091
4092 (*pos)++;
4093
4094 retry:
4095 if (iter->idx >= iter->pg->index) {
4096 if (iter->pg->next) {
4097 iter->pg = iter->pg->next;
4098 iter->idx = 0;
4099 goto retry;
4100 }
4101 } else {
4102 rec = &iter->pg->records[iter->idx++];
4103 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
4104 !ftrace_lookup_ip(iter->hash, rec->ip)) ||
4105
4106 ((iter->flags & FTRACE_ITER_ENABLED) &&
4107 !(rec->flags & FTRACE_FL_ENABLED)) ||
4108
4109 ((iter->flags & FTRACE_ITER_TOUCHED) &&
4110 !(rec->flags & FTRACE_FL_TOUCHED))) {
4111
4112 rec = NULL;
4113 goto retry;
4114 }
4115 }
4116
4117 if (!rec)
4118 return NULL;
4119
4120 iter->pos = iter->func_pos = *pos;
4121 iter->func = rec;
4122
4123 return iter;
4124 }
4125
4126 static void *
4127 t_next(struct seq_file *m, void *v, loff_t *pos)
4128 {
4129 struct ftrace_iterator *iter = m->private;
4130 loff_t l = *pos; /* t_probe_start() must use original pos */
4131 void *ret;
4132
4133 if (unlikely(ftrace_disabled))
4134 return NULL;
4135
4136 if (iter->flags & FTRACE_ITER_PROBE)
4137 return t_probe_next(m, pos);
4138
4139 if (iter->flags & FTRACE_ITER_MOD)
4140 return t_mod_next(m, pos);
4141
4142 if (iter->flags & FTRACE_ITER_PRINTALL) {
4143 /* next must increment pos, and t_probe_start does not */
4144 (*pos)++;
4145 return t_mod_start(m, &l);
4146 }
4147
4148 ret = t_func_next(m, pos);
4149
4150 if (!ret)
4151 return t_mod_start(m, &l);
4152
4153 return ret;
4154 }
4155
4156 static void reset_iter_read(struct ftrace_iterator *iter)
4157 {
4158 iter->pos = 0;
4159 iter->func_pos = 0;
4160 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
4161 }
4162
4163 static void *t_start(struct seq_file *m, loff_t *pos)
4164 {
4165 struct ftrace_iterator *iter = m->private;
4166 void *p = NULL;
4167 loff_t l;
4168
4169 mutex_lock(&ftrace_lock);
4170
4171 if (unlikely(ftrace_disabled))
4172 return NULL;
4173
4174 /*
4175 * If an lseek was done, then reset and start from beginning.
4176 */
4177 if (*pos < iter->pos)
4178 reset_iter_read(iter);
4179
4180 /*
4181 * For set_ftrace_filter reading, if we have the filter
4182 * off, we can take a shortcut and just print out that all
4183 * functions are enabled.
4184 */
4185 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
4186 ftrace_hash_empty(iter->hash)) {
4187 iter->func_pos = 1; /* Account for the message */
4188 if (*pos > 0)
4189 return t_mod_start(m, pos);
4190 iter->flags |= FTRACE_ITER_PRINTALL;
4191 /* reset in case of seek/pread */
4192 iter->flags &= ~FTRACE_ITER_PROBE;
4193 return iter;
4194 }
4195
4196 if (iter->flags & FTRACE_ITER_MOD)
4197 return t_mod_start(m, pos);
4198
4199 /*
4200 * Unfortunately, we need to restart at ftrace_pages_start
4201 * every time we let go of ftrace_lock. This is because
4202 * those pointers can change without the lock.
4203 */
4204 iter->pg = ftrace_pages_start;
4205 iter->idx = 0;
4206 for (l = 0; l <= *pos; ) {
4207 p = t_func_next(m, &l);
4208 if (!p)
4209 break;
4210 }
4211
4212 if (!p)
4213 return t_mod_start(m, pos);
4214
4215 return iter;
4216 }
4217
4218 static void t_stop(struct seq_file *m, void *p)
4219 {
4220 mutex_unlock(&ftrace_lock);
4221 }
4222
4223 void * __weak
4224 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
4225 {
4226 return NULL;
4227 }
4228
4229 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
4230 struct dyn_ftrace *rec)
4231 {
4232 void *ptr;
4233
4234 ptr = arch_ftrace_trampoline_func(ops, rec);
4235 if (ptr)
4236 seq_printf(m, " ->%pS", ptr);
4237 }
4238
4239 #ifdef FTRACE_MCOUNT_MAX_OFFSET
4240 /*
4241 * Weak functions can still have an mcount/fentry that is saved in
4242 * the __mcount_loc section. These can be detected by having a
4243 * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
4244 * symbol found by kallsyms is not the function that the mcount/fentry
4245 * is part of. The offset is much greater in these cases.
4246 *
4247 * Test the record to make sure that the ip points to a valid kallsyms
4248 * and if not, mark it disabled.
4249 */
4250 static int test_for_valid_rec(struct dyn_ftrace *rec)
4251 {
4252 char str[KSYM_SYMBOL_LEN];
4253 unsigned long offset;
4254 const char *ret;
4255
4256 ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
4257
4258 /* Weak functions can cause invalid addresses */
4259 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
4260 rec->flags |= FTRACE_FL_DISABLED;
4261 return 0;
4262 }
4263 return 1;
4264 }
4265
4266 static struct workqueue_struct *ftrace_check_wq __initdata;
4267 static struct work_struct ftrace_check_work __initdata;
4268
4269 /*
4270 * Scan all the mcount/fentry entries to make sure they are valid.
4271 */
4272 static __init void ftrace_check_work_func(struct work_struct *work)
4273 {
4274 struct ftrace_page *pg;
4275 struct dyn_ftrace *rec;
4276
4277 mutex_lock(&ftrace_lock);
4278 do_for_each_ftrace_rec(pg, rec) {
4279 test_for_valid_rec(rec);
4280 } while_for_each_ftrace_rec();
4281 mutex_unlock(&ftrace_lock);
4282 }
4283
4284 static int __init ftrace_check_for_weak_functions(void)
4285 {
4286 INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
4287
4288 ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
4289
4290 queue_work(ftrace_check_wq, &ftrace_check_work);
4291 return 0;
4292 }
4293
4294 static int __init ftrace_check_sync(void)
4295 {
4296 /* Make sure the ftrace_check updates are finished */
4297 if (ftrace_check_wq)
4298 destroy_workqueue(ftrace_check_wq);
4299 return 0;
4300 }
4301
4302 late_initcall_sync(ftrace_check_sync);
4303 subsys_initcall(ftrace_check_for_weak_functions);
4304
4305 static int print_rec(struct seq_file *m, unsigned long ip)
4306 {
4307 unsigned long offset;
4308 char str[KSYM_SYMBOL_LEN];
4309 char *modname;
4310 const char *ret;
4311
4312 ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
4313 /* Weak functions can cause invalid addresses */
4314 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
4315 snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
4316 FTRACE_INVALID_FUNCTION, offset);
4317 ret = NULL;
4318 }
4319
4320 seq_puts(m, str);
4321 if (modname)
4322 seq_printf(m, " [%s]", modname);
4323 return ret == NULL ? -1 : 0;
4324 }
4325 #else
4326 static inline int test_for_valid_rec(struct dyn_ftrace *rec)
4327 {
4328 return 1;
4329 }
4330
4331 static inline int print_rec(struct seq_file *m, unsigned long ip)
4332 {
4333 seq_printf(m, "%ps", (void *)ip);
4334 return 0;
4335 }
4336 #endif
4337
4338 static int t_show(struct seq_file *m, void *v)
4339 {
4340 struct ftrace_iterator *iter = m->private;
4341 struct dyn_ftrace *rec;
4342
4343 if (iter->flags & FTRACE_ITER_PROBE)
4344 return t_probe_show(m, iter);
4345
4346 if (iter->flags & FTRACE_ITER_MOD)
4347 return t_mod_show(m, iter);
4348
4349 if (iter->flags & FTRACE_ITER_PRINTALL) {
4350 if (iter->flags & FTRACE_ITER_NOTRACE)
4351 seq_puts(m, "#### no functions disabled ####\n");
4352 else
4353 seq_puts(m, "#### all functions enabled ####\n");
4354 return 0;
4355 }
4356
4357 rec = iter->func;
4358
4359 if (!rec)
4360 return 0;
4361
4362 if (iter->flags & FTRACE_ITER_ADDRS)
4363 seq_printf(m, "%lx ", rec->ip);
4364
4365 if (print_rec(m, rec->ip)) {
4366 /* This should only happen when a rec is disabled */
4367 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
4368 seq_putc(m, '\n');
4369 return 0;
4370 }
4371
4372 if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) {
4373 struct ftrace_ops *ops;
4374
4375 seq_printf(m, " (%ld)%s%s%s%s%s",
4376 ftrace_rec_count(rec),
4377 rec->flags & FTRACE_FL_REGS ? " R" : " ",
4378 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ",
4379 rec->flags & FTRACE_FL_DIRECT ? " D" : " ",
4380 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ",
4381 rec->flags & FTRACE_FL_MODIFIED ? " M " : " ");
4382 if (rec->flags & FTRACE_FL_TRAMP_EN) {
4383 ops = ftrace_find_tramp_ops_any(rec);
4384 if (ops) {
4385 do {
4386 seq_printf(m, "\ttramp: %pS (%pS)",
4387 (void *)ops->trampoline,
4388 (void *)ops->func);
4389 add_trampoline_func(m, ops, rec);
4390 ops = ftrace_find_tramp_ops_next(rec, ops);
4391 } while (ops);
4392 } else
4393 seq_puts(m, "\ttramp: ERROR!");
4394 } else {
4395 add_trampoline_func(m, NULL, rec);
4396 }
4397 if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
4398 ops = ftrace_find_unique_ops(rec);
4399 if (ops) {
4400 seq_printf(m, "\tops: %pS (%pS)",
4401 ops, ops->func);
4402 } else {
4403 seq_puts(m, "\tops: ERROR!");
4404 }
4405 }
4406 if (rec->flags & FTRACE_FL_DIRECT) {
4407 unsigned long direct;
4408
4409 direct = ftrace_find_rec_direct(rec->ip);
4410 if (direct)
4411 seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
4412 }
4413 }
4414
4415 seq_putc(m, '\n');
4416
4417 return 0;
4418 }
4419
4420 static const struct seq_operations show_ftrace_seq_ops = {
4421 .start = t_start,
4422 .next = t_next,
4423 .stop = t_stop,
4424 .show = t_show,
4425 };
4426
4427 static int
4428 ftrace_avail_open(struct inode *inode, struct file *file)
4429 {
4430 struct ftrace_iterator *iter;
4431 int ret;
4432
4433 ret = security_locked_down(LOCKDOWN_TRACEFS);
4434 if (ret)
4435 return ret;
4436
4437 if (unlikely(ftrace_disabled))
4438 return -ENODEV;
4439
4440 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4441 if (!iter)
4442 return -ENOMEM;
4443
4444 iter->pg = ftrace_pages_start;
4445 iter->ops = &global_ops;
4446
4447 return 0;
4448 }
4449
4450 static int
4451 ftrace_enabled_open(struct inode *inode, struct file *file)
4452 {
4453 struct ftrace_iterator *iter;
4454
4455 /*
4456 * This shows us what functions are currently being
4457 * traced and by what. Not sure if we want lockdown
4458 * to hide such critical information from an admin.
4459 * Although, perhaps it can show information we don't
4460 * want people to see, but if something is tracing
4461 * something, we probably want to know about it.
4462 */
4463
4464 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4465 if (!iter)
4466 return -ENOMEM;
4467
4468 iter->pg = ftrace_pages_start;
4469 iter->flags = FTRACE_ITER_ENABLED;
4470 iter->ops = &global_ops;
4471
4472 return 0;
4473 }
4474
4475 static int
4476 ftrace_touched_open(struct inode *inode, struct file *file)
4477 {
4478 struct ftrace_iterator *iter;
4479
4480 /*
4481 * This shows us what functions have ever been enabled
4482 * (traced, direct, patched, etc). Not sure if we want lockdown
4483 * to hide such critical information from an admin.
4484 * Although, perhaps it can show information we don't
4485 * want people to see, but if something had traced
4486 * something, we probably want to know about it.
4487 */
4488
4489 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4490 if (!iter)
4491 return -ENOMEM;
4492
4493 iter->pg = ftrace_pages_start;
4494 iter->flags = FTRACE_ITER_TOUCHED;
4495 iter->ops = &global_ops;
4496
4497 return 0;
4498 }
4499
4500 static int
4501 ftrace_avail_addrs_open(struct inode *inode, struct file *file)
4502 {
4503 struct ftrace_iterator *iter;
4504 int ret;
4505
4506 ret = security_locked_down(LOCKDOWN_TRACEFS);
4507 if (ret)
4508 return ret;
4509
4510 if (unlikely(ftrace_disabled))
4511 return -ENODEV;
4512
4513 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4514 if (!iter)
4515 return -ENOMEM;
4516
4517 iter->pg = ftrace_pages_start;
4518 iter->flags = FTRACE_ITER_ADDRS;
4519 iter->ops = &global_ops;
4520
4521 return 0;
4522 }
4523
4524 /**
4525 * ftrace_regex_open - initialize function tracer filter files
4526 * @ops: The ftrace_ops that hold the hash filters
4527 * @flag: The type of filter to process
4528 * @inode: The inode, usually passed in to your open routine
4529 * @file: The file, usually passed in to your open routine
4530 *
4531 * ftrace_regex_open() initializes the filter files for the
4532 * @ops. Depending on @flag it may process the filter hash or
4533 * the notrace hash of @ops. With this called from the open
4534 * routine, you can use ftrace_filter_write() for the write
4535 * routine if @flag has FTRACE_ITER_FILTER set, or
4536 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
4537 * tracing_lseek() should be used as the lseek routine, and
4538 * release must call ftrace_regex_release().
4539 *
4540 * Returns: 0 on success or a negative errno value on failure
4541 */
4542 int
4543 ftrace_regex_open(struct ftrace_ops *ops, int flag,
4544 struct inode *inode, struct file *file)
4545 {
4546 struct ftrace_iterator *iter;
4547 struct ftrace_hash *hash;
4548 struct list_head *mod_head;
4549 struct trace_array *tr = ops->private;
4550 int ret = -ENOMEM;
4551
4552 ftrace_ops_init(ops);
4553
4554 if (unlikely(ftrace_disabled))
4555 return -ENODEV;
4556
4557 if (tracing_check_open_get_tr(tr))
4558 return -ENODEV;
4559
4560 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4561 if (!iter)
4562 goto out;
4563
4564 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
4565 goto out;
4566
4567 iter->ops = ops;
4568 iter->flags = flag;
4569 iter->tr = tr;
4570
4571 mutex_lock(&ops->func_hash->regex_lock);
4572
4573 if (flag & FTRACE_ITER_NOTRACE) {
4574 hash = ops->func_hash->notrace_hash;
4575 mod_head = tr ? &tr->mod_notrace : NULL;
4576 } else {
4577 hash = ops->func_hash->filter_hash;
4578 mod_head = tr ? &tr->mod_trace : NULL;
4579 }
4580
4581 iter->mod_list = mod_head;
4582
4583 if (file->f_mode & FMODE_WRITE) {
4584 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
4585
4586 if (file->f_flags & O_TRUNC) {
4587 iter->hash = alloc_ftrace_hash(size_bits);
4588 clear_ftrace_mod_list(mod_head);
4589 } else {
4590 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
4591 }
4592
4593 if (!iter->hash) {
4594 trace_parser_put(&iter->parser);
4595 goto out_unlock;
4596 }
4597 } else
4598 iter->hash = hash;
4599
4600 ret = 0;
4601
4602 if (file->f_mode & FMODE_READ) {
4603 iter->pg = ftrace_pages_start;
4604
4605 ret = seq_open(file, &show_ftrace_seq_ops);
4606 if (!ret) {
4607 struct seq_file *m = file->private_data;
4608 m->private = iter;
4609 } else {
4610 /* Failed */
4611 free_ftrace_hash(iter->hash);
4612 trace_parser_put(&iter->parser);
4613 }
4614 } else
4615 file->private_data = iter;
4616
4617 out_unlock:
4618 mutex_unlock(&ops->func_hash->regex_lock);
4619
4620 out:
4621 if (ret) {
4622 kfree(iter);
4623 if (tr)
4624 trace_array_put(tr);
4625 }
4626
4627 return ret;
4628 }
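/*
 * A minimal sketch of the pairing the kernel-doc above describes, with
 * assumed names (the real instances appear further down in this file):
 *
 *	static int my_filter_open(struct inode *inode, struct file *file)
 *	{
 *		struct ftrace_ops *ops = inode->i_private;
 *
 *		return ftrace_regex_open(ops, FTRACE_ITER_FILTER, inode, file);
 *	}
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = my_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */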
4629
4630 static int
4631 ftrace_filter_open(struct inode *inode, struct file *file)
4632 {
4633 struct ftrace_ops *ops = inode->i_private;
4634
4635 /* Checks for tracefs lockdown */
4636 return ftrace_regex_open(ops,
4637 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
4638 inode, file);
4639 }
4640
4641 static int
4642 ftrace_notrace_open(struct inode *inode, struct file *file)
4643 {
4644 struct ftrace_ops *ops = inode->i_private;
4645
4646 /* Checks for tracefs lockdown */
4647 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
4648 inode, file);
4649 }
4650
4651 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
4652 struct ftrace_glob {
4653 char *search;
4654 unsigned len;
4655 int type;
4656 };
4657
4658 /*
4659 * If symbols in an architecture don't correspond exactly to the user-visible
4660 * name of what they represent, it is possible to define this function to
4661 * perform the necessary adjustments.
4662 */
4663 char * __weak arch_ftrace_match_adjust(char *str, const char *search)
4664 {
4665 return str;
4666 }
4667
4668 static int ftrace_match(char *str, struct ftrace_glob *g)
4669 {
4670 int matched = 0;
4671 int slen;
4672
4673 str = arch_ftrace_match_adjust(str, g->search);
4674
4675 switch (g->type) {
4676 case MATCH_FULL:
4677 if (strcmp(str, g->search) == 0)
4678 matched = 1;
4679 break;
4680 case MATCH_FRONT_ONLY:
4681 if (strncmp(str, g->search, g->len) == 0)
4682 matched = 1;
4683 break;
4684 case MATCH_MIDDLE_ONLY:
4685 if (strstr(str, g->search))
4686 matched = 1;
4687 break;
4688 case MATCH_END_ONLY:
4689 slen = strlen(str);
4690 if (slen >= g->len &&
4691 memcmp(str + slen - g->len, g->search, g->len) == 0)
4692 matched = 1;
4693 break;
4694 case MATCH_GLOB:
4695 if (glob_match(g->search, str))
4696 matched = 1;
4697 break;
4698 }
4699
4700 return matched;
4701 }
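/*
 * Roughly how filter_parse_regex() (defined elsewhere) maps user input to
 * the glob types handled above -- illustrative, not exhaustive:
 *
 *	"schedule"		MATCH_FULL		exact strcmp()
 *	"sched*"		MATCH_FRONT_ONLY	prefix strncmp()
 *	"*timeout"		MATCH_END_ONLY		suffix memcmp()
 *	"*sched*"		MATCH_MIDDLE_ONLY	strstr()
 *	"sched*timeout"		MATCH_GLOB		full glob_match()
 */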
4702
4703 static int
4704 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
4705 {
4706 struct ftrace_func_entry *entry;
4707 int ret = 0;
4708
4709 entry = ftrace_lookup_ip(hash, rec->ip);
4710 if (clear_filter) {
4711 /* Do nothing if it doesn't exist */
4712 if (!entry)
4713 return 0;
4714
4715 free_hash_entry(hash, entry);
4716 } else {
4717 /* Do nothing if it exists */
4718 if (entry)
4719 return 0;
4720 if (add_hash_entry(hash, rec->ip) == NULL)
4721 ret = -ENOMEM;
4722 }
4723 return ret;
4724 }
4725
4726 static int
4727 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4728 int clear_filter)
4729 {
4730 long index;
4731 struct ftrace_page *pg;
4732 struct dyn_ftrace *rec;
4733
4734 /* The index starts at 1 */
4735 if (kstrtoul(func_g->search, 0, &index) || --index < 0)
4736 return 0;
4737
4738 do_for_each_ftrace_rec(pg, rec) {
4739 if (pg->index <= index) {
4740 index -= pg->index;
4741 /* this is a double loop, break goes to the next page */
4742 break;
4743 }
4744 rec = &pg->records[index];
4745 enter_record(hash, rec, clear_filter);
4746 return 1;
4747 } while_for_each_ftrace_rec();
4748 return 0;
4749 }
4750
4751 #ifdef FTRACE_MCOUNT_MAX_OFFSET
4752 static int lookup_ip(unsigned long ip, char **modname, char *str)
4753 {
4754 unsigned long offset;
4755
4756 kallsyms_lookup(ip, NULL, &offset, modname, str);
4757 if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4758 return -1;
4759 return 0;
4760 }
4761 #else
4762 static int lookup_ip(unsigned long ip, char **modname, char *str)
4763 {
4764 kallsyms_lookup(ip, NULL, NULL, modname, str);
4765 return 0;
4766 }
4767 #endif
4768
4769 static int
4770 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
4771 struct ftrace_glob *mod_g, int exclude_mod)
4772 {
4773 char str[KSYM_SYMBOL_LEN];
4774 char *modname;
4775
4776 if (lookup_ip(rec->ip, &modname, str)) {
4777 /* This should only happen when a rec is disabled */
4778 WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4779 !(rec->flags & FTRACE_FL_DISABLED));
4780 return 0;
4781 }
4782
4783 if (mod_g) {
4784 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4785
4786 /* blank module name to match all modules */
4787 if (!mod_g->len) {
4788 /* blank module globbing: modname xor exclude_mod */
4789 if (!exclude_mod != !modname)
4790 goto func_match;
4791 return 0;
4792 }
4793
4794 /*
4795 * exclude_mod is set to trace everything but the given
4796 * module. If it is set and the module matches, then
4797 * return 0. If it is not set, and the module doesn't match
4798 * also return 0. Otherwise, check the function to see if
4799 * that matches.
4800 */
4801 if (!mod_matches == !exclude_mod)
4802 return 0;
4803 func_match:
4804 /* blank search means to match all funcs in the mod */
4805 if (!func_g->len)
4806 return 1;
4807 }
4808
4809 return ftrace_match(str, func_g);
4810 }
4811
4812 static int
4813 match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4814 {
4815 struct ftrace_page *pg;
4816 struct dyn_ftrace *rec;
4817 struct ftrace_glob func_g = { .type = MATCH_FULL };
4818 struct ftrace_glob mod_g = { .type = MATCH_FULL };
4819 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4820 int exclude_mod = 0;
4821 int found = 0;
4822 int ret;
4823 int clear_filter = 0;
4824
4825 if (func) {
4826 func_g.type = filter_parse_regex(func, len, &func_g.search,
4827 &clear_filter);
4828 func_g.len = strlen(func_g.search);
4829 }
4830
4831 if (mod) {
4832 mod_g.type = filter_parse_regex(mod, strlen(mod),
4833 &mod_g.search, &exclude_mod);
4834 mod_g.len = strlen(mod_g.search);
4835 }
4836
4837 guard(mutex)(&ftrace_lock);
4838
4839 if (unlikely(ftrace_disabled))
4840 return 0;
4841
4842 if (func_g.type == MATCH_INDEX)
4843 return add_rec_by_index(hash, &func_g, clear_filter);
4844
4845 do_for_each_ftrace_rec(pg, rec) {
4846
4847 if (rec->flags & FTRACE_FL_DISABLED)
4848 continue;
4849
4850 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4851 ret = enter_record(hash, rec, clear_filter);
4852 if (ret < 0)
4853 return ret;
4854 found = 1;
4855 }
4856 cond_resched();
4857 } while_for_each_ftrace_rec();
4858
4859 return found;
4860 }
4861
4862 static int
4863 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4864 {
4865 return match_records(hash, buff, len, NULL);
4866 }
4867
4868 static void ftrace_ops_update_code(struct ftrace_ops *ops,
4869 struct ftrace_ops_hash *old_hash)
4870 {
4871 struct ftrace_ops *op;
4872
4873 if (!ftrace_enabled)
4874 return;
4875
4876 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4877 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4878 return;
4879 }
4880
4881 /*
4882 * If this is the shared global_ops filter, then we need to
4883 * check if there is another ops that shares it and is enabled.
4884 * If so, we still need to run the modify code.
4885 */
4886 if (ops->func_hash != &global_ops.local_hash)
4887 return;
4888
4889 do_for_each_ftrace_op(op, ftrace_ops_list) {
4890 if (op->func_hash == &global_ops.local_hash &&
4891 op->flags & FTRACE_OPS_FL_ENABLED) {
4892 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4893 /* Only need to do this once */
4894 return;
4895 }
4896 } while_for_each_ftrace_op(op);
4897 }
4898
4899 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4900 struct ftrace_hash **orig_hash,
4901 struct ftrace_hash *hash,
4902 int enable)
4903 {
4904 if (ops->flags & FTRACE_OPS_FL_SUBOP)
4905 return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);
4906
4907 /*
4908 * If this ops is not enabled, it could be sharing its filters
4909 * with a subop. If that's the case, update the subop instead of
4910 * this ops. Shared filters are only allowed to have one ops set
4911 * at a time, and if we update the ops that is not enabled,
4912 * it will not affect subops that share it.
4913 */
4914 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) {
4915 struct ftrace_ops *op;
4916
4917 /* Check if any other manager subops maps to this hash */
4918 do_for_each_ftrace_op(op, ftrace_ops_list) {
4919 struct ftrace_ops *subops;
4920
4921 list_for_each_entry(subops, &op->subop_list, list) {
4922 if ((subops->flags & FTRACE_OPS_FL_ENABLED) &&
4923 subops->func_hash == ops->func_hash) {
4924 return ftrace_hash_move_and_update_subops(subops, orig_hash, hash, enable);
4925 }
4926 }
4927 } while_for_each_ftrace_op(op);
4928 }
4929
4930 return __ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4931 }
4932
4933 static bool module_exists(const char *module)
4934 {
4935 /* All modules have the symbol __this_module */
4936 static const char this_mod[] = "__this_module";
4937 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4938 unsigned long val;
4939 int n;
4940
4941 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4942
4943 if (n > sizeof(modname) - 1)
4944 return false;
4945
4946 val = module_kallsyms_lookup_name(modname);
4947 return val != 0;
4948 }
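/*
 * For example, module_exists("ext4") looks up "ext4:__this_module" with
 * module_kallsyms_lookup_name() and returns true only while that module
 * is actually loaded (the module name is illustrative).
 */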
4949
4950 static int cache_mod(struct trace_array *tr,
4951 const char *func, char *module, int enable)
4952 {
4953 struct ftrace_mod_load *ftrace_mod, *n;
4954 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4955
4956 guard(mutex)(&ftrace_lock);
4957
4958 /* We do not cache inverse filters */
4959 if (func[0] == '!') {
4960 int ret = -EINVAL;
4961
4962 func++;
4963
4964 /* Look to remove this hash */
4965 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4966 if (strcmp(ftrace_mod->module, module) != 0)
4967 continue;
4968
4969 /* no func matches all */
4970 if (strcmp(func, "*") == 0 ||
4971 (ftrace_mod->func &&
4972 strcmp(ftrace_mod->func, func) == 0)) {
4973 ret = 0;
4974 free_ftrace_mod(ftrace_mod);
4975 continue;
4976 }
4977 }
4978 return ret;
4979 }
4980
4981 /* We only care about modules that have not been loaded yet */
4982 if (module_exists(module))
4983 return -EINVAL;
4984
4985 /* Save this string off, and execute it when the module is loaded */
4986 return ftrace_add_mod(tr, func, module, enable);
4987 }
4988
4989 static int
4990 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4991 int reset, int enable);
4992
4993 #ifdef CONFIG_MODULES
4994 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4995 char *mod, bool enable)
4996 {
4997 struct ftrace_mod_load *ftrace_mod, *n;
4998 struct ftrace_hash **orig_hash, *new_hash;
4999 LIST_HEAD(process_mods);
5000 char *func;
5001
5002 mutex_lock(&ops->func_hash->regex_lock);
5003
5004 if (enable)
5005 orig_hash = &ops->func_hash->filter_hash;
5006 else
5007 orig_hash = &ops->func_hash->notrace_hash;
5008
5009 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
5010 *orig_hash);
5011 if (!new_hash)
5012 goto out; /* warn? */
5013
5014 mutex_lock(&ftrace_lock);
5015
5016 list_for_each_entry_safe(ftrace_mod, n, head, list) {
5017
5018 if (strcmp(ftrace_mod->module, mod) != 0)
5019 continue;
5020
5021 if (ftrace_mod->func)
5022 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
5023 else
5024 func = kstrdup("*", GFP_KERNEL);
5025
5026 if (!func) /* warn? */
5027 continue;
5028
5029 list_move(&ftrace_mod->list, &process_mods);
5030
5031 /* Use the newly allocated func, as it may be "*" */
5032 kfree(ftrace_mod->func);
5033 ftrace_mod->func = func;
5034 }
5035
5036 mutex_unlock(&ftrace_lock);
5037
5038 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
5039
5040 func = ftrace_mod->func;
5041
5042 /* Grabs ftrace_lock, which is why we have this extra step */
5043 match_records(new_hash, func, strlen(func), mod);
5044 free_ftrace_mod(ftrace_mod);
5045 }
5046
5047 if (enable && list_empty(head))
5048 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
5049
5050 mutex_lock(&ftrace_lock);
5051
5052 ftrace_hash_move_and_update_ops(ops, orig_hash,
5053 new_hash, enable);
5054 mutex_unlock(&ftrace_lock);
5055
5056 out:
5057 mutex_unlock(&ops->func_hash->regex_lock);
5058
5059 free_ftrace_hash(new_hash);
5060 }
5061
5062 static void process_cached_mods(const char *mod_name)
5063 {
5064 struct trace_array *tr;
5065 char *mod;
5066
5067 mod = kstrdup(mod_name, GFP_KERNEL);
5068 if (!mod)
5069 return;
5070
5071 mutex_lock(&trace_types_lock);
5072 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5073 if (!list_empty(&tr->mod_trace))
5074 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
5075 if (!list_empty(&tr->mod_notrace))
5076 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
5077 }
5078 mutex_unlock(&trace_types_lock);
5079
5080 kfree(mod);
5081 }
5082 #endif
5083
5084 /*
5085 * We register the module command as a template to show others how
5086 * to register a command as well.
5087 */
5088
5089 static int
5090 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
5091 char *func_orig, char *cmd, char *module, int enable)
5092 {
5093 char *func;
5094 int ret;
5095
5096 if (!tr)
5097 return -ENODEV;
5098
5099 /* match_records() modifies func, and we need the original */
5100 func = kstrdup(func_orig, GFP_KERNEL);
5101 if (!func)
5102 return -ENOMEM;
5103
5104 /*
5105 * cmd == 'mod' because we only registered this func
5106 * for the 'mod' ftrace_func_command.
5107 * But if you register one func with multiple commands,
5108 * you can tell which command was used by the cmd
5109 * parameter.
5110 */
5111 ret = match_records(hash, func, strlen(func), module);
5112 kfree(func);
5113
5114 if (!ret)
5115 return cache_mod(tr, func_orig, module, enable);
5116 if (ret < 0)
5117 return ret;
5118 return 0;
5119 }
5120
5121 static struct ftrace_func_command ftrace_mod_cmd = {
5122 .name = "mod",
5123 .func = ftrace_mod_callback,
5124 };
5125
5126 static int __init ftrace_mod_cmd_init(void)
5127 {
5128 return register_ftrace_command(&ftrace_mod_cmd);
5129 }
5130 core_initcall(ftrace_mod_cmd_init);
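/*
 * A minimal sketch of registering another command from this template
 * (hypothetical names; the callback arguments mirror ftrace_mod_callback):
 *
 *	static int my_cmd_callback(struct trace_array *tr,
 *				   struct ftrace_hash *hash, char *func,
 *				   char *cmd, char *param, int enable)
 *	{
 *		... act on "func:my_cmd:param" written to set_ftrace_filter ...
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name = "my_cmd",
 *		.func = my_cmd_callback,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	core_initcall(my_cmd_init);
 */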
5131
5132 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
5133 struct ftrace_ops *op, struct ftrace_regs *fregs)
5134 {
5135 struct ftrace_probe_ops *probe_ops;
5136 struct ftrace_func_probe *probe;
5137
5138 probe = container_of(op, struct ftrace_func_probe, ops);
5139 probe_ops = probe->probe_ops;
5140
5141 /*
5142 * Disable preemption for these calls to prevent an RCU grace
5143 * period from completing. This syncs the hash iteration with the
5144 * freeing of items on the hash. rcu_read_lock is too dangerous here.
5145 */
5146 preempt_disable_notrace();
5147 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
5148 preempt_enable_notrace();
5149 }
5150
5151 struct ftrace_func_map {
5152 struct ftrace_func_entry entry;
5153 void *data;
5154 };
5155
5156 struct ftrace_func_mapper {
5157 struct ftrace_hash hash;
5158 };
5159
5160 /**
5161 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
5162 *
5163 * Returns: a ftrace_func_mapper descriptor that can be used to map ips to data.
5164 */
5165 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
5166 {
5167 struct ftrace_hash *hash;
5168
5169 /*
5170 * The mapper is simply a ftrace_hash, but since the entries
5171 * in the hash are not ftrace_func_entry type, we define it
5172 * as a separate structure.
5173 */
5174 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5175 return (struct ftrace_func_mapper *)hash;
5176 }
5177
5178 /**
5179 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
5180 * @mapper: The mapper that has the ip maps
5181 * @ip: the instruction pointer to find the data for
5182 *
5183 * Returns: the data mapped to @ip if found otherwise NULL. The return
5184 * is actually the address of the mapper data pointer. The address is
5185 * returned for use cases where the data is no bigger than a long, and
5186 * the user can use the data pointer as its data instead of having to
5187 * allocate more memory for the reference.
5188 */
5189 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
5190 unsigned long ip)
5191 {
5192 struct ftrace_func_entry *entry;
5193 struct ftrace_func_map *map;
5194
5195 entry = ftrace_lookup_ip(&mapper->hash, ip);
5196 if (!entry)
5197 return NULL;
5198
5199 map = (struct ftrace_func_map *)entry;
5200 return &map->data;
5201 }
5202
5203 /**
5204 * ftrace_func_mapper_add_ip - Map some data to an ip
5205 * @mapper: The mapper that has the ip maps
5206 * @ip: The instruction pointer address to map @data to
5207 * @data: The data to map to @ip
5208 *
5209 * Returns: 0 on success otherwise an error.
5210 */
5211 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
5212 unsigned long ip, void *data)
5213 {
5214 struct ftrace_func_entry *entry;
5215 struct ftrace_func_map *map;
5216
5217 entry = ftrace_lookup_ip(&mapper->hash, ip);
5218 if (entry)
5219 return -EBUSY;
5220
5221 map = kmalloc(sizeof(*map), GFP_KERNEL);
5222 if (!map)
5223 return -ENOMEM;
5224
5225 map->entry.ip = ip;
5226 map->data = data;
5227
5228 __add_hash_entry(&mapper->hash, &map->entry);
5229
5230 return 0;
5231 }
5232
5233 /**
5234 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
5235 * @mapper: The mapper that has the ip maps
5236 * @ip: The instruction pointer address to remove the data from
5237 *
5238 * Returns: the data if it is found, otherwise NULL.
5239 * Note, if the data pointer is used as the data itself (see
5240 * ftrace_func_mapper_find_ip()), then the return value may be meaningless
5241 * if the data pointer was set to zero.
5242 */
5243 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
5244 unsigned long ip)
5245 {
5246 struct ftrace_func_entry *entry;
5247 struct ftrace_func_map *map;
5248 void *data;
5249
5250 entry = ftrace_lookup_ip(&mapper->hash, ip);
5251 if (!entry)
5252 return NULL;
5253
5254 map = (struct ftrace_func_map *)entry;
5255 data = map->data;
5256
5257 remove_hash_entry(&mapper->hash, entry);
5258 kfree(entry);
5259
5260 return data;
5261 }
5262
5263 /**
5264 * free_ftrace_func_mapper - free a mapping of ips and data
5265 * @mapper: The mapper that has the ip maps
5266 * @free_func: A function to be called on each data item.
5267 *
5268 * This is used to free the function mapper. The @free_func is optional
5269 * and can be used if the data needs to be freed as well.
5270 */
5271 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
5272 ftrace_mapper_func free_func)
5273 {
5274 struct ftrace_func_entry *entry;
5275 struct ftrace_func_map *map;
5276 struct hlist_head *hhd;
5277 int size, i;
5278
5279 if (!mapper)
5280 return;
5281
5282 if (free_func && mapper->hash.count) {
5283 size = 1 << mapper->hash.size_bits;
5284 for (i = 0; i < size; i++) {
5285 hhd = &mapper->hash.buckets[i];
5286 hlist_for_each_entry(entry, hhd, hlist) {
5287 map = (struct ftrace_func_map *)entry;
5288 free_func(map);
5289 }
5290 }
5291 }
5292 free_ftrace_hash(&mapper->hash);
5293 }
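/*
 * A minimal sketch of the mapper API above (assumed call site; real users
 * include the function-command probes in trace_functions.c):
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **data;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	if (!mapper)
 *		return -ENOMEM;
 *
 *	ftrace_func_mapper_add_ip(mapper, ip, (void *)5L);
 *
 *	data = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (data)
 *		(*(long *)data)--;		decrement the stored count in place
 *
 *	free_ftrace_func_mapper(mapper, NULL);	no per-entry free needed here
 */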
5294
5295 static void release_probe(struct ftrace_func_probe *probe)
5296 {
5297 struct ftrace_probe_ops *probe_ops;
5298
5299 guard(mutex)(&ftrace_lock);
5300
5301 WARN_ON(probe->ref <= 0);
5302
5303 /* Subtract the ref that was used to protect this instance */
5304 probe->ref--;
5305
5306 if (!probe->ref) {
5307 probe_ops = probe->probe_ops;
5308 /*
5309 * Sending zero as ip tells probe_ops to free
5310 * the probe->data itself
5311 */
5312 if (probe_ops->free)
5313 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
5314 list_del(&probe->list);
5315 kfree(probe);
5316 }
5317 }
5318
5319 static void acquire_probe_locked(struct ftrace_func_probe *probe)
5320 {
5321 /*
5322 * Add one ref to keep it from being freed when releasing the
5323 * ftrace_lock mutex.
5324 */
5325 probe->ref++;
5326 }
5327
5328 int
5329 register_ftrace_function_probe(char *glob, struct trace_array *tr,
5330 struct ftrace_probe_ops *probe_ops,
5331 void *data)
5332 {
5333 struct ftrace_func_probe *probe = NULL, *iter;
5334 struct ftrace_func_entry *entry;
5335 struct ftrace_hash **orig_hash;
5336 struct ftrace_hash *old_hash;
5337 struct ftrace_hash *hash;
5338 int count = 0;
5339 int size;
5340 int ret;
5341 int i;
5342
5343 if (WARN_ON(!tr))
5344 return -EINVAL;
5345
5346 /* We do not support '!' for function probes */
5347 if (WARN_ON(glob[0] == '!'))
5348 return -EINVAL;
5349
5350
5351 mutex_lock(&ftrace_lock);
5352 /* Check if the probe_ops is already registered */
5353 list_for_each_entry(iter, &tr->func_probes, list) {
5354 if (iter->probe_ops == probe_ops) {
5355 probe = iter;
5356 break;
5357 }
5358 }
5359 if (!probe) {
5360 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
5361 if (!probe) {
5362 mutex_unlock(&ftrace_lock);
5363 return -ENOMEM;
5364 }
5365 probe->probe_ops = probe_ops;
5366 probe->ops.func = function_trace_probe_call;
5367 probe->tr = tr;
5368 ftrace_ops_init(&probe->ops);
5369 list_add(&probe->list, &tr->func_probes);
5370 }
5371
5372 acquire_probe_locked(probe);
5373
5374 mutex_unlock(&ftrace_lock);
5375
5376 /*
5377 * Note, there's a small window here where the func_hash->filter_hash
5378 * may be NULL or empty. Need to be careful when reading the loop.
5379 */
5380 mutex_lock(&probe->ops.func_hash->regex_lock);
5381
5382 orig_hash = &probe->ops.func_hash->filter_hash;
5383 old_hash = *orig_hash;
5384 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
5385
5386 if (!hash) {
5387 ret = -ENOMEM;
5388 goto out;
5389 }
5390
5391 ret = ftrace_match_records(hash, glob, strlen(glob));
5392
5393 /* Nothing found? */
5394 if (!ret)
5395 ret = -EINVAL;
5396
5397 if (ret < 0)
5398 goto out;
5399
5400 size = 1 << hash->size_bits;
5401 for (i = 0; i < size; i++) {
5402 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5403 if (ftrace_lookup_ip(old_hash, entry->ip))
5404 continue;
5405 /*
5406 * The caller might want to do something special
5407 * for each function we find. We call the callback
5408 * to give the caller an opportunity to do so.
5409 */
5410 if (probe_ops->init) {
5411 ret = probe_ops->init(probe_ops, tr,
5412 entry->ip, data,
5413 &probe->data);
5414 if (ret < 0) {
5415 if (probe_ops->free && count)
5416 probe_ops->free(probe_ops, tr,
5417 0, probe->data);
5418 probe->data = NULL;
5419 goto out;
5420 }
5421 }
5422 count++;
5423 }
5424 }
5425
5426 mutex_lock(&ftrace_lock);
5427
5428 if (!count) {
5429 /* Nothing was added? */
5430 ret = -EINVAL;
5431 goto out_unlock;
5432 }
5433
5434 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
5435 hash, 1);
5436 if (ret < 0)
5437 goto err_unlock;
5438
5439 /* One ref for each new function traced */
5440 probe->ref += count;
5441
5442 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
5443 ret = ftrace_startup(&probe->ops, 0);
5444
5445 out_unlock:
5446 mutex_unlock(&ftrace_lock);
5447
5448 if (!ret)
5449 ret = count;
5450 out:
5451 mutex_unlock(&probe->ops.func_hash->regex_lock);
5452 free_ftrace_hash(hash);
5453
5454 release_probe(probe);
5455
5456 return ret;
5457
5458 err_unlock:
5459 if (!probe_ops->free || !count)
5460 goto out_unlock;
5461
5462 /* Failed to do the move, need to call the free functions */
5463 for (i = 0; i < size; i++) {
5464 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5465 if (ftrace_lookup_ip(old_hash, entry->ip))
5466 continue;
5467 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5468 }
5469 }
5470 goto out_unlock;
5471 }
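/*
 * A minimal sketch of a caller of the above (hypothetical probe; the real
 * consumers are function-command implementations such as those in
 * trace_functions.c):
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  struct trace_array *tr,
 *				  struct ftrace_probe_ops *ops, void *data)
 *	{
 *		... runs for every matched function ...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("schedule*", tr, &my_probe_ops, NULL);
 *	... on success ret is the number of functions hooked, otherwise a
 *	negative errno.
 */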
5472
5473 int
5474 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
5475 struct ftrace_probe_ops *probe_ops)
5476 {
5477 struct ftrace_func_probe *probe = NULL, *iter;
5478 struct ftrace_ops_hash old_hash_ops;
5479 struct ftrace_func_entry *entry;
5480 struct ftrace_glob func_g;
5481 struct ftrace_hash **orig_hash;
5482 struct ftrace_hash *old_hash;
5483 struct ftrace_hash *hash = NULL;
5484 struct hlist_node *tmp;
5485 struct hlist_head hhd;
5486 char str[KSYM_SYMBOL_LEN];
5487 int count = 0;
5488 int i, ret = -ENODEV;
5489 int size;
5490
5491 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
5492 func_g.search = NULL;
5493 else {
5494 int not;
5495
5496 func_g.type = filter_parse_regex(glob, strlen(glob),
5497 &func_g.search, ¬);
5498 func_g.len = strlen(func_g.search);
5499
5500 /* we do not support '!' for function probes */
5501 if (WARN_ON(not))
5502 return -EINVAL;
5503 }
5504
5505 mutex_lock(&ftrace_lock);
5506 /* Check if the probe_ops is already registered */
5507 list_for_each_entry(iter, &tr->func_probes, list) {
5508 if (iter->probe_ops == probe_ops) {
5509 probe = iter;
5510 break;
5511 }
5512 }
5513 if (!probe)
5514 goto err_unlock_ftrace;
5515
5516 ret = -EINVAL;
5517 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
5518 goto err_unlock_ftrace;
5519
5520 acquire_probe_locked(probe);
5521
5522 mutex_unlock(&ftrace_lock);
5523
5524 mutex_lock(&probe->ops.func_hash->regex_lock);
5525
5526 orig_hash = &probe->ops.func_hash->filter_hash;
5527 old_hash = *orig_hash;
5528
5529 if (ftrace_hash_empty(old_hash))
5530 goto out_unlock;
5531
5532 old_hash_ops.filter_hash = old_hash;
5533 /* Probes only have filters */
5534 old_hash_ops.notrace_hash = NULL;
5535
5536 ret = -ENOMEM;
5537 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
5538 if (!hash)
5539 goto out_unlock;
5540
5541 INIT_HLIST_HEAD(&hhd);
5542
5543 size = 1 << hash->size_bits;
5544 for (i = 0; i < size; i++) {
5545 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
5546
5547 if (func_g.search) {
5548 kallsyms_lookup(entry->ip, NULL, NULL,
5549 NULL, str);
5550 if (!ftrace_match(str, &func_g))
5551 continue;
5552 }
5553 count++;
5554 remove_hash_entry(hash, entry);
5555 hlist_add_head(&entry->hlist, &hhd);
5556 }
5557 }
5558
5559 /* Nothing found? */
5560 if (!count) {
5561 ret = -EINVAL;
5562 goto out_unlock;
5563 }
5564
5565 mutex_lock(&ftrace_lock);
5566
5567 WARN_ON(probe->ref < count);
5568
5569 probe->ref -= count;
5570
5571 if (ftrace_hash_empty(hash))
5572 ftrace_shutdown(&probe->ops, 0);
5573
5574 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
5575 hash, 1);
5576
5577 /* still need to update the function call sites */
5578 if (ftrace_enabled && !ftrace_hash_empty(hash))
5579 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
5580 &old_hash_ops);
5581 synchronize_rcu();
5582
5583 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
5584 hlist_del(&entry->hlist);
5585 if (probe_ops->free)
5586 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5587 kfree(entry);
5588 }
5589 mutex_unlock(&ftrace_lock);
5590
5591 out_unlock:
5592 mutex_unlock(&probe->ops.func_hash->regex_lock);
5593 free_ftrace_hash(hash);
5594
5595 release_probe(probe);
5596
5597 return ret;
5598
5599 err_unlock_ftrace:
5600 mutex_unlock(&ftrace_lock);
5601 return ret;
5602 }
5603
5604 void clear_ftrace_function_probes(struct trace_array *tr)
5605 {
5606 struct ftrace_func_probe *probe, *n;
5607
5608 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
5609 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
5610 }
5611
5612 static LIST_HEAD(ftrace_commands);
5613 static DEFINE_MUTEX(ftrace_cmd_mutex);
5614
5615 /*
5616 * Currently we only register ftrace commands from __init, so mark this
5617 * __init too.
5618 */
5619 __init int register_ftrace_command(struct ftrace_func_command *cmd)
5620 {
5621 struct ftrace_func_command *p;
5622 int ret = 0;
5623
5624 mutex_lock(&ftrace_cmd_mutex);
5625 list_for_each_entry(p, &ftrace_commands, list) {
5626 if (strcmp(cmd->name, p->name) == 0) {
5627 ret = -EBUSY;
5628 goto out_unlock;
5629 }
5630 }
5631 list_add(&cmd->list, &ftrace_commands);
5632 out_unlock:
5633 mutex_unlock(&ftrace_cmd_mutex);
5634
5635 return ret;
5636 }
5637
5638 /*
5639 * Currently we only unregister ftrace commands from __init, so mark
5640 * this __init too.
5641 */
5642 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
5643 {
5644 struct ftrace_func_command *p, *n;
5645 int ret = -ENODEV;
5646
5647 mutex_lock(&ftrace_cmd_mutex);
5648 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
5649 if (strcmp(cmd->name, p->name) == 0) {
5650 ret = 0;
5651 list_del_init(&p->list);
5652 goto out_unlock;
5653 }
5654 }
5655 out_unlock:
5656 mutex_unlock(&ftrace_cmd_mutex);
5657
5658 return ret;
5659 }
5660
5661 static int ftrace_process_regex(struct ftrace_iterator *iter,
5662 char *buff, int len, int enable)
5663 {
5664 struct ftrace_hash *hash = iter->hash;
5665 struct trace_array *tr = iter->ops->private;
5666 char *func, *command, *next = buff;
5667 struct ftrace_func_command *p;
5668 int ret = -EINVAL;
5669
5670 func = strsep(&next, ":");
5671
5672 if (!next) {
5673 ret = ftrace_match_records(hash, func, len);
5674 if (!ret)
5675 ret = -EINVAL;
5676 if (ret < 0)
5677 return ret;
5678 return 0;
5679 }
5680
5681 /* command found */
5682
5683 command = strsep(&next, ":");
5684
5685 mutex_lock(&ftrace_cmd_mutex);
5686 list_for_each_entry(p, &ftrace_commands, list) {
5687 if (strcmp(p->name, command) == 0) {
5688 ret = p->func(tr, hash, func, command, next, enable);
5689 goto out_unlock;
5690 }
5691 }
5692 out_unlock:
5693 mutex_unlock(&ftrace_cmd_mutex);
5694
5695 return ret;
5696 }
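/*
 * Illustrative strings that end up in this parser when written to
 * set_ftrace_filter (see Documentation/trace/ftrace.rst for the
 * authoritative syntax):
 *
 *	"schedule"			plain glob, no command part
 *	"write*:mod:ext4"		the "mod" command registered above
 *	"schedule_timeout:traceoff"	a command registered by trace_functions.c
 */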
5697
5698 static ssize_t
5699 ftrace_regex_write(struct file *file, const char __user *ubuf,
5700 size_t cnt, loff_t *ppos, int enable)
5701 {
5702 struct ftrace_iterator *iter;
5703 struct trace_parser *parser;
5704 ssize_t ret, read;
5705
5706 if (!cnt)
5707 return 0;
5708
5709 if (file->f_mode & FMODE_READ) {
5710 struct seq_file *m = file->private_data;
5711 iter = m->private;
5712 } else
5713 iter = file->private_data;
5714
5715 if (unlikely(ftrace_disabled))
5716 return -ENODEV;
5717
5718 /* iter->hash is a local copy, so we don't need regex_lock */
5719
5720 parser = &iter->parser;
5721 read = trace_get_user(parser, ubuf, cnt, ppos);
5722
5723 if (read >= 0 && trace_parser_loaded(parser) &&
5724 !trace_parser_cont(parser)) {
5725 ret = ftrace_process_regex(iter, parser->buffer,
5726 parser->idx, enable);
5727 trace_parser_clear(parser);
5728 if (ret < 0)
5729 goto out;
5730 }
5731
5732 ret = read;
5733 out:
5734 return ret;
5735 }
5736
5737 ssize_t
5738 ftrace_filter_write(struct file *file, const char __user *ubuf,
5739 size_t cnt, loff_t *ppos)
5740 {
5741 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5742 }
5743
5744 ssize_t
5745 ftrace_notrace_write(struct file *file, const char __user *ubuf,
5746 size_t cnt, loff_t *ppos)
5747 {
5748 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5749 }
5750
5751 static int
5752 __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
5753 {
5754 struct ftrace_func_entry *entry;
5755
5756 ip = ftrace_location(ip);
5757 if (!ip)
5758 return -EINVAL;
5759
5760 if (remove) {
5761 entry = ftrace_lookup_ip(hash, ip);
5762 if (!entry)
5763 return -ENOENT;
5764 free_hash_entry(hash, entry);
5765 return 0;
5766 }
5767
5768 entry = add_hash_entry(hash, ip);
5769 return entry ? 0 : -ENOMEM;
5770 }
5771
5772 static int
5773 ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
5774 unsigned int cnt, int remove)
5775 {
5776 unsigned int i;
5777 int err;
5778
5779 for (i = 0; i < cnt; i++) {
5780 err = __ftrace_match_addr(hash, ips[i], remove);
5781 if (err) {
5782 /*
5783 * This expects @hash to be a temporary hash, and if this
5784 * fails the caller must free @hash.
5785 */
5786 return err;
5787 }
5788 }
5789 return 0;
5790 }
5791
5792 static int
5793 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
5794 unsigned long *ips, unsigned int cnt,
5795 int remove, int reset, int enable)
5796 {
5797 struct ftrace_hash **orig_hash;
5798 struct ftrace_hash *hash;
5799 int ret;
5800
5801 if (unlikely(ftrace_disabled))
5802 return -ENODEV;
5803
5804 mutex_lock(&ops->func_hash->regex_lock);
5805
5806 if (enable)
5807 orig_hash = &ops->func_hash->filter_hash;
5808 else
5809 orig_hash = &ops->func_hash->notrace_hash;
5810
5811 if (reset)
5812 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5813 else
5814 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5815
5816 if (!hash) {
5817 ret = -ENOMEM;
5818 goto out_regex_unlock;
5819 }
5820
5821 if (buf && !ftrace_match_records(hash, buf, len)) {
5822 ret = -EINVAL;
5823 goto out_regex_unlock;
5824 }
5825 if (ips) {
5826 ret = ftrace_match_addr(hash, ips, cnt, remove);
5827 if (ret < 0)
5828 goto out_regex_unlock;
5829 }
5830
5831 mutex_lock(&ftrace_lock);
5832 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5833 mutex_unlock(&ftrace_lock);
5834
5835 out_regex_unlock:
5836 mutex_unlock(&ops->func_hash->regex_lock);
5837
5838 free_ftrace_hash(hash);
5839 return ret;
5840 }
5841
5842 static int
5843 ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
5844 int remove, int reset, int enable)
5845 {
5846 return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
5847 }
5848
5849 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
5850
5851 static int register_ftrace_function_nolock(struct ftrace_ops *ops);
5852
5853 /*
5854 * If there are multiple ftrace_ops, use SAVE_REGS by default, so that the direct
5855 * call will be jumped to from ftrace_regs_caller. Only if the architecture does
5856 * not support ftrace_regs_caller but does support direct_call, use SAVE_ARGS so
5857 * that the jump is made from ftrace_caller for multiple ftrace_ops.
5858 */
5859 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
5860 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
5861 #else
5862 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
5863 #endif
5864
5865 static int check_direct_multi(struct ftrace_ops *ops)
5866 {
5867 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5868 return -EINVAL;
5869 if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5870 return -EINVAL;
5871 return 0;
5872 }
5873
5874 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5875 {
5876 struct ftrace_func_entry *entry, *del;
5877 int size, i;
5878
5879 size = 1 << hash->size_bits;
5880 for (i = 0; i < size; i++) {
5881 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5882 del = __ftrace_lookup_ip(direct_functions, entry->ip);
5883 if (del && del->direct == addr) {
5884 remove_hash_entry(direct_functions, del);
5885 kfree(del);
5886 }
5887 }
5888 }
5889 }
5890
5891 static void register_ftrace_direct_cb(struct rcu_head *rhp)
5892 {
5893 struct ftrace_hash *fhp = container_of(rhp, struct ftrace_hash, rcu);
5894
5895 free_ftrace_hash(fhp);
5896 }
5897
5898 /**
5899 * register_ftrace_direct - Call a custom trampoline directly
5900 * for multiple functions registered in @ops
5901 * @ops: The address of the struct ftrace_ops object
5902 * @addr: The address of the trampoline to call at @ops functions
5903 *
5904 * This is used to connect direct calls to @addr from the nop locations
5905 * of the functions registered in @ops (set via the ftrace_set_filter_ip()
5906 * function).
5907 *
5908 * The location that it calls (@addr) must be able to handle a direct call,
5909 * and save the parameters of the function being traced, and restore them
5910 * (or inject new ones if needed), before returning.
5911 *
5912 * Returns:
5913 * 0 on success
5914 * -EINVAL - The @ops object was already registered with this call, or
5915 * there are no functions in the @ops object.
5916 * -EBUSY - Another direct function is already attached (there can be only one)
5917 * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5918 * -ENOMEM - There was an allocation failure.
5919 */
5920 int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
5921 {
5922 struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL;
5923 struct ftrace_func_entry *entry, *new;
5924 int err = -EBUSY, size, i;
5925
5926 if (ops->func || ops->trampoline)
5927 return -EINVAL;
5928 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5929 return -EINVAL;
5930 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5931 return -EINVAL;
5932
5933 hash = ops->func_hash->filter_hash;
5934 if (ftrace_hash_empty(hash))
5935 return -EINVAL;
5936
5937 mutex_lock(&direct_mutex);
5938
5939 /* Make sure requested entries are not already registered. */
5940 size = 1 << hash->size_bits;
5941 for (i = 0; i < size; i++) {
5942 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5943 if (ftrace_find_rec_direct(entry->ip))
5944 goto out_unlock;
5945 }
5946 }
5947
5948 err = -ENOMEM;
5949
5950 /* Make a copy of the hash to place the new and the old entries in */
5951 size = hash->count + direct_functions->count;
5952 if (size > 32)
5953 size = 32;
5954 new_hash = alloc_ftrace_hash(fls(size));
5955 if (!new_hash)
5956 goto out_unlock;
5957
5958 /* Now copy over the existing direct entries */
5959 size = 1 << direct_functions->size_bits;
5960 for (i = 0; i < size; i++) {
5961 hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) {
5962 new = add_hash_entry(new_hash, entry->ip);
5963 if (!new)
5964 goto out_unlock;
5965 new->direct = entry->direct;
5966 }
5967 }
5968
5969 /* ... and add the new entries */
5970 size = 1 << hash->size_bits;
5971 for (i = 0; i < size; i++) {
5972 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5973 new = add_hash_entry(new_hash, entry->ip);
5974 if (!new)
5975 goto out_unlock;
5976 /* Update both the copy and the hash entry */
5977 new->direct = addr;
5978 entry->direct = addr;
5979 }
5980 }
5981
5982 free_hash = direct_functions;
5983 rcu_assign_pointer(direct_functions, new_hash);
5984 new_hash = NULL;
5985
5986 ops->func = call_direct_funcs;
5987 ops->flags = MULTI_FLAGS;
5988 ops->trampoline = FTRACE_REGS_ADDR;
5989 ops->direct_call = addr;
5990
5991 err = register_ftrace_function_nolock(ops);
5992
5993 out_unlock:
5994 mutex_unlock(&direct_mutex);
5995
5996 if (free_hash && free_hash != EMPTY_HASH)
5997 call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb);
5998
5999 if (new_hash)
6000 free_ftrace_hash(new_hash);
6001
6002 return err;
6003 }
6004 EXPORT_SYMBOL_GPL(register_ftrace_direct);
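/*
 * Example (illustrative sketch, not part of this file): attaching a direct
 * trampoline to a single function.  "my_tramp" is assumed to be an
 * architecture-specific trampoline provided by the caller (as in the
 * ftrace direct samples) and "my_direct_ops" a caller-owned ftrace_ops:
 *
 *	static struct ftrace_ops my_direct_ops;
 *
 *	static int attach_my_tramp(void)
 *	{
 *		int ret;
 *
 *		ret = ftrace_set_filter_ip(&my_direct_ops,
 *					   (unsigned long)wake_up_process, 0, 0);
 *		if (ret)
 *			return ret;
 *
 *		return register_ftrace_direct(&my_direct_ops,
 *					      (unsigned long)my_tramp);
 *	}
 */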
6005
6006 /**
6007 * unregister_ftrace_direct - Remove calls to custom trampoline
6008 * previously registered by register_ftrace_direct for @ops object.
6009 * @ops: The address of the struct ftrace_ops object
6010 * @addr: The address of the direct function that is called by the @ops functions
6011 * @free_filters: Set to true to remove all filters for the ftrace_ops, false otherwise
6012 *
6013 * This is used to remove direct calls to @addr from the nop locations
6014 * of the functions registered in @ops (set via the ftrace_set_filter_ip()
6015 * function).
6016 *
6017 * Returns:
6018 * 0 on success
6019 * -EINVAL - The @ops object was not properly registered.
6020 */
6021 int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
6022 bool free_filters)
6023 {
6024 struct ftrace_hash *hash = ops->func_hash->filter_hash;
6025 int err;
6026
6027 if (check_direct_multi(ops))
6028 return -EINVAL;
6029 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6030 return -EINVAL;
6031
6032 mutex_lock(&direct_mutex);
6033 err = unregister_ftrace_function(ops);
6034 remove_direct_functions_hash(hash, addr);
6035 mutex_unlock(&direct_mutex);
6036
6037 /* cleanup for a possible later register call */
6038 ops->func = NULL;
6039 ops->trampoline = 0;
6040
6041 if (free_filters)
6042 ftrace_free_filter(ops);
6043 return err;
6044 }
6045 EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
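/*
 * Example (illustrative sketch): tearing down the attachment made in the
 * register_ftrace_direct() sketch above.  Passing free_filters == true
 * also frees the filter hash that ftrace_set_filter_ip() allocated:
 *
 *	static void detach_my_tramp(void)
 *	{
 *		unregister_ftrace_direct(&my_direct_ops,
 *					 (unsigned long)my_tramp, true);
 *	}
 */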
6046
6047 static int
6048 __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
6049 {
6050 struct ftrace_hash *hash;
6051 struct ftrace_func_entry *entry, *iter;
6052 static struct ftrace_ops tmp_ops = {
6053 .func = ftrace_stub,
6054 .flags = FTRACE_OPS_FL_STUB,
6055 };
6056 int i, size;
6057 int err;
6058
6059 lockdep_assert_held_once(&direct_mutex);
6060
6061 /* Enable the tmp_ops to have the same functions as the direct ops */
6062 ftrace_ops_init(&tmp_ops);
6063 tmp_ops.func_hash = ops->func_hash;
6064 tmp_ops.direct_call = addr;
6065
6066 err = register_ftrace_function_nolock(&tmp_ops);
6067 if (err)
6068 return err;
6069
6070 /*
6071 * Now ftrace_ops_list_func() is called to handle the direct callers.
6072 * We can safely change the direct functions attached to each entry.
6073 */
6074 mutex_lock(&ftrace_lock);
6075
6076 hash = ops->func_hash->filter_hash;
6077 size = 1 << hash->size_bits;
6078 for (i = 0; i < size; i++) {
6079 hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
6080 entry = __ftrace_lookup_ip(direct_functions, iter->ip);
6081 if (!entry)
6082 continue;
6083 entry->direct = addr;
6084 }
6085 }
6086 /* Prevent store tearing if a trampoline concurrently accesses the value */
6087 WRITE_ONCE(ops->direct_call, addr);
6088
6089 mutex_unlock(&ftrace_lock);
6090
6091 /* Removing the tmp_ops will add the updated direct callers to the functions */
6092 unregister_ftrace_function(&tmp_ops);
6093
6094 return err;
6095 }
6096
6097 /**
6098 * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
6099 * to call something else
6100 * @ops: The address of the struct ftrace_ops object
6101 * @addr: The address of the new trampoline to call at @ops functions
6102 *
6103 * This is used to unregister the currently registered direct caller and
6104 * register a new one (@addr) on the functions registered in the @ops object.
6105 *
6106 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
6107 * where no callbacks will be called.
6108 *
6109 * Caller should already have direct_mutex locked, so we don't lock
6110 * direct_mutex here.
6111 *
6112 * Returns: zero on success. Non zero on error, which includes:
6113 * -EINVAL - The @ops object was not properly registered.
6114 */
6115 int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
6116 {
6117 if (check_direct_multi(ops))
6118 return -EINVAL;
6119 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6120 return -EINVAL;
6121
6122 return __modify_ftrace_direct(ops, addr);
6123 }
6124 EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock);
6125
6126 /**
6127 * modify_ftrace_direct - Modify an existing direct 'multi' call
6128 * to call something else
6129 * @ops: The address of the struct ftrace_ops object
6130 * @addr: The address of the new trampoline to call at @ops functions
6131 *
6132 * This is used to unregister the currently registered direct caller and
6133 * register a new one (@addr) on the functions registered in the @ops object.
6134 *
6135 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
6136 * where no callbacks will be called.
6137 *
6138 * Returns: zero on success. Non zero on error, which includes:
6139 * -EINVAL - The @ops object was not properly registered.
6140 */
6141 int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
6142 {
6143 int err;
6144
6145 if (check_direct_multi(ops))
6146 return -EINVAL;
6147 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6148 return -EINVAL;
6149
6150 mutex_lock(&direct_mutex);
6151 err = __modify_ftrace_direct(ops, addr);
6152 mutex_unlock(&direct_mutex);
6153 return err;
6154 }
6155 EXPORT_SYMBOL_GPL(modify_ftrace_direct);
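/*
 * Example (illustrative sketch): switching a live direct attachment from
 * one trampoline to another without unregistering the ops.  "my_tramp2"
 * is assumed to be a second caller-provided trampoline:
 *
 *	static int switch_my_tramp(void)
 *	{
 *		return modify_ftrace_direct(&my_direct_ops,
 *					    (unsigned long)my_tramp2);
 *	}
 */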
6156 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
6157
6158 /**
6159 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
6160 * @ops: the ops to set the filter with
6161 * @ip: the address to add to or remove from the filter.
6162 * @remove: non zero to remove the ip from the filter
6163 * @reset: non zero to reset all filters before applying this filter.
6164 *
6165 * Filters denote which functions should be enabled when tracing is enabled.
6166 * If @ip is NULL, it fails to update the filter.
6167 *
6168 * This can allocate memory which must be freed before @ops can be freed,
6169 * either by removing each filtered addr or by using
6170 * ftrace_free_filter(@ops).
6171 */
6172 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
6173 int remove, int reset)
6174 {
6175 ftrace_ops_init(ops);
6176 return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
6177 }
6178 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
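/*
 * Example (illustrative sketch): limiting a callback to a single function
 * selected by address.  "my_callback" is assumed to be defined by the
 * caller with the usual ftrace callback prototype:
 *
 *	static struct ftrace_ops my_ops = { .func = my_callback };
 *
 *	static int trace_one_function(void)
 *	{
 *		int ret;
 *
 *		ret = ftrace_set_filter_ip(&my_ops,
 *					   (unsigned long)schedule, 0, 1);
 *		if (ret)
 *			return ret;
 *		return register_ftrace_function(&my_ops);
 *	}
 */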
6179
6180 /**
6181 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
6182 * @ops: the ops to set the filter with
6183 * @ips: the array of addresses to add to or remove from the filter.
6184 * @cnt: the number of addresses in @ips
6185 * @remove: non zero to remove ips from the filter
6186 * @reset: non zero to reset all filters before applying this filter.
6187 *
6188 * Filters denote which functions should be enabled when tracing is enabled.
6189 * If the @ips array or any ip specified within is NULL, it fails to update the filter.
6190 *
6191 * This can allocate memory which must be freed before @ops can be freed,
6192 * either by removing each filtered addr or by using
6193 * ftrace_free_filter(@ops).
6194 */
6195 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
6196 unsigned int cnt, int remove, int reset)
6197 {
6198 ftrace_ops_init(ops);
6199 return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
6200 }
6201 EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
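/*
 * Example (illustrative sketch): filtering on several addresses at once
 * instead of calling ftrace_set_filter_ip() in a loop:
 *
 *	unsigned long ips[] = {
 *		(unsigned long)schedule,
 *		(unsigned long)wake_up_process,
 *	};
 *
 *	ret = ftrace_set_filter_ips(&my_ops, ips, ARRAY_SIZE(ips), 0, 1);
 */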
6202
6203 /**
6204 * ftrace_ops_set_global_filter - setup ops to use global filters
6205 * @ops: the ops which will use the global filters
6206 *
6207 * ftrace users who need global function trace filtering should call this.
6208 * It can set the global filter only if @ops has not been initialized before.
6209 */
6210 void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
6211 {
6212 if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
6213 return;
6214
6215 ftrace_ops_init(ops);
6216 ops->func_hash = &global_ops.local_hash;
6217 }
6218 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
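/*
 * Example (illustrative sketch): making a private ftrace_ops follow the
 * global set_ftrace_filter / set_ftrace_notrace settings instead of
 * keeping its own hashes.  This must be done before the ops is otherwise
 * initialized or registered:
 *
 *	static struct ftrace_ops my_ops = { .func = my_callback };
 *
 *	ftrace_ops_set_global_filter(&my_ops);
 *	register_ftrace_function(&my_ops);
 */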
6219
6220 static int
6221 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
6222 int reset, int enable)
6223 {
6224 return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
6225 }
6226
6227 /**
6228 * ftrace_set_filter - set a function to filter on in ftrace
6229 * @ops: the ops to set the filter with
6230 * @buf: the string that holds the function filter text.
6231 * @len: the length of the string.
6232 * @reset: non-zero to reset all filters before applying this filter.
6233 *
6234 * Filters denote which functions should be enabled when tracing is enabled.
6235 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
6236 *
6237 * This can allocate memory which must be freed before @ops can be freed,
6238 * either by removing each filtered addr or by using
6239 * ftrace_free_filter(@ops).
6240 */
6241 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
6242 int len, int reset)
6243 {
6244 ftrace_ops_init(ops);
6245 return ftrace_set_regex(ops, buf, len, reset, 1);
6246 }
6247 EXPORT_SYMBOL_GPL(ftrace_set_filter);
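/*
 * Example (illustrative sketch): selecting functions by name glob before
 * enabling the callback.  The buffer takes the same format as the
 * set_ftrace_filter tracefs file:
 *
 *	static unsigned char buf[] = "kmalloc*";
 *
 *	ret = ftrace_set_filter(&my_ops, buf, sizeof(buf) - 1, 1);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */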
6248
6249 /**
6250 * ftrace_set_notrace - set a function to not trace in ftrace
6251 * @ops: the ops to set the notrace filter with
6252 * @buf: the string that holds the function notrace text.
6253 * @len: the length of the string.
6254 * @reset: non-zero to reset all filters before applying this filter.
6255 *
6256 * Notrace Filters denote which functions should not be enabled when tracing
6257 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
6258 * for tracing.
6259 *
6260 * This can allocate memory which must be freed before @ops can be freed,
6261 * either by removing each filtered addr or by using
6262 * ftrace_free_filter(@ops).
6263 */
6264 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
6265 int len, int reset)
6266 {
6267 ftrace_ops_init(ops);
6268 return ftrace_set_regex(ops, buf, len, reset, 0);
6269 }
6270 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
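/*
 * Example (illustrative sketch): excluding a noisy family of functions
 * from an otherwise unfiltered ops:
 *
 *	static unsigned char nobuf[] = "rcu_*";
 *
 *	ret = ftrace_set_notrace(&my_ops, nobuf, sizeof(nobuf) - 1, 1);
 */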
6271 /**
6272 * ftrace_set_global_filter - set a function to filter on with global tracers
6273 * @buf: the string that holds the function filter text.
6274 * @len: the length of the string.
6275 * @reset: non-zero to reset all filters before applying this filter.
6276 *
6277 * Filters denote which functions should be enabled when tracing is enabled.
6278 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
6279 */
6280 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
6281 {
6282 ftrace_set_regex(&global_ops, buf, len, reset, 1);
6283 }
6284 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
6285
6286 /**
6287 * ftrace_set_global_notrace - set a function to not trace with global tracers
6288 * @buf: the string that holds the function notrace text.
6289 * @len: the length of the string.
6290 * @reset: non-zero to reset all filters before applying this filter.
6291 *
6292 * Notrace Filters denote which functions should not be enabled when tracing
6293 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
6294 * for tracing.
6295 */
6296 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
6297 {
6298 ftrace_set_regex(&global_ops, buf, len, reset, 0);
6299 }
6300 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
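/*
 * Example (illustrative sketch): the global variants above apply to
 * global_ops, i.e. to the function tracer itself, rather than to a
 * caller-supplied ftrace_ops:
 *
 *	static unsigned char gfilter[] = "sched_*";
 *	static unsigned char gnotrace[] = "sched_clock";
 *
 *	ftrace_set_global_filter(gfilter, sizeof(gfilter) - 1, 1);
 *	ftrace_set_global_notrace(gnotrace, sizeof(gnotrace) - 1, 0);
 */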
6301
6302 /*
6303 * command line interface to allow users to set filters on boot up.
6304 */
6305 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
6306 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
6307 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
6308
6309 /* Used by the function selftest to skip the test when a boot-time filter is set */
6310 bool ftrace_filter_param __initdata;
6311
6312 static int __init set_ftrace_notrace(char *str)
6313 {
6314 ftrace_filter_param = true;
6315 strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
6316 return 1;
6317 }
6318 __setup("ftrace_notrace=", set_ftrace_notrace);
6319
6320 static int __init set_ftrace_filter(char *str)
6321 {
6322 ftrace_filter_param = true;
6323 strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
6324 return 1;
6325 }
6326 __setup("ftrace_filter=", set_ftrace_filter);
6327
6328 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6329 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
6330 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
6331 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
6332
6333 static int __init set_graph_function(char *str)
6334 {
6335 strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
6336 return 1;
6337 }
6338 __setup("ftrace_graph_filter=", set_graph_function);
6339
6340 static int __init set_graph_notrace_function(char *str)
6341 {
6342 strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
6343 return 1;
6344 }
6345 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
6346
6347 static int __init set_graph_max_depth_function(char *str)
6348 {
6349 if (!str || kstrtouint(str, 0, &fgraph_max_depth))
6350 return 0;
6351 return 1;
6352 }
6353 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
6354
6355 static void __init set_ftrace_early_graph(char *buf, int enable)
6356 {
6357 int ret;
6358 char *func;
6359 struct ftrace_hash *hash;
6360
6361 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
6362 if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
6363 return;
6364
6365 while (buf) {
6366 func = strsep(&buf, ",");
6367 /* we allow only one expression at a time */
6368 ret = ftrace_graph_set_hash(hash, func);
6369 if (ret)
6370 printk(KERN_DEBUG "ftrace: function %s not "
6371 "traceable\n", func);
6372 }
6373
6374 if (enable)
6375 ftrace_graph_hash = hash;
6376 else
6377 ftrace_graph_notrace_hash = hash;
6378 }
6379 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6380
6381 void __init
6382 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
6383 {
6384 char *func;
6385
6386 ftrace_ops_init(ops);
6387
6388 while (buf) {
6389 func = strsep(&buf, ",");
6390 ftrace_set_regex(ops, func, strlen(func), 0, enable);
6391 }
6392 }
6393
6394 static void __init set_ftrace_early_filters(void)
6395 {
6396 if (ftrace_filter_buf[0])
6397 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
6398 if (ftrace_notrace_buf[0])
6399 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
6400 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6401 if (ftrace_graph_buf[0])
6402 set_ftrace_early_graph(ftrace_graph_buf, 1);
6403 if (ftrace_graph_notrace_buf[0])
6404 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
6405 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6406 }
6407
6408 int ftrace_regex_release(struct inode *inode, struct file *file)
6409 {
6410 struct seq_file *m = (struct seq_file *)file->private_data;
6411 struct ftrace_iterator *iter;
6412 struct ftrace_hash **orig_hash;
6413 struct trace_parser *parser;
6414 int filter_hash;
6415
6416 if (file->f_mode & FMODE_READ) {
6417 iter = m->private;
6418 seq_release(inode, file);
6419 } else
6420 iter = file->private_data;
6421
6422 parser = &iter->parser;
6423 if (trace_parser_loaded(parser)) {
6424 int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
6425
6426 ftrace_process_regex(iter, parser->buffer,
6427 parser->idx, enable);
6428 }
6429
6430 trace_parser_put(parser);
6431
6432 mutex_lock(&iter->ops->func_hash->regex_lock);
6433
6434 if (file->f_mode & FMODE_WRITE) {
6435 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
6436
6437 if (filter_hash) {
6438 orig_hash = &iter->ops->func_hash->filter_hash;
6439 if (iter->tr) {
6440 if (list_empty(&iter->tr->mod_trace))
6441 iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
6442 else
6443 iter->hash->flags |= FTRACE_HASH_FL_MOD;
6444 }
6445 } else
6446 orig_hash = &iter->ops->func_hash->notrace_hash;
6447
6448 mutex_lock(&ftrace_lock);
6449 ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
6450 iter->hash, filter_hash);
6451 mutex_unlock(&ftrace_lock);
6452 } else {
6453 /* For read only, the hash is the ops hash */
6454 iter->hash = NULL;
6455 }
6456
6457 mutex_unlock(&iter->ops->func_hash->regex_lock);
6458 free_ftrace_hash(iter->hash);
6459 if (iter->tr)
6460 trace_array_put(iter->tr);
6461 kfree(iter);
6462
6463 return 0;
6464 }
6465
6466 static const struct file_operations ftrace_avail_fops = {
6467 .open = ftrace_avail_open,
6468 .read = seq_read,
6469 .llseek = seq_lseek,
6470 .release = seq_release_private,
6471 };
6472
6473 static const struct file_operations ftrace_enabled_fops = {
6474 .open = ftrace_enabled_open,
6475 .read = seq_read,
6476 .llseek = seq_lseek,
6477 .release = seq_release_private,
6478 };
6479
6480 static const struct file_operations ftrace_touched_fops = {
6481 .open = ftrace_touched_open,
6482 .read = seq_read,
6483 .llseek = seq_lseek,
6484 .release = seq_release_private,
6485 };
6486
6487 static const struct file_operations ftrace_avail_addrs_fops = {
6488 .open = ftrace_avail_addrs_open,
6489 .read = seq_read,
6490 .llseek = seq_lseek,
6491 .release = seq_release_private,
6492 };
6493
6494 static const struct file_operations ftrace_filter_fops = {
6495 .open = ftrace_filter_open,
6496 .read = seq_read,
6497 .write = ftrace_filter_write,
6498 .llseek = tracing_lseek,
6499 .release = ftrace_regex_release,
6500 };
6501
6502 static const struct file_operations ftrace_notrace_fops = {
6503 .open = ftrace_notrace_open,
6504 .read = seq_read,
6505 .write = ftrace_notrace_write,
6506 .llseek = tracing_lseek,
6507 .release = ftrace_regex_release,
6508 };
6509
6510 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6511
6512 static DEFINE_MUTEX(graph_lock);
6513
6514 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
6515 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
6516
6517 enum graph_filter_type {
6518 GRAPH_FILTER_NOTRACE = 0,
6519 GRAPH_FILTER_FUNCTION,
6520 };
6521
6522 #define FTRACE_GRAPH_EMPTY ((void *)1)
6523
6524 struct ftrace_graph_data {
6525 struct ftrace_hash *hash;
6526 struct ftrace_func_entry *entry;
6527 int idx; /* for hash table iteration */
6528 enum graph_filter_type type;
6529 struct ftrace_hash *new_hash;
6530 const struct seq_operations *seq_ops;
6531 struct trace_parser parser;
6532 };
6533
6534 static void *
6535 __g_next(struct seq_file *m, loff_t *pos)
6536 {
6537 struct ftrace_graph_data *fgd = m->private;
6538 struct ftrace_func_entry *entry = fgd->entry;
6539 struct hlist_head *head;
6540 int i, idx = fgd->idx;
6541
6542 if (*pos >= fgd->hash->count)
6543 return NULL;
6544
6545 if (entry) {
6546 hlist_for_each_entry_continue(entry, hlist) {
6547 fgd->entry = entry;
6548 return entry;
6549 }
6550
6551 idx++;
6552 }
6553
6554 for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
6555 head = &fgd->hash->buckets[i];
6556 hlist_for_each_entry(entry, head, hlist) {
6557 fgd->entry = entry;
6558 fgd->idx = i;
6559 return entry;
6560 }
6561 }
6562 return NULL;
6563 }
6564
6565 static void *
6566 g_next(struct seq_file *m, void *v, loff_t *pos)
6567 {
6568 (*pos)++;
6569 return __g_next(m, pos);
6570 }
6571
6572 static void *g_start(struct seq_file *m, loff_t *pos)
6573 {
6574 struct ftrace_graph_data *fgd = m->private;
6575
6576 mutex_lock(&graph_lock);
6577
6578 if (fgd->type == GRAPH_FILTER_FUNCTION)
6579 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6580 lockdep_is_held(&graph_lock));
6581 else
6582 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6583 lockdep_is_held(&graph_lock));
6584
6585 /* Nothing here; tell g_show to print that all functions are enabled */
6586 if (ftrace_hash_empty(fgd->hash) && !*pos)
6587 return FTRACE_GRAPH_EMPTY;
6588
6589 fgd->idx = 0;
6590 fgd->entry = NULL;
6591 return __g_next(m, pos);
6592 }
6593
6594 static void g_stop(struct seq_file *m, void *p)
6595 {
6596 mutex_unlock(&graph_lock);
6597 }
6598
6599 static int g_show(struct seq_file *m, void *v)
6600 {
6601 struct ftrace_func_entry *entry = v;
6602
6603 if (!entry)
6604 return 0;
6605
6606 if (entry == FTRACE_GRAPH_EMPTY) {
6607 struct ftrace_graph_data *fgd = m->private;
6608
6609 if (fgd->type == GRAPH_FILTER_FUNCTION)
6610 seq_puts(m, "#### all functions enabled ####\n");
6611 else
6612 seq_puts(m, "#### no functions disabled ####\n");
6613 return 0;
6614 }
6615
6616 seq_printf(m, "%ps\n", (void *)entry->ip);
6617
6618 return 0;
6619 }
6620
6621 static const struct seq_operations ftrace_graph_seq_ops = {
6622 .start = g_start,
6623 .next = g_next,
6624 .stop = g_stop,
6625 .show = g_show,
6626 };
6627
6628 static int
6629 __ftrace_graph_open(struct inode *inode, struct file *file,
6630 struct ftrace_graph_data *fgd)
6631 {
6632 int ret;
6633 struct ftrace_hash *new_hash = NULL;
6634
6635 ret = security_locked_down(LOCKDOWN_TRACEFS);
6636 if (ret)
6637 return ret;
6638
6639 if (file->f_mode & FMODE_WRITE) {
6640 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6641
6642 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6643 return -ENOMEM;
6644
6645 if (file->f_flags & O_TRUNC)
6646 new_hash = alloc_ftrace_hash(size_bits);
6647 else
6648 new_hash = alloc_and_copy_ftrace_hash(size_bits,
6649 fgd->hash);
6650 if (!new_hash) {
6651 ret = -ENOMEM;
6652 goto out;
6653 }
6654 }
6655
6656 if (file->f_mode & FMODE_READ) {
6657 ret = seq_open(file, &ftrace_graph_seq_ops);
6658 if (!ret) {
6659 struct seq_file *m = file->private_data;
6660 m->private = fgd;
6661 } else {
6662 /* Failed */
6663 free_ftrace_hash(new_hash);
6664 new_hash = NULL;
6665 }
6666 } else
6667 file->private_data = fgd;
6668
6669 out:
6670 if (ret < 0 && file->f_mode & FMODE_WRITE)
6671 trace_parser_put(&fgd->parser);
6672
6673 fgd->new_hash = new_hash;
6674
6675 /*
6676 * All uses of fgd->hash must be taken with the graph_lock
6677 * held. The graph_lock is going to be released, so force
6678 * fgd->hash to be reinitialized when it is taken again.
6679 */
6680 fgd->hash = NULL;
6681
6682 return ret;
6683 }
6684
6685 static int
6686 ftrace_graph_open(struct inode *inode, struct file *file)
6687 {
6688 struct ftrace_graph_data *fgd;
6689 int ret;
6690
6691 if (unlikely(ftrace_disabled))
6692 return -ENODEV;
6693
6694 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6695 if (fgd == NULL)
6696 return -ENOMEM;
6697
6698 mutex_lock(&graph_lock);
6699
6700 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6701 lockdep_is_held(&graph_lock));
6702 fgd->type = GRAPH_FILTER_FUNCTION;
6703 fgd->seq_ops = &ftrace_graph_seq_ops;
6704
6705 ret = __ftrace_graph_open(inode, file, fgd);
6706 if (ret < 0)
6707 kfree(fgd);
6708
6709 mutex_unlock(&graph_lock);
6710 return ret;
6711 }
6712
6713 static int
6714 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6715 {
6716 struct ftrace_graph_data *fgd;
6717 int ret;
6718
6719 if (unlikely(ftrace_disabled))
6720 return -ENODEV;
6721
6722 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6723 if (fgd == NULL)
6724 return -ENOMEM;
6725
6726 mutex_lock(&graph_lock);
6727
6728 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6729 lockdep_is_held(&graph_lock));
6730 fgd->type = GRAPH_FILTER_NOTRACE;
6731 fgd->seq_ops = &ftrace_graph_seq_ops;
6732
6733 ret = __ftrace_graph_open(inode, file, fgd);
6734 if (ret < 0)
6735 kfree(fgd);
6736
6737 mutex_unlock(&graph_lock);
6738 return ret;
6739 }
6740
6741 static int
6742 ftrace_graph_release(struct inode *inode, struct file *file)
6743 {
6744 struct ftrace_graph_data *fgd;
6745 struct ftrace_hash *old_hash, *new_hash;
6746 struct trace_parser *parser;
6747 int ret = 0;
6748
6749 if (file->f_mode & FMODE_READ) {
6750 struct seq_file *m = file->private_data;
6751
6752 fgd = m->private;
6753 seq_release(inode, file);
6754 } else {
6755 fgd = file->private_data;
6756 }
6757
6758
6759 if (file->f_mode & FMODE_WRITE) {
6760
6761 parser = &fgd->parser;
6762
6763 if (trace_parser_loaded((parser))) {
6764 ret = ftrace_graph_set_hash(fgd->new_hash,
6765 parser->buffer);
6766 }
6767
6768 trace_parser_put(parser);
6769
6770 new_hash = __ftrace_hash_move(fgd->new_hash);
6771 if (!new_hash) {
6772 ret = -ENOMEM;
6773 goto out;
6774 }
6775
6776 mutex_lock(&graph_lock);
6777
6778 if (fgd->type == GRAPH_FILTER_FUNCTION) {
6779 old_hash = rcu_dereference_protected(ftrace_graph_hash,
6780 lockdep_is_held(&graph_lock));
6781 rcu_assign_pointer(ftrace_graph_hash, new_hash);
6782 } else {
6783 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6784 lockdep_is_held(&graph_lock));
6785 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6786 }
6787
6788 mutex_unlock(&graph_lock);
6789
6790 /*
6791 * We need to do a hard force of sched synchronization.
6792 * This is because we use preempt_disable() to do RCU, but
6793 * the function tracers can be called where RCU is not watching
6794 * (like before user_exit()). We can not rely on the RCU
6795 * infrastructure to do the synchronization, thus we must do it
6796 * ourselves.
6797 */
6798 if (old_hash != EMPTY_HASH)
6799 synchronize_rcu_tasks_rude();
6800
6801 free_ftrace_hash(old_hash);
6802 }
6803
6804 out:
6805 free_ftrace_hash(fgd->new_hash);
6806 kfree(fgd);
6807
6808 return ret;
6809 }
6810
6811 static int
6812 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6813 {
6814 struct ftrace_glob func_g;
6815 struct dyn_ftrace *rec;
6816 struct ftrace_page *pg;
6817 struct ftrace_func_entry *entry;
6818 int fail = 1;
6819 int not;
6820
6821 /* decode regex */
6822 func_g.type = filter_parse_regex(buffer, strlen(buffer),
6823 &func_g.search, &not);
6824
6825 func_g.len = strlen(func_g.search);
6826
6827 guard(mutex)(&ftrace_lock);
6828
6829 if (unlikely(ftrace_disabled))
6830 return -ENODEV;
6831
6832 do_for_each_ftrace_rec(pg, rec) {
6833
6834 if (rec->flags & FTRACE_FL_DISABLED)
6835 continue;
6836
6837 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
6838 entry = ftrace_lookup_ip(hash, rec->ip);
6839
6840 if (!not) {
6841 fail = 0;
6842
6843 if (entry)
6844 continue;
6845 if (add_hash_entry(hash, rec->ip) == NULL)
6846 return 0;
6847 } else {
6848 if (entry) {
6849 free_hash_entry(hash, entry);
6850 fail = 0;
6851 }
6852 }
6853 }
6854 } while_for_each_ftrace_rec();
6855
6856 return fail ? -EINVAL : 0;
6857 }
6858
6859 static ssize_t
6860 ftrace_graph_write(struct file *file, const char __user *ubuf,
6861 size_t cnt, loff_t *ppos)
6862 {
6863 ssize_t read, ret = 0;
6864 struct ftrace_graph_data *fgd = file->private_data;
6865 struct trace_parser *parser;
6866
6867 if (!cnt)
6868 return 0;
6869
6870 /* Read mode uses seq functions */
6871 if (file->f_mode & FMODE_READ) {
6872 struct seq_file *m = file->private_data;
6873 fgd = m->private;
6874 }
6875
6876 parser = &fgd->parser;
6877
6878 read = trace_get_user(parser, ubuf, cnt, ppos);
6879
6880 if (read >= 0 && trace_parser_loaded(parser) &&
6881 !trace_parser_cont(parser)) {
6882
6883 ret = ftrace_graph_set_hash(fgd->new_hash,
6884 parser->buffer);
6885 trace_parser_clear(parser);
6886 }
6887
6888 if (!ret)
6889 ret = read;
6890
6891 return ret;
6892 }
6893
6894 static const struct file_operations ftrace_graph_fops = {
6895 .open = ftrace_graph_open,
6896 .read = seq_read,
6897 .write = ftrace_graph_write,
6898 .llseek = tracing_lseek,
6899 .release = ftrace_graph_release,
6900 };
6901
6902 static const struct file_operations ftrace_graph_notrace_fops = {
6903 .open = ftrace_graph_notrace_open,
6904 .read = seq_read,
6905 .write = ftrace_graph_write,
6906 .llseek = tracing_lseek,
6907 .release = ftrace_graph_release,
6908 };
6909 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6910
6911 void ftrace_create_filter_files(struct ftrace_ops *ops,
6912 struct dentry *parent)
6913 {
6914
6915 trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
6916 ops, &ftrace_filter_fops);
6917
6918 trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
6919 ops, &ftrace_notrace_fops);
6920 }
6921
6922 /*
6923 * The name "destroy_filter_files" is really a misnomer. Although
6924 * in the future it may actually delete the files, for now it is
6925 * really intended to make sure the ops passed in are disabled
6926 * and that when this function returns, the caller is free to
6927 * free the ops.
6928 *
6929 * The "destroy" name is only to match the "create" name that this
6930 * should be paired with.
6931 */
6932 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6933 {
6934 mutex_lock(&ftrace_lock);
6935 if (ops->flags & FTRACE_OPS_FL_ENABLED)
6936 ftrace_shutdown(ops, 0);
6937 ops->flags |= FTRACE_OPS_FL_DELETED;
6938 ftrace_free_filter(ops);
6939 mutex_unlock(&ftrace_lock);
6940 }
6941
6942 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6943 {
6944
6945 trace_create_file("available_filter_functions", TRACE_MODE_READ,
6946 d_tracer, NULL, &ftrace_avail_fops);
6947
6948 trace_create_file("available_filter_functions_addrs", TRACE_MODE_READ,
6949 d_tracer, NULL, &ftrace_avail_addrs_fops);
6950
6951 trace_create_file("enabled_functions", TRACE_MODE_READ,
6952 d_tracer, NULL, &ftrace_enabled_fops);
6953
6954 trace_create_file("touched_functions", TRACE_MODE_READ,
6955 d_tracer, NULL, &ftrace_touched_fops);
6956
6957 ftrace_create_filter_files(&global_ops, d_tracer);
6958
6959 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6960 trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
6961 NULL,
6962 &ftrace_graph_fops);
6963 trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
6964 NULL,
6965 &ftrace_graph_notrace_fops);
6966 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6967
6968 return 0;
6969 }
6970
6971 static int ftrace_cmp_ips(const void *a, const void *b)
6972 {
6973 const unsigned long *ipa = a;
6974 const unsigned long *ipb = b;
6975
6976 if (*ipa > *ipb)
6977 return 1;
6978 if (*ipa < *ipb)
6979 return -1;
6980 return 0;
6981 }
6982
6983 #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
6984 static void test_is_sorted(unsigned long *start, unsigned long count)
6985 {
6986 int i;
6987
6988 for (i = 1; i < count; i++) {
6989 if (WARN(start[i - 1] > start[i],
6990 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
6991 (void *)start[i - 1], start[i - 1],
6992 (void *)start[i], start[i]))
6993 break;
6994 }
6995 if (i == count)
6996 pr_info("ftrace section at %px sorted properly\n", start);
6997 }
6998 #else
6999 static void test_is_sorted(unsigned long *start, unsigned long count)
7000 {
7001 }
7002 #endif
7003
7004 static int ftrace_process_locs(struct module *mod,
7005 unsigned long *start,
7006 unsigned long *end)
7007 {
7008 struct ftrace_page *pg_unuse = NULL;
7009 struct ftrace_page *start_pg;
7010 struct ftrace_page *pg;
7011 struct dyn_ftrace *rec;
7012 unsigned long skipped = 0;
7013 unsigned long count;
7014 unsigned long *p;
7015 unsigned long addr;
7016 unsigned long flags = 0; /* Shut up gcc */
7017 int ret = -ENOMEM;
7018
7019 count = end - start;
7020
7021 if (!count)
7022 return 0;
7023
7024 /*
7025 * Sorting the mcount locations in vmlinux at build time depends on
7026 * CONFIG_BUILDTIME_MCOUNT_SORT, while the mcount locations in
7027 * modules cannot be sorted at build time.
7028 */
7029 if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
7030 sort(start, count, sizeof(*start),
7031 ftrace_cmp_ips, NULL);
7032 } else {
7033 test_is_sorted(start, count);
7034 }
7035
7036 start_pg = ftrace_allocate_pages(count);
7037 if (!start_pg)
7038 return -ENOMEM;
7039
7040 mutex_lock(&ftrace_lock);
7041
7042 /*
7043 * The core kernel and each module need their own pages, as
7044 * modules will free them when they are removed.
7045 * Force a new page to be allocated for modules.
7046 */
7047 if (!mod) {
7048 WARN_ON(ftrace_pages || ftrace_pages_start);
7049 /* First initialization */
7050 ftrace_pages = ftrace_pages_start = start_pg;
7051 } else {
7052 if (!ftrace_pages)
7053 goto out;
7054
7055 if (WARN_ON(ftrace_pages->next)) {
7056 /* Hmm, we have free pages? */
7057 while (ftrace_pages->next)
7058 ftrace_pages = ftrace_pages->next;
7059 }
7060
7061 ftrace_pages->next = start_pg;
7062 }
7063
7064 p = start;
7065 pg = start_pg;
7066 while (p < end) {
7067 unsigned long end_offset;
7068 addr = ftrace_call_adjust(*p++);
7069 /*
7070 * Some architecture linkers will pad between
7071 * the different mcount_loc sections of different
7072 * object files to satisfy alignments.
7073 * Skip any NULL pointers.
7074 */
7075 if (!addr) {
7076 skipped++;
7077 continue;
7078 }
7079
7080 end_offset = (pg->index+1) * sizeof(pg->records[0]);
7081 if (end_offset > PAGE_SIZE << pg->order) {
7082 /* We should have allocated enough */
7083 if (WARN_ON(!pg->next))
7084 break;
7085 pg = pg->next;
7086 }
7087
7088 rec = &pg->records[pg->index++];
7089 rec->ip = addr;
7090 }
7091
7092 if (pg->next) {
7093 pg_unuse = pg->next;
7094 pg->next = NULL;
7095 }
7096
7097 /* Assign the last page to ftrace_pages */
7098 ftrace_pages = pg;
7099
7100 /*
7101 * We only need to disable interrupts on start up
7102 * because we are modifying code that an interrupt
7103 * may execute, and the modification is not atomic.
7104 * But for modules, nothing runs the code we modify
7105 * until we are finished with it, and there's no
7106 * reason to cause large interrupt latencies while we do it.
7107 */
7108 if (!mod)
7109 local_irq_save(flags);
7110 ftrace_update_code(mod, start_pg);
7111 if (!mod)
7112 local_irq_restore(flags);
7113 ret = 0;
7114 out:
7115 mutex_unlock(&ftrace_lock);
7116
7117 /* We should have used all pages unless we skipped some */
7118 if (pg_unuse) {
7119 WARN_ON(!skipped);
7120 /* Need to synchronize with ftrace_location_range() */
7121 synchronize_rcu();
7122 ftrace_free_pages(pg_unuse);
7123 }
7124 return ret;
7125 }
7126
7127 struct ftrace_mod_func {
7128 struct list_head list;
7129 char *name;
7130 unsigned long ip;
7131 unsigned int size;
7132 };
7133
7134 struct ftrace_mod_map {
7135 struct rcu_head rcu;
7136 struct list_head list;
7137 struct module *mod;
7138 unsigned long start_addr;
7139 unsigned long end_addr;
7140 struct list_head funcs;
7141 unsigned int num_funcs;
7142 };
7143
7144 static int ftrace_get_trampoline_kallsym(unsigned int symnum,
7145 unsigned long *value, char *type,
7146 char *name, char *module_name,
7147 int *exported)
7148 {
7149 struct ftrace_ops *op;
7150
7151 list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
7152 if (!op->trampoline || symnum--)
7153 continue;
7154 *value = op->trampoline;
7155 *type = 't';
7156 strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
7157 strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
7158 *exported = 0;
7159 return 0;
7160 }
7161
7162 return -ERANGE;
7163 }
7164
7165 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
7166 /*
7167 * Check if the current ops references the given ip.
7168 *
7169 * If the ops traces all functions, then it was already accounted for.
7170 * If the ops does not trace the current record function, skip it.
7171 * If the ops ignores the function via notrace filter, skip it.
7172 */
7173 static bool
7174 ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
7175 {
7176 /* If ops isn't enabled, ignore it */
7177 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
7178 return false;
7179
7180 /* If ops traces all then it includes this function */
7181 if (ops_traces_mod(ops))
7182 return true;
7183
7184 /* The function must be in the filter */
7185 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
7186 !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
7187 return false;
7188
7189 /* If in notrace hash, we ignore it too */
7190 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
7191 return false;
7192
7193 return true;
7194 }
7195 #endif
7196
7197 #ifdef CONFIG_MODULES
7198
7199 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
7200
7201 static LIST_HEAD(ftrace_mod_maps);
7202
7203 static int referenced_filters(struct dyn_ftrace *rec)
7204 {
7205 struct ftrace_ops *ops;
7206 int cnt = 0;
7207
7208 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
7209 if (ops_references_ip(ops, rec->ip)) {
7210 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
7211 continue;
7212 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
7213 continue;
7214 cnt++;
7215 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
7216 rec->flags |= FTRACE_FL_REGS;
7217 if (cnt == 1 && ops->trampoline)
7218 rec->flags |= FTRACE_FL_TRAMP;
7219 else
7220 rec->flags &= ~FTRACE_FL_TRAMP;
7221 }
7222 }
7223
7224 return cnt;
7225 }
7226
7227 static void
7228 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
7229 {
7230 struct ftrace_func_entry *entry;
7231 struct dyn_ftrace *rec;
7232 int i;
7233
7234 if (ftrace_hash_empty(hash))
7235 return;
7236
7237 for (i = 0; i < pg->index; i++) {
7238 rec = &pg->records[i];
7239 entry = __ftrace_lookup_ip(hash, rec->ip);
7240 /*
7241 * Do not allow this rec to match again.
7242 * Yeah, it may waste some memory, but will be removed
7243 * if/when the hash is modified again.
7244 */
7245 if (entry)
7246 entry->ip = 0;
7247 }
7248 }
7249
7250 /* Clear any records from hashes */
7251 static void clear_mod_from_hashes(struct ftrace_page *pg)
7252 {
7253 struct trace_array *tr;
7254
7255 mutex_lock(&trace_types_lock);
7256 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7257 if (!tr->ops || !tr->ops->func_hash)
7258 continue;
7259 mutex_lock(&tr->ops->func_hash->regex_lock);
7260 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
7261 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
7262 mutex_unlock(&tr->ops->func_hash->regex_lock);
7263 }
7264 mutex_unlock(&trace_types_lock);
7265 }
7266
7267 static void ftrace_free_mod_map(struct rcu_head *rcu)
7268 {
7269 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
7270 struct ftrace_mod_func *mod_func;
7271 struct ftrace_mod_func *n;
7272
7273 /* All the contents of mod_map are now not visible to readers */
7274 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
7275 kfree(mod_func->name);
7276 list_del(&mod_func->list);
7277 kfree(mod_func);
7278 }
7279
7280 kfree(mod_map);
7281 }
7282
7283 void ftrace_release_mod(struct module *mod)
7284 {
7285 struct ftrace_mod_map *mod_map;
7286 struct ftrace_mod_map *n;
7287 struct dyn_ftrace *rec;
7288 struct ftrace_page **last_pg;
7289 struct ftrace_page *tmp_page = NULL;
7290 struct ftrace_page *pg;
7291
7292 mutex_lock(&ftrace_lock);
7293
7294 if (ftrace_disabled)
7295 goto out_unlock;
7296
7297 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
7298 if (mod_map->mod == mod) {
7299 list_del_rcu(&mod_map->list);
7300 call_rcu(&mod_map->rcu, ftrace_free_mod_map);
7301 break;
7302 }
7303 }
7304
7305 /*
7306 * Each module has its own ftrace_pages, remove
7307 * them from the list.
7308 */
7309 last_pg = &ftrace_pages_start;
7310 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
7311 rec = &pg->records[0];
7312 if (within_module(rec->ip, mod)) {
7313 /*
7314 * As core pages are first, the first
7315 * page should never be a module page.
7316 */
7317 if (WARN_ON(pg == ftrace_pages_start))
7318 goto out_unlock;
7319
7320 /* Check if we are deleting the last page */
7321 if (pg == ftrace_pages)
7322 ftrace_pages = next_to_ftrace_page(last_pg);
7323
7324 ftrace_update_tot_cnt -= pg->index;
7325 *last_pg = pg->next;
7326
7327 pg->next = tmp_page;
7328 tmp_page = pg;
7329 } else
7330 last_pg = &pg->next;
7331 }
7332 out_unlock:
7333 mutex_unlock(&ftrace_lock);
7334
7335 /* Need to synchronize with ftrace_location_range() */
7336 if (tmp_page)
7337 synchronize_rcu();
7338 for (pg = tmp_page; pg; pg = tmp_page) {
7339
7340 /* Needs to be called outside of ftrace_lock */
7341 clear_mod_from_hashes(pg);
7342
7343 if (pg->records) {
7344 free_pages((unsigned long)pg->records, pg->order);
7345 ftrace_number_of_pages -= 1 << pg->order;
7346 }
7347 tmp_page = pg->next;
7348 kfree(pg);
7349 ftrace_number_of_groups--;
7350 }
7351 }
7352
7353 void ftrace_module_enable(struct module *mod)
7354 {
7355 struct dyn_ftrace *rec;
7356 struct ftrace_page *pg;
7357
7358 mutex_lock(&ftrace_lock);
7359
7360 if (ftrace_disabled)
7361 goto out_unlock;
7362
7363 /*
7364 * If the tracing is enabled, go ahead and enable the record.
7365 *
7366 * The reason not to enable the record immediately is the
7367 * inherent check of ftrace_make_nop/ftrace_make_call for
7368 * correct previous instructions. Making the NOP conversion
7369 * first puts the module into the correct state, thus
7370 * passing the ftrace_make_call check.
7371 *
7372 * We also delay this to after the module code already set the
7373 * text to read-only, as we now need to set it back to read-write
7374 * so that we can modify the text.
7375 */
7376 if (ftrace_start_up)
7377 ftrace_arch_code_modify_prepare();
7378
7379 do_for_each_ftrace_rec(pg, rec) {
7380 int cnt;
7381 /*
7382 * do_for_each_ftrace_rec() is a double loop.
7383 * module text shares the pg. If a record is
7384 * not part of this module, then skip this pg,
7385 * which the "break" will do.
7386 */
7387 if (!within_module(rec->ip, mod))
7388 break;
7389
7390 /* Weak functions should still be ignored */
7391 if (!test_for_valid_rec(rec)) {
7392 /* Clear all other flags. Should not be enabled anyway */
7393 rec->flags = FTRACE_FL_DISABLED;
7394 continue;
7395 }
7396
7397 cnt = 0;
7398
7399 /*
7400 * When adding a module, we need to check if tracers are
7401 * currently enabled and, if they are and can trace this record,
7402 * we need to enable the module functions as well as update the
7403 * reference counts for those function records.
7404 */
7405 if (ftrace_start_up)
7406 cnt += referenced_filters(rec);
7407
7408 rec->flags &= ~FTRACE_FL_DISABLED;
7409 rec->flags += cnt;
7410
7411 if (ftrace_start_up && cnt) {
7412 int failed = __ftrace_replace_code(rec, 1);
7413 if (failed) {
7414 ftrace_bug(failed, rec);
7415 goto out_loop;
7416 }
7417 }
7418
7419 } while_for_each_ftrace_rec();
7420
7421 out_loop:
7422 if (ftrace_start_up)
7423 ftrace_arch_code_modify_post_process();
7424
7425 out_unlock:
7426 mutex_unlock(&ftrace_lock);
7427
7428 process_cached_mods(mod->name);
7429 }
7430
7431 void ftrace_module_init(struct module *mod)
7432 {
7433 int ret;
7434
7435 if (ftrace_disabled || !mod->num_ftrace_callsites)
7436 return;
7437
7438 ret = ftrace_process_locs(mod, mod->ftrace_callsites,
7439 mod->ftrace_callsites + mod->num_ftrace_callsites);
7440 if (ret)
7441 pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
7442 mod->name);
7443 }
7444
7445 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7446 struct dyn_ftrace *rec)
7447 {
7448 struct ftrace_mod_func *mod_func;
7449 unsigned long symsize;
7450 unsigned long offset;
7451 char str[KSYM_SYMBOL_LEN];
7452 char *modname;
7453 const char *ret;
7454
7455 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
7456 if (!ret)
7457 return;
7458
7459 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
7460 if (!mod_func)
7461 return;
7462
7463 mod_func->name = kstrdup(str, GFP_KERNEL);
7464 if (!mod_func->name) {
7465 kfree(mod_func);
7466 return;
7467 }
7468
7469 mod_func->ip = rec->ip - offset;
7470 mod_func->size = symsize;
7471
7472 mod_map->num_funcs++;
7473
7474 list_add_rcu(&mod_func->list, &mod_map->funcs);
7475 }
7476
7477 static struct ftrace_mod_map *
7478 allocate_ftrace_mod_map(struct module *mod,
7479 unsigned long start, unsigned long end)
7480 {
7481 struct ftrace_mod_map *mod_map;
7482
7483 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
7484 if (!mod_map)
7485 return NULL;
7486
7487 mod_map->mod = mod;
7488 mod_map->start_addr = start;
7489 mod_map->end_addr = end;
7490 mod_map->num_funcs = 0;
7491
7492 INIT_LIST_HEAD_RCU(&mod_map->funcs);
7493
7494 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
7495
7496 return mod_map;
7497 }
7498
7499 static int
7500 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
7501 unsigned long addr, unsigned long *size,
7502 unsigned long *off, char *sym)
7503 {
7504 struct ftrace_mod_func *found_func = NULL;
7505 struct ftrace_mod_func *mod_func;
7506
7507 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7508 if (addr >= mod_func->ip &&
7509 addr < mod_func->ip + mod_func->size) {
7510 found_func = mod_func;
7511 break;
7512 }
7513 }
7514
7515 if (found_func) {
7516 if (size)
7517 *size = found_func->size;
7518 if (off)
7519 *off = addr - found_func->ip;
7520 return strscpy(sym, found_func->name, KSYM_NAME_LEN);
7521 }
7522
7523 return 0;
7524 }
7525
7526 int
7527 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7528 unsigned long *off, char **modname, char *sym)
7529 {
7530 struct ftrace_mod_map *mod_map;
7531 int ret = 0;
7532
7533 /* mod_map is freed via call_rcu() */
7534 preempt_disable();
7535 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7536 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7537 if (ret) {
7538 if (modname)
7539 *modname = mod_map->mod->name;
7540 break;
7541 }
7542 }
7543 preempt_enable();
7544
7545 return ret;
7546 }
7547
7548 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7549 char *type, char *name,
7550 char *module_name, int *exported)
7551 {
7552 struct ftrace_mod_map *mod_map;
7553 struct ftrace_mod_func *mod_func;
7554 int ret;
7555
7556 preempt_disable();
7557 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7558
7559 if (symnum >= mod_map->num_funcs) {
7560 symnum -= mod_map->num_funcs;
7561 continue;
7562 }
7563
7564 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7565 if (symnum > 1) {
7566 symnum--;
7567 continue;
7568 }
7569
7570 *value = mod_func->ip;
7571 *type = 'T';
7572 strscpy(name, mod_func->name, KSYM_NAME_LEN);
7573 strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7574 *exported = 1;
7575 preempt_enable();
7576 return 0;
7577 }
7578 WARN_ON(1);
7579 break;
7580 }
7581 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7582 module_name, exported);
7583 preempt_enable();
7584 return ret;
7585 }
7586
7587 #else /* !CONFIG_MODULES */
7588 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7589 struct dyn_ftrace *rec) { }
7590 static inline struct ftrace_mod_map *
7591 allocate_ftrace_mod_map(struct module *mod,
7592 unsigned long start, unsigned long end)
7593 {
7594 return NULL;
7595 }
7596 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7597 char *type, char *name, char *module_name,
7598 int *exported)
7599 {
7600 int ret;
7601
7602 preempt_disable();
7603 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7604 module_name, exported);
7605 preempt_enable();
7606 return ret;
7607 }
7608 #endif /* CONFIG_MODULES */
7609
7610 struct ftrace_init_func {
7611 struct list_head list;
7612 unsigned long ip;
7613 };
7614
7615 /* Clear any init ips from hashes */
7616 static void
7617 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
7618 {
7619 struct ftrace_func_entry *entry;
7620
7621 entry = ftrace_lookup_ip(hash, func->ip);
7622 /*
7623 * Do not allow this rec to match again.
7624 * Yeah, it may waste some memory, but will be removed
7625 * if/when the hash is modified again.
7626 */
7627 if (entry)
7628 entry->ip = 0;
7629 }
7630
7631 static void
7632 clear_func_from_hashes(struct ftrace_init_func *func)
7633 {
7634 struct trace_array *tr;
7635
7636 mutex_lock(&trace_types_lock);
7637 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7638 if (!tr->ops || !tr->ops->func_hash)
7639 continue;
7640 mutex_lock(&tr->ops->func_hash->regex_lock);
7641 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7642 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7643 mutex_unlock(&tr->ops->func_hash->regex_lock);
7644 }
7645 mutex_unlock(&trace_types_lock);
7646 }
7647
7648 static void add_to_clear_hash_list(struct list_head *clear_list,
7649 struct dyn_ftrace *rec)
7650 {
7651 struct ftrace_init_func *func;
7652
7653 func = kmalloc(sizeof(*func), GFP_KERNEL);
7654 if (!func) {
7655 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
7656 return;
7657 }
7658
7659 func->ip = rec->ip;
7660 list_add(&func->list, clear_list);
7661 }
7662
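/*
 * ftrace_free_mem - remove ftrace records for a range of freed text
 * @mod:       the module that owns the memory (NULL for core-kernel init memory)
 * @start_ptr: start of the address range being freed
 * @end_ptr:   end of the address range being freed
 *
 * Deletes every dyn_ftrace record whose ip falls in the given range.
 * If module init memory is freed while any ftrace_ops is registered,
 * the symbols of the removed records are first saved in a mod_map so
 * they can still be resolved. The removed ips are queued and cleared
 * from the per-instance filter/notrace hashes once ftrace_lock is
 * dropped, and pages that become empty are freed after an RCU grace
 * period (to synchronize with ftrace_location_range()).
 */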
7663 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
7664 {
7665 unsigned long start = (unsigned long)(start_ptr);
7666 unsigned long end = (unsigned long)(end_ptr);
7667 struct ftrace_page **last_pg = &ftrace_pages_start;
7668 struct ftrace_page *tmp_page = NULL;
7669 struct ftrace_page *pg;
7670 struct dyn_ftrace *rec;
7671 struct dyn_ftrace key;
7672 struct ftrace_mod_map *mod_map = NULL;
7673 struct ftrace_init_func *func, *func_next;
7674 LIST_HEAD(clear_hash);
7675
7676 key.ip = start;
7677 key.flags = end; /* overload flags, as it is unsigned long */
7678
7679 mutex_lock(&ftrace_lock);
7680
7681 /*
7682 * If we are freeing module init memory, then check if
7683 * any tracer is active. If so, we need to save a mapping of
7684 * the module functions being freed with the address.
7685 */
7686 if (mod && ftrace_ops_list != &ftrace_list_end)
7687 mod_map = allocate_ftrace_mod_map(mod, start, end);
7688
7689 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7690 if (end < pg->records[0].ip ||
7691 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7692 continue;
7693 again:
7694 rec = bsearch(&key, pg->records, pg->index,
7695 sizeof(struct dyn_ftrace),
7696 ftrace_cmp_recs);
7697 if (!rec)
7698 continue;
7699
7700 /* rec will be cleared from hashes after ftrace_lock unlock */
7701 add_to_clear_hash_list(&clear_hash, rec);
7702
7703 if (mod_map)
7704 save_ftrace_mod_rec(mod_map, rec);
7705
7706 pg->index--;
7707 ftrace_update_tot_cnt--;
7708 if (!pg->index) {
7709 *last_pg = pg->next;
7710 pg->next = tmp_page;
7711 tmp_page = pg;
7712 pg = container_of(last_pg, struct ftrace_page, next);
7713 if (!(*last_pg))
7714 ftrace_pages = pg;
7715 continue;
7716 }
7717 memmove(rec, rec + 1,
7718 (pg->index - (rec - pg->records)) * sizeof(*rec));
7719 /* More than one function may be in this block */
7720 goto again;
7721 }
7722 mutex_unlock(&ftrace_lock);
7723
7724 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7725 clear_func_from_hashes(func);
7726 kfree(func);
7727 }
7728 /* Need to synchronize with ftrace_location_range() */
7729 if (tmp_page) {
7730 synchronize_rcu();
7731 ftrace_free_pages(tmp_page);
7732 }
7733 }
7734
7735 void __init ftrace_free_init_mem(void)
7736 {
7737 void *start = (void *)(&__init_begin);
7738 void *end = (void *)(&__init_end);
7739
7740 ftrace_boot_snapshot();
7741
7742 ftrace_free_mem(NULL, start, end);
7743 }
7744
7745 int __init __weak ftrace_dyn_arch_init(void)
7746 {
7747 return 0;
7748 }
7749
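/*
 * Boot-time entry point for dynamic ftrace: convert the mcount/fentry
 * call sites recorded at build time between __start_mcount_loc and
 * __stop_mcount_loc into dyn_ftrace records (ftrace_process_locs())
 * and enable ftrace. On any failure ftrace is disabled for good.
 */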
7750 void __init ftrace_init(void)
7751 {
7752 extern unsigned long __start_mcount_loc[];
7753 extern unsigned long __stop_mcount_loc[];
7754 unsigned long count, flags;
7755 int ret;
7756
7757 local_irq_save(flags);
7758 ret = ftrace_dyn_arch_init();
7759 local_irq_restore(flags);
7760 if (ret)
7761 goto failed;
7762
7763 count = __stop_mcount_loc - __start_mcount_loc;
7764 if (!count) {
7765 pr_info("ftrace: No functions to be traced?\n");
7766 goto failed;
7767 }
7768
7769 pr_info("ftrace: allocating %ld entries in %ld pages\n",
7770 count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
7771
7772 ret = ftrace_process_locs(NULL,
7773 __start_mcount_loc,
7774 __stop_mcount_loc);
7775 if (ret) {
7776 pr_warn("ftrace: failed to allocate entries for functions\n");
7777 goto failed;
7778 }
7779
7780 pr_info("ftrace: allocated %ld pages with %ld groups\n",
7781 ftrace_number_of_pages, ftrace_number_of_groups);
7782
7783 last_ftrace_enabled = ftrace_enabled = 1;
7784
7785 set_ftrace_early_filters();
7786
7787 return;
7788 failed:
7789 ftrace_disabled = 1;
7790 }
7791
7792 /* Do nothing if arch does not support this */
7793 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7794 {
7795 }
7796
7797 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7798 {
7799 unsigned long trampoline = ops->trampoline;
7800
7801 arch_ftrace_update_trampoline(ops);
7802 if (ops->trampoline && ops->trampoline != trampoline &&
7803 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
7804 /* Add to kallsyms before the perf events */
7805 ftrace_add_trampoline_to_kallsyms(ops);
7806 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7807 ops->trampoline, ops->trampoline_size, false,
7808 FTRACE_TRAMPOLINE_SYM);
7809 /*
7810 * Record the perf text poke event after the ksymbol register
7811 * event.
7812 */
7813 perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7814 (void *)ops->trampoline,
7815 ops->trampoline_size);
7816 }
7817 }
7818
7819 void ftrace_init_trace_array(struct trace_array *tr)
7820 {
7821 INIT_LIST_HEAD(&tr->func_probes);
7822 INIT_LIST_HEAD(&tr->mod_trace);
7823 INIT_LIST_HEAD(&tr->mod_notrace);
7824 }
7825 #else /* !CONFIG_DYNAMIC_FTRACE */
7826
7827 struct ftrace_ops global_ops = {
7828 .func = ftrace_stub,
7829 .flags = FTRACE_OPS_FL_INITIALIZED |
7830 FTRACE_OPS_FL_PID,
7831 };
7832
7833 static int __init ftrace_nodyn_init(void)
7834 {
7835 ftrace_enabled = 1;
7836 return 0;
7837 }
7838 core_initcall(ftrace_nodyn_init);
7839
7840 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
7841 static inline void ftrace_startup_all(int command) { }
7842
7843 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7844 {
7845 }
7846
7847 #endif /* CONFIG_DYNAMIC_FTRACE */
7848
7849 __init void ftrace_init_global_array_ops(struct trace_array *tr)
7850 {
7851 tr->ops = &global_ops;
7852 tr->ops->private = tr;
7853 ftrace_init_trace_array(tr);
7854 init_array_fgraph_ops(tr, tr->ops);
7855 }
7856
7857 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
7858 {
7859 /* If we filter on pids, update to use the pid function */
7860 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
7861 if (WARN_ON(tr->ops->func != ftrace_stub))
7862 printk("ftrace ops had %pS for function\n",
7863 tr->ops->func);
7864 }
7865 tr->ops->func = func;
7866 tr->ops->private = tr;
7867 }
7868
7869 void ftrace_reset_array_ops(struct trace_array *tr)
7870 {
7871 tr->ops->func = ftrace_stub;
7872 }
7873
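/*
 * The "list function": iterate over every registered ftrace_ops and
 * call each one whose filters match @ip (and whose RCU requirements
 * are satisfied). Recursion protection is taken once around the whole
 * walk, which also keeps preemption disabled while the list is used.
 */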
7874 static nokprobe_inline void
7875 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7876 struct ftrace_ops *ignored, struct ftrace_regs *fregs)
7877 {
7878 struct pt_regs *regs = ftrace_get_regs(fregs);
7879 struct ftrace_ops *op;
7880 int bit;
7881
7882 /*
7883 * The trace_test_and_set_recursion() call below disables preemption,
7884 * which is required since some of the ops may be dynamically
7885 * allocated; they must only be freed after a synchronize_rcu().
7886 */
7887 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7888 if (bit < 0)
7889 return;
7890
7891 do_for_each_ftrace_op(op, ftrace_ops_list) {
7892 /* Stub functions don't need to be called nor tested */
7893 if (op->flags & FTRACE_OPS_FL_STUB)
7894 continue;
7895 /*
7896 * Check the following for each ops before calling their func:
7897 * if the RCU flag is set, then rcu_is_watching() must be true,
7898 * and the ip must match the ops filter (ftrace_ops_test())
7899 *
7900 * If any of the above fails then the op->func() is not executed.
7901 */
7902 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7903 ftrace_ops_test(op, ip, regs)) {
7904 if (FTRACE_WARN_ON(!op->func)) {
7905 pr_warn("op=%p %pS\n", op, op);
7906 goto out;
7907 }
7908 op->func(ip, parent_ip, op, fregs);
7909 }
7910 } while_for_each_ftrace_op(op);
7911 out:
7912 trace_clear_recursion(bit);
7913 }
7914
7915 /*
7916 * Some archs only support passing ip and parent_ip. Even though
7917 * the list function ignores the op parameter, we do not want any
7918 * C side effects, where a function is called without the caller
7919 * sending a third parameter.
7920 * Archs are to support both the regs and ftrace_ops at the same time.
7921 * If they support ftrace_ops, it is assumed they support regs.
7922 * If callbacks want to use regs, they must either check for regs
7923 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
7924 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
7925 * An architecture can pass partial regs with ftrace_ops and still
7926 * set the ARCH_SUPPORTS_FTRACE_OPS.
7927 *
7928 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
7929 * arch_ftrace_ops_list_func.
7930 */
7931 #if ARCH_SUPPORTS_FTRACE_OPS
7932 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7933 struct ftrace_ops *op, struct ftrace_regs *fregs)
7934 {
7935 kmsan_unpoison_memory(fregs, ftrace_regs_size());
7936 __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
7937 }
7938 #else
7939 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
7940 {
7941 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
7942 }
7943 #endif
7944 NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
7945
7946 /*
7947 * If there's only one function registered but it does not support
7948 * recursion or needs RCU protection, then this function will be called
7949 * by the mcount trampoline.
7950 */
7951 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7952 struct ftrace_ops *op, struct ftrace_regs *fregs)
7953 {
7954 int bit;
7955
7956 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7957 if (bit < 0)
7958 return;
7959
7960 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7961 op->func(ip, parent_ip, op, fregs);
7962
7963 trace_clear_recursion(bit);
7964 }
7965 NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7966
7967 /**
7968 * ftrace_ops_get_func - get the function a trampoline should call
7969 * @ops: the ops to get the function for
7970 *
7971 * Normally the mcount trampoline will call the ops->func, but there
7972 * are times that it should not. For example, if the ops does not
7973 * have its own recursion protection, then it should call the
7974 * ftrace_ops_assist_func() instead.
7975 *
7976 * Returns: the function that the trampoline should call for @ops.
7977 */
7978 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7979 {
7980 /*
7981 * If the function does not handle recursion or needs to be RCU safe,
7982 * then we need to call the assist handler.
7983 */
7984 if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7985 FTRACE_OPS_FL_RCU))
7986 return ftrace_ops_assist_func;
7987
7988 return ops->func;
7989 }
7990
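/*
 * sched_switch tracepoint probe used by the set_ftrace_pid and
 * set_ftrace_notrace_pid filters: on every context switch, cache in a
 * per-cpu variable whether the incoming task should be ignored, so the
 * function tracing fast path only has to test that value.
 */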
7991 static void
7992 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
7993 struct task_struct *prev,
7994 struct task_struct *next,
7995 unsigned int prev_state)
7996 {
7997 struct trace_array *tr = data;
7998 struct trace_pid_list *pid_list;
7999 struct trace_pid_list *no_pid_list;
8000
8001 pid_list = rcu_dereference_sched(tr->function_pids);
8002 no_pid_list = rcu_dereference_sched(tr->function_no_pids);
8003
8004 if (trace_ignore_this_task(pid_list, no_pid_list, next))
8005 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8006 FTRACE_PID_IGNORE);
8007 else
8008 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8009 next->pid);
8010 }
8011
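/*
 * Fork/exit tracepoint probes used when pid filtering follows children
 * (see ftrace_pid_follow_fork()): a new child is added to the same pid
 * lists as its parent, and a task is removed from the lists when it is
 * freed.
 */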
8012 static void
8013 ftrace_pid_follow_sched_process_fork(void *data,
8014 struct task_struct *self,
8015 struct task_struct *task)
8016 {
8017 struct trace_pid_list *pid_list;
8018 struct trace_array *tr = data;
8019
8020 pid_list = rcu_dereference_sched(tr->function_pids);
8021 trace_filter_add_remove_task(pid_list, self, task);
8022
8023 pid_list = rcu_dereference_sched(tr->function_no_pids);
8024 trace_filter_add_remove_task(pid_list, self, task);
8025 }
8026
8027 static void
8028 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
8029 {
8030 struct trace_pid_list *pid_list;
8031 struct trace_array *tr = data;
8032
8033 pid_list = rcu_dereference_sched(tr->function_pids);
8034 trace_filter_add_remove_task(pid_list, NULL, task);
8035
8036 pid_list = rcu_dereference_sched(tr->function_no_pids);
8037 trace_filter_add_remove_task(pid_list, NULL, task);
8038 }
8039
8040 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
8041 {
8042 if (enable) {
8043 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
8044 tr);
8045 register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
8046 tr);
8047 } else {
8048 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
8049 tr);
8050 unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
8051 tr);
8052 }
8053 }
8054
8055 static void clear_ftrace_pids(struct trace_array *tr, int type)
8056 {
8057 struct trace_pid_list *pid_list;
8058 struct trace_pid_list *no_pid_list;
8059 int cpu;
8060
8061 pid_list = rcu_dereference_protected(tr->function_pids,
8062 lockdep_is_held(&ftrace_lock));
8063 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
8064 lockdep_is_held(&ftrace_lock));
8065
8066 /* Make sure there's something to do */
8067 if (!pid_type_enabled(type, pid_list, no_pid_list))
8068 return;
8069
8070 /* See if the pids still need to be checked after this */
8071 if (!still_need_pid_events(type, pid_list, no_pid_list)) {
8072 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
8073 for_each_possible_cpu(cpu)
8074 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
8075 }
8076
8077 if (type & TRACE_PIDS)
8078 rcu_assign_pointer(tr->function_pids, NULL);
8079
8080 if (type & TRACE_NO_PIDS)
8081 rcu_assign_pointer(tr->function_no_pids, NULL);
8082
8083 /* Wait till all users are no longer using pid filtering */
8084 synchronize_rcu();
8085
8086 if ((type & TRACE_PIDS) && pid_list)
8087 trace_pid_list_free(pid_list);
8088
8089 if ((type & TRACE_NO_PIDS) && no_pid_list)
8090 trace_pid_list_free(no_pid_list);
8091 }
8092
8093 void ftrace_clear_pids(struct trace_array *tr)
8094 {
8095 mutex_lock(&ftrace_lock);
8096
8097 clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
8098
8099 mutex_unlock(&ftrace_lock);
8100 }
8101
8102 static void ftrace_pid_reset(struct trace_array *tr, int type)
8103 {
8104 mutex_lock(&ftrace_lock);
8105 clear_ftrace_pids(tr, type);
8106
8107 ftrace_update_pid_func();
8108 ftrace_startup_all(0);
8109
8110 mutex_unlock(&ftrace_lock);
8111 }
8112
8113 /* Greater than any max PID */
8114 #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
8115
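/*
 * seq_file operations backing the set_ftrace_pid file. When no pid list
 * is installed, the special FTRACE_NO_PIDS cookie is handed to show()
 * so that "no pid" is printed instead of an empty file.
 */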
8116 static void *fpid_start(struct seq_file *m, loff_t *pos)
8117 __acquires(RCU)
8118 {
8119 struct trace_pid_list *pid_list;
8120 struct trace_array *tr = m->private;
8121
8122 mutex_lock(&ftrace_lock);
8123 rcu_read_lock_sched();
8124
8125 pid_list = rcu_dereference_sched(tr->function_pids);
8126
8127 if (!pid_list)
8128 return !(*pos) ? FTRACE_NO_PIDS : NULL;
8129
8130 return trace_pid_start(pid_list, pos);
8131 }
8132
8133 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
8134 {
8135 struct trace_array *tr = m->private;
8136 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
8137
8138 if (v == FTRACE_NO_PIDS) {
8139 (*pos)++;
8140 return NULL;
8141 }
8142 return trace_pid_next(pid_list, v, pos);
8143 }
8144
8145 static void fpid_stop(struct seq_file *m, void *p)
8146 __releases(RCU)
8147 {
8148 rcu_read_unlock_sched();
8149 mutex_unlock(&ftrace_lock);
8150 }
8151
8152 static int fpid_show(struct seq_file *m, void *v)
8153 {
8154 if (v == FTRACE_NO_PIDS) {
8155 seq_puts(m, "no pid\n");
8156 return 0;
8157 }
8158
8159 return trace_pid_show(m, v);
8160 }
8161
8162 static const struct seq_operations ftrace_pid_sops = {
8163 .start = fpid_start,
8164 .next = fpid_next,
8165 .stop = fpid_stop,
8166 .show = fpid_show,
8167 };
8168
8169 static void *fnpid_start(struct seq_file *m, loff_t *pos)
8170 __acquires(RCU)
8171 {
8172 struct trace_pid_list *pid_list;
8173 struct trace_array *tr = m->private;
8174
8175 mutex_lock(&ftrace_lock);
8176 rcu_read_lock_sched();
8177
8178 pid_list = rcu_dereference_sched(tr->function_no_pids);
8179
8180 if (!pid_list)
8181 return !(*pos) ? FTRACE_NO_PIDS : NULL;
8182
8183 return trace_pid_start(pid_list, pos);
8184 }
8185
8186 static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
8187 {
8188 struct trace_array *tr = m->private;
8189 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
8190
8191 if (v == FTRACE_NO_PIDS) {
8192 (*pos)++;
8193 return NULL;
8194 }
8195 return trace_pid_next(pid_list, v, pos);
8196 }
8197
8198 static const struct seq_operations ftrace_no_pid_sops = {
8199 .start = fnpid_start,
8200 .next = fnpid_next,
8201 .stop = fpid_stop,
8202 .show = fpid_show,
8203 };
8204
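/*
 * Common open routine for set_ftrace_pid and set_ftrace_notrace_pid.
 * Opening for write with O_TRUNC resets the corresponding pid list
 * before the seq_file is set up.
 */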
8205 static int pid_open(struct inode *inode, struct file *file, int type)
8206 {
8207 const struct seq_operations *seq_ops;
8208 struct trace_array *tr = inode->i_private;
8209 struct seq_file *m;
8210 int ret = 0;
8211
8212 ret = tracing_check_open_get_tr(tr);
8213 if (ret)
8214 return ret;
8215
8216 if ((file->f_mode & FMODE_WRITE) &&
8217 (file->f_flags & O_TRUNC))
8218 ftrace_pid_reset(tr, type);
8219
8220 switch (type) {
8221 case TRACE_PIDS:
8222 seq_ops = &ftrace_pid_sops;
8223 break;
8224 case TRACE_NO_PIDS:
8225 seq_ops = &ftrace_no_pid_sops;
8226 break;
8227 default:
8228 trace_array_put(tr);
8229 WARN_ON_ONCE(1);
8230 return -EINVAL;
8231 }
8232
8233 ret = seq_open(file, seq_ops);
8234 if (ret < 0) {
8235 trace_array_put(tr);
8236 } else {
8237 m = file->private_data;
8238 /* copy tr over to seq ops */
8239 m->private = tr;
8240 }
8241
8242 return ret;
8243 }
8244
8245 static int
8246 ftrace_pid_open(struct inode *inode, struct file *file)
8247 {
8248 return pid_open(inode, file, TRACE_PIDS);
8249 }
8250
8251 static int
8252 ftrace_no_pid_open(struct inode *inode, struct file *file)
8253 {
8254 return pid_open(inode, file, TRACE_NO_PIDS);
8255 }
8256
8257 static void ignore_task_cpu(void *data)
8258 {
8259 struct trace_array *tr = data;
8260 struct trace_pid_list *pid_list;
8261 struct trace_pid_list *no_pid_list;
8262
8263 /*
8264 * This function is called by on_each_cpu() while the
8265 * ftrace_lock is held.
8266 */
8267 pid_list = rcu_dereference_protected(tr->function_pids,
8268 mutex_is_locked(&ftrace_lock));
8269 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
8270 mutex_is_locked(&ftrace_lock));
8271
8272 if (trace_ignore_this_task(pid_list, no_pid_list, current))
8273 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8274 FTRACE_PID_IGNORE);
8275 else
8276 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8277 current->pid);
8278 }
8279
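/*
 * Common write routine for set_ftrace_pid and set_ftrace_notrace_pid.
 * A new pid list is parsed from the user buffer and published with RCU;
 * any previous list is freed after a grace period. Installing the first
 * list also registers the sched_switch probe, and every CPU is poked so
 * that currently running tasks are re-evaluated immediately.
 */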
8280 static ssize_t
8281 pid_write(struct file *filp, const char __user *ubuf,
8282 size_t cnt, loff_t *ppos, int type)
8283 {
8284 struct seq_file *m = filp->private_data;
8285 struct trace_array *tr = m->private;
8286 struct trace_pid_list *filtered_pids;
8287 struct trace_pid_list *other_pids;
8288 struct trace_pid_list *pid_list;
8289 ssize_t ret;
8290
8291 if (!cnt)
8292 return 0;
8293
8294 mutex_lock(&ftrace_lock);
8295
8296 switch (type) {
8297 case TRACE_PIDS:
8298 filtered_pids = rcu_dereference_protected(tr->function_pids,
8299 lockdep_is_held(&ftrace_lock));
8300 other_pids = rcu_dereference_protected(tr->function_no_pids,
8301 lockdep_is_held(&ftrace_lock));
8302 break;
8303 case TRACE_NO_PIDS:
8304 filtered_pids = rcu_dereference_protected(tr->function_no_pids,
8305 lockdep_is_held(&ftrace_lock));
8306 other_pids = rcu_dereference_protected(tr->function_pids,
8307 lockdep_is_held(&ftrace_lock));
8308 break;
8309 default:
8310 ret = -EINVAL;
8311 WARN_ON_ONCE(1);
8312 goto out;
8313 }
8314
8315 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
8316 if (ret < 0)
8317 goto out;
8318
8319 switch (type) {
8320 case TRACE_PIDS:
8321 rcu_assign_pointer(tr->function_pids, pid_list);
8322 break;
8323 case TRACE_NO_PIDS:
8324 rcu_assign_pointer(tr->function_no_pids, pid_list);
8325 break;
8326 }
8327
8329 if (filtered_pids) {
8330 synchronize_rcu();
8331 trace_pid_list_free(filtered_pids);
8332 } else if (pid_list && !other_pids) {
8333 /* Register a probe to set whether to ignore the tracing of a task */
8334 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
8335 }
8336
8337 /*
8338 * Ignoring of pids is done at task switch. But we have to
8339 * check for those tasks that are currently running.
8340 * Always do this in case a pid was appended or removed.
8341 */
8342 on_each_cpu(ignore_task_cpu, tr, 1);
8343
8344 ftrace_update_pid_func();
8345 ftrace_startup_all(0);
8346 out:
8347 mutex_unlock(&ftrace_lock);
8348
8349 if (ret > 0)
8350 *ppos += ret;
8351
8352 return ret;
8353 }
8354
8355 static ssize_t
8356 ftrace_pid_write(struct file *filp, const char __user *ubuf,
8357 size_t cnt, loff_t *ppos)
8358 {
8359 return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
8360 }
8361
8362 static ssize_t
8363 ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
8364 size_t cnt, loff_t *ppos)
8365 {
8366 return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
8367 }
8368
8369 static int
8370 ftrace_pid_release(struct inode *inode, struct file *file)
8371 {
8372 struct trace_array *tr = inode->i_private;
8373
8374 trace_array_put(tr);
8375
8376 return seq_release(inode, file);
8377 }
8378
8379 static const struct file_operations ftrace_pid_fops = {
8380 .open = ftrace_pid_open,
8381 .write = ftrace_pid_write,
8382 .read = seq_read,
8383 .llseek = tracing_lseek,
8384 .release = ftrace_pid_release,
8385 };
8386
8387 static const struct file_operations ftrace_no_pid_fops = {
8388 .open = ftrace_no_pid_open,
8389 .write = ftrace_no_pid_write,
8390 .read = seq_read,
8391 .llseek = tracing_lseek,
8392 .release = ftrace_pid_release,
8393 };
8394
8395 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8396 {
8397 trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
8398 tr, &ftrace_pid_fops);
8399 trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
8400 d_tracer, tr, &ftrace_no_pid_fops);
8401 }
8402
8403 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
8404 struct dentry *d_tracer)
8405 {
8406 /* Only the top level directory has the dyn_tracefs and profile */
8407 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
8408
8409 ftrace_init_dyn_tracefs(d_tracer);
8410 ftrace_profile_tracefs(d_tracer);
8411 }
8412
8413 /**
8414 * ftrace_kill - kill ftrace
8415 *
8416 * This function should be used by panic code. It stops ftrace
8417 * but in a not so nice way: nothing is cleaned up and ftrace is simply
8418 * marked as disabled, so it is safe to call from atomic context.
8419 */
8420 void ftrace_kill(void)
8421 {
8422 ftrace_disabled = 1;
8423 ftrace_enabled = 0;
8424 ftrace_trace_function = ftrace_stub;
8425 kprobe_ftrace_kill();
8426 }
8427
8428 /**
8429 * ftrace_is_dead - Test if ftrace is dead or not.
8430 *
8431 * Returns: 1 if ftrace is "dead", zero otherwise.
8432 */
8433 int ftrace_is_dead(void)
8434 {
8435 return ftrace_disabled;
8436 }
8437
8438 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
8439 /*
8440 * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
8441 * it doesn't conflict with any direct ftrace_ops. If there is existing
8442 * direct ftrace_ops on a kernel function being patched, call
8443 * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
8444 *
8445 * @ops: ftrace_ops being registered.
8446 *
8447 * Returns:
8448 * 0 on success;
8449 * Negative on failure.
8450 */
8451 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8452 {
8453 struct ftrace_func_entry *entry;
8454 struct ftrace_hash *hash;
8455 struct ftrace_ops *op;
8456 int size, i, ret;
8457
8458 lockdep_assert_held_once(&direct_mutex);
8459
8460 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8461 return 0;
8462
8463 hash = ops->func_hash->filter_hash;
8464 size = 1 << hash->size_bits;
8465 for (i = 0; i < size; i++) {
8466 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8467 unsigned long ip = entry->ip;
8468 bool found_op = false;
8469
8470 mutex_lock(&ftrace_lock);
8471 do_for_each_ftrace_op(op, ftrace_ops_list) {
8472 if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8473 continue;
8474 if (ops_references_ip(op, ip)) {
8475 found_op = true;
8476 break;
8477 }
8478 } while_for_each_ftrace_op(op);
8479 mutex_unlock(&ftrace_lock);
8480
8481 if (found_op) {
8482 if (!op->ops_func)
8483 return -EBUSY;
8484
8485 ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
8486 if (ret)
8487 return ret;
8488 }
8489 }
8490 }
8491
8492 return 0;
8493 }
8494
8495 /*
8496 * Similar to prepare_direct_functions_for_ipmodify, but cleans up after an
8497 * ops with IPMODIFY is unregistered. The cleanup is optional for most DIRECT
8498 * ops.
8499 */
8500 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8501 {
8502 struct ftrace_func_entry *entry;
8503 struct ftrace_hash *hash;
8504 struct ftrace_ops *op;
8505 int size, i;
8506
8507 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8508 return;
8509
8510 mutex_lock(&direct_mutex);
8511
8512 hash = ops->func_hash->filter_hash;
8513 size = 1 << hash->size_bits;
8514 for (i = 0; i < size; i++) {
8515 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8516 unsigned long ip = entry->ip;
8517 bool found_op = false;
8518
8519 mutex_lock(&ftrace_lock);
8520 do_for_each_ftrace_op(op, ftrace_ops_list) {
8521 if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8522 continue;
8523 if (ops_references_ip(op, ip)) {
8524 found_op = true;
8525 break;
8526 }
8527 } while_for_each_ftrace_op(op);
8528 mutex_unlock(&ftrace_lock);
8529
8530 /* The cleanup is optional, ignore any errors */
8531 if (found_op && op->ops_func)
8532 op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
8533 }
8534 }
8535 mutex_unlock(&direct_mutex);
8536 }
8537
8538 #define lock_direct_mutex() mutex_lock(&direct_mutex)
8539 #define unlock_direct_mutex() mutex_unlock(&direct_mutex)
8540
8541 #else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8542
8543 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8544 {
8545 return 0;
8546 }
8547
8548 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8549 {
8550 }
8551
8552 #define lock_direct_mutex() do { } while (0)
8553 #define unlock_direct_mutex() do { } while (0)
8554
8555 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8556
8557 /*
8558 * Similar to register_ftrace_function, except we don't lock direct_mutex.
8559 */
8560 static int register_ftrace_function_nolock(struct ftrace_ops *ops)
8561 {
8562 int ret;
8563
8564 ftrace_ops_init(ops);
8565
8566 mutex_lock(&ftrace_lock);
8567
8568 ret = ftrace_startup(ops, 0);
8569
8570 mutex_unlock(&ftrace_lock);
8571
8572 return ret;
8573 }
8574
8575 /**
8576 * register_ftrace_function - register a function for profiling
8577 * @ops: ops structure that holds the function for profiling.
8578 *
8579 * Register a function to be called by all functions in the
8580 * kernel.
8581 *
8582 * Note: @ops->func and all the functions it calls must be labeled
8583 * with "notrace", otherwise it will go into a
8584 * recursive loop.
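 *
 * A minimal usage sketch (the callback and ops names below are only
 * illustrative, not defined elsewhere in the kernel):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct ftrace_regs *fregs)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);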
8585 */
8586 int register_ftrace_function(struct ftrace_ops *ops)
8587 {
8588 int ret;
8589
8590 lock_direct_mutex();
8591 ret = prepare_direct_functions_for_ipmodify(ops);
8592 if (ret < 0)
8593 goto out_unlock;
8594
8595 ret = register_ftrace_function_nolock(ops);
8596
8597 out_unlock:
8598 unlock_direct_mutex();
8599 return ret;
8600 }
8601 EXPORT_SYMBOL_GPL(register_ftrace_function);
8602
8603 /**
8604 * unregister_ftrace_function - unregister a function for profiling.
8605 * @ops: ops structure that holds the function to unregister
8606 *
8607 * Unregister a function that was added to be called by ftrace profiling.
8608 */
8609 int unregister_ftrace_function(struct ftrace_ops *ops)
8610 {
8611 int ret;
8612
8613 mutex_lock(&ftrace_lock);
8614 ret = ftrace_shutdown(ops, 0);
8615 mutex_unlock(&ftrace_lock);
8616
8617 cleanup_direct_functions_after_ipmodify(ops);
8618 return ret;
8619 }
8620 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
8621
8622 static int symbols_cmp(const void *a, const void *b)
8623 {
8624 const char **str_a = (const char **) a;
8625 const char **str_b = (const char **) b;
8626
8627 return strcmp(*str_a, *str_b);
8628 }
8629
8630 struct kallsyms_data {
8631 unsigned long *addrs;
8632 const char **syms;
8633 size_t cnt;
8634 size_t found;
8635 };
8636
8637 /* This function gets called for all kernel and module symbols
8638 * and returns 1 in case we resolved all the requested symbols,
8639 * 0 otherwise.
8640 */
8641 static int kallsyms_callback(void *data, const char *name, unsigned long addr)
8642 {
8643 struct kallsyms_data *args = data;
8644 const char **sym;
8645 int idx;
8646
8647 sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8648 if (!sym)
8649 return 0;
8650
8651 idx = sym - args->syms;
8652 if (args->addrs[idx])
8653 return 0;
8654
8655 if (!ftrace_location(addr))
8656 return 0;
8657
8658 args->addrs[idx] = addr;
8659 args->found++;
8660 return args->found == args->cnt ? 1 : 0;
8661 }
8662
8663 /**
8664 * ftrace_lookup_symbols - Lookup addresses for array of symbols
8665 *
8666 * @sorted_syms: array of pointers to symbol names to resolve,
8667 * must be alphabetically sorted
8668 * @cnt: number of symbols/addresses in @sorted_syms/@addrs arrays
8669 * @addrs: array for storing resulting addresses
8670 *
8671 * This function looks up addresses for the array of symbols provided in
8672 * the @sorted_syms array (must be alphabetically sorted) and stores them in
8673 * @addrs array, which needs to be big enough to store at least @cnt
8674 * addresses.
8675 *
8676 * Returns: 0 if all provided symbols are found, -ESRCH otherwise.
8677 */
8678 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
8679 {
8680 struct kallsyms_data args;
8681 int found_all;
8682
8683 memset(addrs, 0, sizeof(*addrs) * cnt);
8684 args.addrs = addrs;
8685 args.syms = sorted_syms;
8686 args.cnt = cnt;
8687 args.found = 0;
8688
8689 found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
8690 if (found_all)
8691 return 0;
8692 found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
8693 return found_all ? 0 : -ESRCH;
8694 }
8695
8696 #ifdef CONFIG_SYSCTL
8697
8698 #ifdef CONFIG_DYNAMIC_FTRACE
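/*
 * Helpers for the kernel.ftrace_enabled sysctl: when the knob is flipped
 * while function tracing users are registered (ftrace_start_up is
 * non-zero), re-enable or disable the patched call sites accordingly.
 */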
8699 static void ftrace_startup_sysctl(void)
8700 {
8701 int command;
8702
8703 if (unlikely(ftrace_disabled))
8704 return;
8705
8706 /* Force update next time */
8707 saved_ftrace_func = NULL;
8708 /* ftrace_start_up is true if we want ftrace running */
8709 if (ftrace_start_up) {
8710 command = FTRACE_UPDATE_CALLS;
8711 if (ftrace_graph_active)
8712 command |= FTRACE_START_FUNC_RET;
8713 ftrace_startup_enable(command);
8714 }
8715 }
8716
8717 static void ftrace_shutdown_sysctl(void)
8718 {
8719 int command;
8720
8721 if (unlikely(ftrace_disabled))
8722 return;
8723
8724 /* ftrace_start_up is true if ftrace is running */
8725 if (ftrace_start_up) {
8726 command = FTRACE_DISABLE_CALLS;
8727 if (ftrace_graph_active)
8728 command |= FTRACE_STOP_FUNC_RET;
8729 ftrace_run_update_code(command);
8730 }
8731 }
8732 #else
8733 # define ftrace_startup_sysctl() do { } while (0)
8734 # define ftrace_shutdown_sysctl() do { } while (0)
8735 #endif /* CONFIG_DYNAMIC_FTRACE */
8736
8737 static bool is_permanent_ops_registered(void)
8738 {
8739 struct ftrace_ops *op;
8740
8741 do_for_each_ftrace_op(op, ftrace_ops_list) {
8742 if (op->flags & FTRACE_OPS_FL_PERMANENT)
8743 return true;
8744 } while_for_each_ftrace_op(op);
8745
8746 return false;
8747 }
8748
8749 static int
8750 ftrace_enable_sysctl(const struct ctl_table *table, int write,
8751 void *buffer, size_t *lenp, loff_t *ppos)
8752 {
8753 int ret = -ENODEV;
8754
8755 mutex_lock(&ftrace_lock);
8756
8757 if (unlikely(ftrace_disabled))
8758 goto out;
8759
8760 ret = proc_dointvec(table, write, buffer, lenp, ppos);
8761
8762 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
8763 goto out;
8764
8765 if (ftrace_enabled) {
8766
8767 /* we are starting ftrace again */
8768 if (rcu_dereference_protected(ftrace_ops_list,
8769 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
8770 update_ftrace_function();
8771
8772 ftrace_startup_sysctl();
8773
8774 } else {
8775 if (is_permanent_ops_registered()) {
8776 ftrace_enabled = true;
8777 ret = -EBUSY;
8778 goto out;
8779 }
8780
8781 /* stopping ftrace calls (just send to ftrace_stub) */
8782 ftrace_trace_function = ftrace_stub;
8783
8784 ftrace_shutdown_sysctl();
8785 }
8786
8787 last_ftrace_enabled = !!ftrace_enabled;
8788 out:
8789 mutex_unlock(&ftrace_lock);
8790 return ret;
8791 }
8792
8793 static struct ctl_table ftrace_sysctls[] = {
8794 {
8795 .procname = "ftrace_enabled",
8796 .data = &ftrace_enabled,
8797 .maxlen = sizeof(int),
8798 .mode = 0644,
8799 .proc_handler = ftrace_enable_sysctl,
8800 },
8801 };
8802
8803 static int __init ftrace_sysctl_init(void)
8804 {
8805 register_sysctl_init("kernel", ftrace_sysctls);
8806 return 0;
8807 }
8808 late_initcall(ftrace_sysctl_init);
8809 #endif
8810